diff --git a/sys/dev/al_eth/al_eth.c b/sys/dev/al_eth/al_eth.c
index f3b7ad370df5..b2bd94bb504c 100644
--- a/sys/dev/al_eth/al_eth.c
+++ b/sys/dev/al_eth/al_eth.c
@@ -1,3575 +1,3575 @@
/*-
 * Copyright (c) 2015,2016 Annapurna Labs Ltd. and affiliates
 * All rights reserved.
 *
 * Developed by Semihalf.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include
__FBSDID("$FreeBSD$");

#include #include #include #include #include #include #include #include
#include #include #include #include #include #include #include
#include "opt_inet.h"
#include "opt_inet6.h"

#include #include #include #include #include #include #include #include
#include #include #include

#ifdef INET
#include #include #include #include
#endif

#ifdef INET6
#include
#endif

#include #include #include #include #include #include #include #include
#include #include #include

#include "al_eth.h"
#include "al_init_eth_lm.h"
#include "arm/annapurna/alpine/alpine_serdes.h"

#include "miibus_if.h"

#define device_printf_dbg(fmt, ...) \
	do { \
		if (AL_DBG_LEVEL >= AL_DBG_LEVEL_DBG) { AL_DBG_LOCK(); \
			device_printf(fmt, __VA_ARGS__); AL_DBG_UNLOCK();} \
	} while (0)

MALLOC_DEFINE(M_IFAL, "if_al_malloc", "All allocated data for AL ETH driver");

/* move out to some pci header file */
#define PCI_VENDOR_ID_ANNAPURNA_LABS	0x1c36
#define PCI_DEVICE_ID_AL_ETH		0x0001
#define PCI_DEVICE_ID_AL_ETH_ADVANCED	0x0002
#define PCI_DEVICE_ID_AL_ETH_NIC	0x0003
#define PCI_DEVICE_ID_AL_ETH_FPGA_NIC	0x0030
#define PCI_DEVICE_ID_AL_CRYPTO		0x0011
#define PCI_DEVICE_ID_AL_CRYPTO_VF	0x8011
#define PCI_DEVICE_ID_AL_RAID_DMA	0x0021
#define PCI_DEVICE_ID_AL_RAID_DMA_VF	0x8021
#define PCI_DEVICE_ID_AL_USB		0x0041

#define MAC_ADDR_STR "%02x:%02x:%02x:%02x:%02x:%02x"
#define MAC_ADDR(addr) addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]

#define AL_ETH_MAC_TABLE_UNICAST_IDX_BASE	0
#define AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT	4
#define AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX	(AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + \
						 AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT)

#define AL_ETH_MAC_TABLE_DROP_IDX		(AL_ETH_FWD_MAC_NUM - 1)
#define AL_ETH_MAC_TABLE_BROADCAST_IDX		(AL_ETH_MAC_TABLE_DROP_IDX - 1)

#define AL_ETH_THASH_UDMA_SHIFT		0
#define AL_ETH_THASH_UDMA_MASK		(0xF << AL_ETH_THASH_UDMA_SHIFT)

#define AL_ETH_THASH_Q_SHIFT		4
#define AL_ETH_THASH_Q_MASK		(0x3 << AL_ETH_THASH_Q_SHIFT)

/* the following defines should be moved to hal */
#define AL_ETH_FSM_ENTRY_IPV4_TCP	0
#define AL_ETH_FSM_ENTRY_IPV4_UDP	1
#define AL_ETH_FSM_ENTRY_IPV6_TCP	2
#define AL_ETH_FSM_ENTRY_IPV6_UDP	3
#define AL_ETH_FSM_ENTRY_IPV6_NO_UDP_TCP	4
#define AL_ETH_FSM_ENTRY_IPV4_NO_UDP_TCP	5

/* FSM DATA format */
#define AL_ETH_FSM_DATA_OUTER_2_TUPLE	0
#define AL_ETH_FSM_DATA_OUTER_4_TUPLE	1
#define AL_ETH_FSM_DATA_INNER_2_TUPLE	2
#define AL_ETH_FSM_DATA_INNER_4_TUPLE	3

#define AL_ETH_FSM_DATA_HASH_SEL	(1 << 2)

#define AL_ETH_FSM_DATA_DEFAULT_Q	0
#define AL_ETH_FSM_DATA_DEFAULT_UDMA	0

#define AL_BR_SIZE	512
#define AL_TSO_SIZE	65500
#define AL_DEFAULT_MTU	1500

#define CSUM_OFFLOAD	(CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)

#define AL_IP_ALIGNMENT_OFFSET	2

#define SFP_I2C_ADDR	0x50

#define AL_MASK_GROUP_A_INT	0x7
#define AL_MASK_GROUP_B_INT	0xF
#define AL_MASK_GROUP_C_INT	0xF
#define AL_MASK_GROUP_D_INT	0xFFFFFFFF

#define AL_REG_OFFSET_FORWARD_INTR	(0x1800000 + 0x1210)
#define AL_EN_FORWARD_INTR	0x1FFFF
#define AL_DIS_FORWARD_INTR	0

#define AL_M2S_MASK_INIT	0x480
#define AL_S2M_MASK_INIT	0x1E0
#define AL_M2S_S2M_MASK_NOT_INT	(0x3f << 25)

#define AL_10BASE_T_SPEED	10
#define AL_100BASE_TX_SPEED	100
#define AL_1000BASE_T_SPEED	1000

static devclass_t al_devclass;

#define AL_RX_LOCK_INIT(_sc)	mtx_init(&((_sc)->if_rx_lock), "ALRXL", "ALRXL", MTX_DEF)
#define AL_RX_LOCK(_sc)		mtx_lock(&((_sc)->if_rx_lock))
#define AL_RX_UNLOCK(_sc)	mtx_unlock(&((_sc)->if_rx_lock))

/* helper functions */
static int al_is_device_supported(device_t);

static void al_eth_init_rings(struct al_eth_adapter *);
static void al_eth_flow_ctrl_disable(struct al_eth_adapter *);
int al_eth_fpga_read_pci_config(void *, int, uint32_t *);
int al_eth_fpga_write_pci_config(void *, int, uint32_t);
int al_eth_read_pci_config(void *, int, uint32_t *);
int al_eth_write_pci_config(void *, int, uint32_t);
void al_eth_irq_config(uint32_t *, uint32_t);
void al_eth_forward_int_config(uint32_t *, uint32_t);
static void al_eth_start_xmit(void *, int);
static void al_eth_rx_recv_work(void *, int);
static int al_eth_up(struct al_eth_adapter *);
static void al_eth_down(struct al_eth_adapter *);
static void al_eth_interrupts_unmask(struct al_eth_adapter *);
static void
al_eth_interrupts_mask(struct al_eth_adapter *); static int al_eth_check_mtu(struct al_eth_adapter *, int); static uint64_t al_get_counter(struct ifnet *, ift_counter); static void al_eth_req_rx_buff_size(struct al_eth_adapter *, int); static int al_eth_board_params_init(struct al_eth_adapter *); static int al_media_update(struct ifnet *); static void al_media_status(struct ifnet *, struct ifmediareq *); static int al_eth_function_reset(struct al_eth_adapter *); static int al_eth_hw_init_adapter(struct al_eth_adapter *); static void al_eth_serdes_init(struct al_eth_adapter *); static void al_eth_lm_config(struct al_eth_adapter *); static int al_eth_hw_init(struct al_eth_adapter *); static void al_tick_stats(void *); /* ifnet entry points */ static void al_init(void *); static int al_mq_start(struct ifnet *, struct mbuf *); static void al_qflush(struct ifnet *); static int al_ioctl(struct ifnet * ifp, u_long, caddr_t); /* bus entry points */ static int al_probe(device_t); static int al_attach(device_t); static int al_detach(device_t); static int al_shutdown(device_t); /* mii bus support routines */ static int al_miibus_readreg(device_t, int, int); static int al_miibus_writereg(device_t, int, int, int); static void al_miibus_statchg(device_t); static void al_miibus_linkchg(device_t); struct al_eth_adapter* g_adapters[16]; uint32_t g_adapters_count; /* flag for napi-like mbuf processing, controlled from sysctl */ static int napi = 0; static device_method_t al_methods[] = { /* Device interface */ DEVMETHOD(device_probe, al_probe), DEVMETHOD(device_attach, al_attach), DEVMETHOD(device_detach, al_detach), DEVMETHOD(device_shutdown, al_shutdown), DEVMETHOD(miibus_readreg, al_miibus_readreg), DEVMETHOD(miibus_writereg, al_miibus_writereg), DEVMETHOD(miibus_statchg, al_miibus_statchg), DEVMETHOD(miibus_linkchg, al_miibus_linkchg), { 0, 0 } }; static driver_t al_driver = { "al", al_methods, sizeof(struct al_eth_adapter), }; DRIVER_MODULE(al, pci, al_driver, al_devclass, 0, 0); DRIVER_MODULE(miibus, al, miibus_driver, miibus_devclass, 0, 0); static int al_probe(device_t dev) { if ((al_is_device_supported(dev)) != 0) { device_set_desc(dev, "al"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int al_attach(device_t dev) { struct al_eth_adapter *adapter; struct sysctl_oid_list *child; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; struct ifnet *ifp; uint32_t dev_id; uint32_t rev_id; int bar_udma; int bar_mac; int bar_ec; int err; err = 0; ifp = NULL; dev_id = rev_id = 0; ctx = device_get_sysctl_ctx(dev); tree = SYSCTL_PARENT(device_get_sysctl_tree(dev)); child = SYSCTL_CHILDREN(tree); if (g_adapters_count == 0) { SYSCTL_ADD_INT(ctx, child, OID_AUTO, "napi", CTLFLAG_RW, &napi, 0, "Use pseudo-napi mechanism"); } adapter = device_get_softc(dev); adapter->dev = dev; adapter->board_type = ALPINE_INTEGRATED; snprintf(adapter->name, AL_ETH_NAME_MAX_LEN, "%s", device_get_nameunit(dev)); AL_RX_LOCK_INIT(adapter); g_adapters[g_adapters_count] = adapter; bar_udma = PCIR_BAR(AL_ETH_UDMA_BAR); adapter->udma_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bar_udma, RF_ACTIVE); if (adapter->udma_res == NULL) { device_printf(adapter->dev, "could not allocate memory resources for DMA.\n"); err = ENOMEM; goto err_res_dma; } adapter->udma_base = al_bus_dma_to_va(rman_get_bustag(adapter->udma_res), rman_get_bushandle(adapter->udma_res)); bar_mac = PCIR_BAR(AL_ETH_MAC_BAR); adapter->mac_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bar_mac, RF_ACTIVE); if (adapter->mac_res == NULL) { 
device_printf(adapter->dev, "could not allocate memory resources for MAC.\n"); err = ENOMEM; goto err_res_mac; } adapter->mac_base = al_bus_dma_to_va(rman_get_bustag(adapter->mac_res), rman_get_bushandle(adapter->mac_res)); bar_ec = PCIR_BAR(AL_ETH_EC_BAR); adapter->ec_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bar_ec, RF_ACTIVE); if (adapter->ec_res == NULL) { device_printf(adapter->dev, "could not allocate memory resources for EC.\n"); err = ENOMEM; goto err_res_ec; } adapter->ec_base = al_bus_dma_to_va(rman_get_bustag(adapter->ec_res), rman_get_bushandle(adapter->ec_res)); adapter->netdev = ifp = if_alloc(IFT_ETHER); adapter->netdev->if_link_state = LINK_STATE_DOWN; ifp->if_softc = adapter; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_flags = ifp->if_drv_flags; ifp->if_flags |= IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_ALLMULTI; ifp->if_transmit = al_mq_start; ifp->if_qflush = al_qflush; ifp->if_ioctl = al_ioctl; ifp->if_init = al_init; ifp->if_get_counter = al_get_counter; ifp->if_mtu = AL_DEFAULT_MTU; adapter->if_flags = ifp->if_flags; ifp->if_capabilities = ifp->if_capenable = 0; ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_TSO | IFCAP_LRO | IFCAP_JUMBO_MTU; ifp->if_capenable = ifp->if_capabilities; adapter->id_number = g_adapters_count; if (adapter->board_type == ALPINE_INTEGRATED) { dev_id = pci_get_device(adapter->dev); rev_id = pci_get_revid(adapter->dev); } else { al_eth_fpga_read_pci_config(adapter->internal_pcie_base, PCIR_DEVICE, &dev_id); al_eth_fpga_read_pci_config(adapter->internal_pcie_base, PCIR_REVID, &rev_id); } adapter->dev_id = dev_id; adapter->rev_id = rev_id; /* set default ring sizes */ adapter->tx_ring_count = AL_ETH_DEFAULT_TX_SW_DESCS; adapter->tx_descs_count = AL_ETH_DEFAULT_TX_HW_DESCS; adapter->rx_ring_count = AL_ETH_DEFAULT_RX_DESCS; adapter->rx_descs_count = AL_ETH_DEFAULT_RX_DESCS; adapter->num_tx_queues = AL_ETH_NUM_QUEUES; adapter->num_rx_queues = AL_ETH_NUM_QUEUES; adapter->small_copy_len = AL_ETH_DEFAULT_SMALL_PACKET_LEN; adapter->link_poll_interval = AL_ETH_DEFAULT_LINK_POLL_INTERVAL; adapter->max_rx_buff_alloc_size = AL_ETH_DEFAULT_MAX_RX_BUFF_ALLOC_SIZE; al_eth_req_rx_buff_size(adapter, adapter->netdev->if_mtu); adapter->link_config.force_1000_base_x = AL_ETH_DEFAULT_FORCE_1000_BASEX; err = al_eth_board_params_init(adapter); if (err != 0) goto err; if (adapter->mac_mode == AL_ETH_MAC_MODE_10GbE_Serial) { ifmedia_init(&adapter->media, IFM_IMASK, al_media_update, al_media_status); ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); } al_eth_function_reset(adapter); err = al_eth_hw_init_adapter(adapter); if (err != 0) goto err; al_eth_init_rings(adapter); g_adapters_count++; al_eth_lm_config(adapter); mtx_init(&adapter->stats_mtx, "AlStatsMtx", NULL, MTX_DEF); mtx_init(&adapter->wd_mtx, "AlWdMtx", NULL, MTX_DEF); callout_init_mtx(&adapter->stats_callout, &adapter->stats_mtx, 0); callout_init_mtx(&adapter->wd_callout, &adapter->wd_mtx, 0); ether_ifattach(ifp, adapter->mac_addr); ifp->if_mtu = AL_DEFAULT_MTU; if (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII) { al_eth_hw_init(adapter); /* Attach PHY(s) */ err = mii_attach(adapter->dev, &adapter->miibus, adapter->netdev, al_media_update, al_media_status, BMSR_DEFCAPMASK, 0, MII_OFFSET_ANY, 0); if (err != 0) { 
device_printf(adapter->dev, "attaching PHYs failed\n"); return (err); } adapter->mii = device_get_softc(adapter->miibus); } return (err); err: bus_release_resource(dev, SYS_RES_MEMORY, bar_ec, adapter->ec_res); err_res_ec: bus_release_resource(dev, SYS_RES_MEMORY, bar_mac, adapter->mac_res); err_res_mac: bus_release_resource(dev, SYS_RES_MEMORY, bar_udma, adapter->udma_res); err_res_dma: return (err); } static int al_detach(device_t dev) { struct al_eth_adapter *adapter; adapter = device_get_softc(dev); ether_ifdetach(adapter->netdev); mtx_destroy(&adapter->stats_mtx); mtx_destroy(&adapter->wd_mtx); al_eth_down(adapter); bus_release_resource(dev, SYS_RES_IRQ, 0, adapter->irq_res); bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->ec_res); bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->mac_res); bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->udma_res); return (0); } int al_eth_fpga_read_pci_config(void *handle, int where, uint32_t *val) { /* handle is the base address of the adapter */ *val = al_reg_read32((void*)((u_long)handle + where)); return (0); } int al_eth_fpga_write_pci_config(void *handle, int where, uint32_t val) { /* handle is the base address of the adapter */ al_reg_write32((void*)((u_long)handle + where), val); return (0); } int al_eth_read_pci_config(void *handle, int where, uint32_t *val) { /* handle is a pci_dev */ *val = pci_read_config((device_t)handle, where, sizeof(*val)); return (0); } int al_eth_write_pci_config(void *handle, int where, uint32_t val) { /* handle is a pci_dev */ pci_write_config((device_t)handle, where, val, sizeof(val)); return (0); } void al_eth_irq_config(uint32_t *offset, uint32_t value) { al_reg_write32_relaxed(offset, value); } void al_eth_forward_int_config(uint32_t *offset, uint32_t value) { al_reg_write32(offset, value); } static void al_eth_serdes_init(struct al_eth_adapter *adapter) { void __iomem *serdes_base; adapter->serdes_init = false; serdes_base = alpine_serdes_resource_get(adapter->serdes_grp); if (serdes_base == NULL) { device_printf(adapter->dev, "serdes_base get failed!\n"); return; } serdes_base = al_bus_dma_to_va(serdes_tag, serdes_base); al_serdes_handle_grp_init(serdes_base, adapter->serdes_grp, &adapter->serdes_obj); adapter->serdes_init = true; } static void al_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *paddr; paddr = arg; *paddr = segs->ds_addr; } static int al_dma_alloc_coherent(struct device *dev, bus_dma_tag_t *tag, bus_dmamap_t *map, bus_addr_t *baddr, void **vaddr, uint32_t size) { int ret; uint32_t maxsize = ((size - 1)/PAGE_SIZE + 1) * PAGE_SIZE; ret = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, maxsize, 1, maxsize, BUS_DMA_COHERENT, NULL, NULL, tag); if (ret != 0) { device_printf(dev, "failed to create bus tag, ret = %d\n", ret); return (ret); } ret = bus_dmamem_alloc(*tag, vaddr, BUS_DMA_COHERENT | BUS_DMA_ZERO, map); if (ret != 0) { device_printf(dev, "failed to allocate dmamem, ret = %d\n", ret); return (ret); } ret = bus_dmamap_load(*tag, *map, *vaddr, size, al_dma_map_addr, baddr, 0); if (ret != 0) { device_printf(dev, "failed to allocate bus_dmamap_load, ret = %d\n", ret); return (ret); } return (0); } static void al_dma_free_coherent(bus_dma_tag_t tag, bus_dmamap_t map, void *vaddr) { bus_dmamap_unload(tag, map); bus_dmamem_free(tag, vaddr, map); bus_dma_tag_destroy(tag); } static void al_eth_mac_table_unicast_add(struct al_eth_adapter *adapter, uint8_t idx, uint8_t udma_mask) { struct 
al_eth_fwd_mac_table_entry entry = { { 0 } }; memcpy(entry.addr, adapter->mac_addr, sizeof(adapter->mac_addr)); memset(entry.mask, 0xff, sizeof(entry.mask)); entry.rx_valid = true; entry.tx_valid = false; entry.udma_mask = udma_mask; entry.filter = false; device_printf_dbg(adapter->dev, "%s: [%d]: addr "MAC_ADDR_STR" mask "MAC_ADDR_STR"\n", __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask)); al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry); } static void al_eth_mac_table_all_multicast_add(struct al_eth_adapter *adapter, uint8_t idx, uint8_t udma_mask) { struct al_eth_fwd_mac_table_entry entry = { { 0 } }; memset(entry.addr, 0x00, sizeof(entry.addr)); memset(entry.mask, 0x00, sizeof(entry.mask)); entry.mask[0] |= 1; entry.addr[0] |= 1; entry.rx_valid = true; entry.tx_valid = false; entry.udma_mask = udma_mask; entry.filter = false; device_printf_dbg(adapter->dev, "%s: [%d]: addr "MAC_ADDR_STR" mask "MAC_ADDR_STR"\n", __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask)); al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry); } static void al_eth_mac_table_broadcast_add(struct al_eth_adapter *adapter, uint8_t idx, uint8_t udma_mask) { struct al_eth_fwd_mac_table_entry entry = { { 0 } }; memset(entry.addr, 0xff, sizeof(entry.addr)); memset(entry.mask, 0xff, sizeof(entry.mask)); entry.rx_valid = true; entry.tx_valid = false; entry.udma_mask = udma_mask; entry.filter = false; device_printf_dbg(adapter->dev, "%s: [%d]: addr "MAC_ADDR_STR" mask "MAC_ADDR_STR"\n", __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask)); al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry); } static void al_eth_mac_table_promiscuous_set(struct al_eth_adapter *adapter, boolean_t promiscuous) { struct al_eth_fwd_mac_table_entry entry = { { 0 } }; memset(entry.addr, 0x00, sizeof(entry.addr)); memset(entry.mask, 0x00, sizeof(entry.mask)); entry.rx_valid = true; entry.tx_valid = false; entry.udma_mask = (promiscuous) ? 1 : 0; entry.filter = (promiscuous) ? false : true; device_printf_dbg(adapter->dev, "%s: %s promiscuous mode\n", __func__, (promiscuous) ? 
"enter" : "exit"); al_eth_fwd_mac_table_set(&adapter->hal_adapter, AL_ETH_MAC_TABLE_DROP_IDX, &entry); } static void al_eth_set_thash_table_entry(struct al_eth_adapter *adapter, uint8_t idx, uint8_t udma, uint32_t queue) { if (udma != 0) panic("only UDMA0 is supporter"); if (queue >= AL_ETH_NUM_QUEUES) panic("invalid queue number"); al_eth_thash_table_set(&adapter->hal_adapter, idx, udma, queue); } /* init FSM, no tunneling supported yet, if packet is tcp/udp over ipv4/ipv6, use 4 tuple hash */ static void al_eth_fsm_table_init(struct al_eth_adapter *adapter) { uint32_t val; int i; for (i = 0; i < AL_ETH_RX_FSM_TABLE_SIZE; i++) { uint8_t outer_type = AL_ETH_FSM_ENTRY_OUTER(i); switch (outer_type) { case AL_ETH_FSM_ENTRY_IPV4_TCP: case AL_ETH_FSM_ENTRY_IPV4_UDP: case AL_ETH_FSM_ENTRY_IPV6_TCP: case AL_ETH_FSM_ENTRY_IPV6_UDP: val = AL_ETH_FSM_DATA_OUTER_4_TUPLE | AL_ETH_FSM_DATA_HASH_SEL; break; case AL_ETH_FSM_ENTRY_IPV6_NO_UDP_TCP: case AL_ETH_FSM_ENTRY_IPV4_NO_UDP_TCP: val = AL_ETH_FSM_DATA_OUTER_2_TUPLE | AL_ETH_FSM_DATA_HASH_SEL; break; default: val = AL_ETH_FSM_DATA_DEFAULT_Q | AL_ETH_FSM_DATA_DEFAULT_UDMA; } al_eth_fsm_table_set(&adapter->hal_adapter, i, val); } } static void al_eth_mac_table_entry_clear(struct al_eth_adapter *adapter, uint8_t idx) { struct al_eth_fwd_mac_table_entry entry = { { 0 } }; device_printf_dbg(adapter->dev, "%s: clear entry %d\n", __func__, idx); al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry); } static int al_eth_hw_init_adapter(struct al_eth_adapter *adapter) { struct al_eth_adapter_params *params = &adapter->eth_hal_params; int rc; /* params->dev_id = adapter->dev_id; */ params->rev_id = adapter->rev_id; params->udma_id = 0; params->enable_rx_parser = 1; /* enable rx epe parser*/ params->udma_regs_base = adapter->udma_base; /* UDMA register base address */ params->ec_regs_base = adapter->ec_base; /* Ethernet controller registers base address */ params->mac_regs_base = adapter->mac_base; /* Ethernet MAC registers base address */ params->name = adapter->name; params->serdes_lane = adapter->serdes_lane; rc = al_eth_adapter_init(&adapter->hal_adapter, params); if (rc != 0) device_printf(adapter->dev, "%s failed at hal init!\n", __func__); if ((adapter->board_type == ALPINE_NIC) || (adapter->board_type == ALPINE_FPGA_NIC)) { /* in pcie NIC mode, force eth UDMA to access PCIE0 using the vmid */ struct al_udma_gen_tgtid_conf conf; int i; for (i = 0; i < DMA_MAX_Q; i++) { conf.tx_q_conf[i].queue_en = AL_TRUE; conf.tx_q_conf[i].desc_en = AL_FALSE; conf.tx_q_conf[i].tgtid = 0x100; /* for access from PCIE0 */ conf.rx_q_conf[i].queue_en = AL_TRUE; conf.rx_q_conf[i].desc_en = AL_FALSE; conf.rx_q_conf[i].tgtid = 0x100; /* for access from PCIE0 */ } al_udma_gen_tgtid_conf_set(adapter->udma_base, &conf); } return (rc); } static void al_eth_lm_config(struct al_eth_adapter *adapter) { struct al_eth_lm_init_params params = {0}; params.adapter = &adapter->hal_adapter; params.serdes_obj = &adapter->serdes_obj; params.lane = adapter->serdes_lane; params.sfp_detection = adapter->sfp_detection_needed; if (adapter->sfp_detection_needed == true) { params.sfp_bus_id = adapter->i2c_adapter_id; params.sfp_i2c_addr = SFP_I2C_ADDR; } if (adapter->sfp_detection_needed == false) { switch (adapter->mac_mode) { case AL_ETH_MAC_MODE_10GbE_Serial: if ((adapter->lt_en != 0) && (adapter->an_en != 0)) params.default_mode = AL_ETH_LM_MODE_10G_DA; else params.default_mode = AL_ETH_LM_MODE_10G_OPTIC; break; case AL_ETH_MAC_MODE_SGMII: params.default_mode = AL_ETH_LM_MODE_1G; break; 
default: params.default_mode = AL_ETH_LM_MODE_10G_DA; } } else params.default_mode = AL_ETH_LM_MODE_10G_DA; params.link_training = adapter->lt_en; params.rx_equal = true; params.static_values = !adapter->dont_override_serdes; params.i2c_context = adapter; params.kr_fec_enable = false; params.retimer_exist = adapter->retimer.exist; params.retimer_bus_id = adapter->retimer.bus_id; params.retimer_i2c_addr = adapter->retimer.i2c_addr; params.retimer_channel = adapter->retimer.channel; al_eth_lm_init(&adapter->lm_context, ¶ms); } static int al_eth_board_params_init(struct al_eth_adapter *adapter) { if (adapter->board_type == ALPINE_NIC) { adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial; adapter->sfp_detection_needed = false; adapter->phy_exist = false; adapter->an_en = false; adapter->lt_en = false; adapter->ref_clk_freq = AL_ETH_REF_FREQ_375_MHZ; adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ; } else if (adapter->board_type == ALPINE_FPGA_NIC) { adapter->mac_mode = AL_ETH_MAC_MODE_SGMII; adapter->sfp_detection_needed = false; adapter->phy_exist = false; adapter->an_en = false; adapter->lt_en = false; adapter->ref_clk_freq = AL_ETH_REF_FREQ_375_MHZ; adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ; } else { struct al_eth_board_params params; int rc; adapter->auto_speed = false; rc = al_eth_board_params_get(adapter->mac_base, ¶ms); if (rc != 0) { device_printf(adapter->dev, "board info not available\n"); return (-1); } adapter->phy_exist = params.phy_exist == TRUE; adapter->phy_addr = params.phy_mdio_addr; adapter->an_en = params.autoneg_enable; adapter->lt_en = params.kr_lt_enable; adapter->serdes_grp = params.serdes_grp; adapter->serdes_lane = params.serdes_lane; adapter->sfp_detection_needed = params.sfp_plus_module_exist; adapter->i2c_adapter_id = params.i2c_adapter_id; adapter->ref_clk_freq = params.ref_clk_freq; adapter->dont_override_serdes = params.dont_override_serdes; adapter->link_config.active_duplex = !params.half_duplex; adapter->link_config.autoneg = !params.an_disable; adapter->link_config.force_1000_base_x = params.force_1000_base_x; adapter->retimer.exist = params.retimer_exist; adapter->retimer.bus_id = params.retimer_bus_id; adapter->retimer.i2c_addr = params.retimer_i2c_addr; adapter->retimer.channel = params.retimer_channel; switch (params.speed) { default: device_printf(adapter->dev, "%s: invalid speed (%d)\n", __func__, params.speed); case AL_ETH_BOARD_1G_SPEED_1000M: adapter->link_config.active_speed = 1000; break; case AL_ETH_BOARD_1G_SPEED_100M: adapter->link_config.active_speed = 100; break; case AL_ETH_BOARD_1G_SPEED_10M: adapter->link_config.active_speed = 10; break; } switch (params.mdio_freq) { default: device_printf(adapter->dev, "%s: invalid mdio freq (%d)\n", __func__, params.mdio_freq); case AL_ETH_BOARD_MDIO_FREQ_2_5_MHZ: adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ; break; case AL_ETH_BOARD_MDIO_FREQ_1_MHZ: adapter->mdio_freq = AL_ETH_MDIO_FREQ_1000_KHZ; break; } switch (params.media_type) { case AL_ETH_BOARD_MEDIA_TYPE_RGMII: if (params.sfp_plus_module_exist == TRUE) /* Backward compatibility */ adapter->mac_mode = AL_ETH_MAC_MODE_SGMII; else adapter->mac_mode = AL_ETH_MAC_MODE_RGMII; adapter->use_lm = false; break; case AL_ETH_BOARD_MEDIA_TYPE_SGMII: adapter->mac_mode = AL_ETH_MAC_MODE_SGMII; adapter->use_lm = true; break; case AL_ETH_BOARD_MEDIA_TYPE_10GBASE_SR: adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial; adapter->use_lm = true; break; case AL_ETH_BOARD_MEDIA_TYPE_AUTO_DETECT: adapter->sfp_detection_needed = TRUE; adapter->auto_speed = 
false; adapter->use_lm = true; break; case AL_ETH_BOARD_MEDIA_TYPE_AUTO_DETECT_AUTO_SPEED: adapter->sfp_detection_needed = TRUE; adapter->auto_speed = true; adapter->mac_mode_set = false; adapter->use_lm = true; adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial; break; default: device_printf(adapter->dev, "%s: unsupported media type %d\n", __func__, params.media_type); return (-1); } device_printf(adapter->dev, "Board info: phy exist %s. phy addr %d. mdio freq %u Khz. " "SFP connected %s. media %d\n", params.phy_exist == TRUE ? "Yes" : "No", params.phy_mdio_addr, adapter->mdio_freq, params.sfp_plus_module_exist == TRUE ? "Yes" : "No", params.media_type); } al_eth_mac_addr_read(adapter->ec_base, 0, adapter->mac_addr); return (0); } static int al_eth_function_reset(struct al_eth_adapter *adapter) { struct al_eth_board_params params; int rc; /* save board params so we restore it after reset */ al_eth_board_params_get(adapter->mac_base, ¶ms); al_eth_mac_addr_read(adapter->ec_base, 0, adapter->mac_addr); if (adapter->board_type == ALPINE_INTEGRATED) rc = al_eth_flr_rmn(&al_eth_read_pci_config, &al_eth_write_pci_config, adapter->dev, adapter->mac_base); else rc = al_eth_flr_rmn(&al_eth_fpga_read_pci_config, &al_eth_fpga_write_pci_config, adapter->internal_pcie_base, adapter->mac_base); /* restore params */ al_eth_board_params_set(adapter->mac_base, ¶ms); al_eth_mac_addr_store(adapter->ec_base, 0, adapter->mac_addr); return (rc); } static void al_eth_init_rings(struct al_eth_adapter *adapter) { int i; for (i = 0; i < adapter->num_tx_queues; i++) { struct al_eth_ring *ring = &adapter->tx_ring[i]; ring->ring_id = i; ring->dev = adapter->dev; ring->adapter = adapter; ring->netdev = adapter->netdev; al_udma_q_handle_get(&adapter->hal_adapter.tx_udma, i, &ring->dma_q); ring->sw_count = adapter->tx_ring_count; ring->hw_count = adapter->tx_descs_count; ring->unmask_reg_offset = al_udma_iofic_unmask_offset_get((struct unit_regs *)adapter->udma_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_C); ring->unmask_val = ~(1 << i); } for (i = 0; i < adapter->num_rx_queues; i++) { struct al_eth_ring *ring = &adapter->rx_ring[i]; ring->ring_id = i; ring->dev = adapter->dev; ring->adapter = adapter; ring->netdev = adapter->netdev; al_udma_q_handle_get(&adapter->hal_adapter.rx_udma, i, &ring->dma_q); ring->sw_count = adapter->rx_ring_count; ring->hw_count = adapter->rx_descs_count; ring->unmask_reg_offset = al_udma_iofic_unmask_offset_get( (struct unit_regs *)adapter->udma_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B); ring->unmask_val = ~(1 << i); } } static void al_init_locked(void *arg) { struct al_eth_adapter *adapter = arg; if_t ifp = adapter->netdev; int rc = 0; al_eth_down(adapter); rc = al_eth_up(adapter); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if (rc == 0) ifp->if_drv_flags |= IFF_DRV_RUNNING; } static void al_init(void *arg) { struct al_eth_adapter *adapter = arg; al_init_locked(adapter); } static inline int al_eth_alloc_rx_buf(struct al_eth_adapter *adapter, struct al_eth_ring *rx_ring, struct al_eth_rx_buffer *rx_info) { struct al_buf *al_buf; bus_dma_segment_t segs[2]; int error; int nsegs; if (rx_info->m != NULL) return (0); rx_info->data_size = adapter->rx_mbuf_sz; AL_RX_LOCK(adapter); /* Get mbuf using UMA allocator */ rx_info->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_info->data_size); AL_RX_UNLOCK(adapter); if (rx_info->m == NULL) return (ENOMEM); rx_info->m->m_pkthdr.len = rx_info->m->m_len = adapter->rx_mbuf_sz; /* Map packets for DMA */ error = 
bus_dmamap_load_mbuf_sg(rx_ring->dma_buf_tag, rx_info->dma_map, rx_info->m, segs, &nsegs, BUS_DMA_NOWAIT); if (__predict_false(error)) { device_printf(rx_ring->dev, "failed to map mbuf, error = %d\n", error); m_freem(rx_info->m); rx_info->m = NULL; return (EFAULT); } al_buf = &rx_info->al_buf; al_buf->addr = segs[0].ds_addr + AL_IP_ALIGNMENT_OFFSET; al_buf->len = rx_info->data_size - AL_IP_ALIGNMENT_OFFSET; return (0); } static int al_eth_refill_rx_bufs(struct al_eth_adapter *adapter, unsigned int qid, unsigned int num) { struct al_eth_ring *rx_ring = &adapter->rx_ring[qid]; uint16_t next_to_use; unsigned int i; next_to_use = rx_ring->next_to_use; for (i = 0; i < num; i++) { int rc; struct al_eth_rx_buffer *rx_info = &rx_ring->rx_buffer_info[next_to_use]; if (__predict_false(al_eth_alloc_rx_buf(adapter, rx_ring, rx_info) < 0)) { device_printf(adapter->dev, "failed to alloc buffer for rx queue %d\n", qid); break; } rc = al_eth_rx_buffer_add(rx_ring->dma_q, &rx_info->al_buf, AL_ETH_RX_FLAGS_INT, NULL); if (__predict_false(rc)) { device_printf(adapter->dev, "failed to add buffer for rx queue %d\n", qid); break; } next_to_use = AL_ETH_RX_RING_IDX_NEXT(rx_ring, next_to_use); } if (__predict_false(i < num)) device_printf(adapter->dev, "refilled rx queue %d with %d pages only - available %d\n", qid, i, al_udma_available_get(rx_ring->dma_q)); if (__predict_true(i)) al_eth_rx_buffer_action(rx_ring->dma_q, i); rx_ring->next_to_use = next_to_use; return (i); } /* * al_eth_refill_all_rx_bufs - allocate all queues Rx buffers * @adapter: board private structure */ static void al_eth_refill_all_rx_bufs(struct al_eth_adapter *adapter) { int i; for (i = 0; i < adapter->num_rx_queues; i++) al_eth_refill_rx_bufs(adapter, i, AL_ETH_DEFAULT_RX_DESCS - 1); } static void al_eth_tx_do_cleanup(struct al_eth_ring *tx_ring) { unsigned int total_done; uint16_t next_to_clean; int qid = tx_ring->ring_id; total_done = al_eth_comp_tx_get(tx_ring->dma_q); device_printf_dbg(tx_ring->dev, "tx_poll: q %d total completed descs %x\n", qid, total_done); next_to_clean = tx_ring->next_to_clean; while (total_done != 0) { struct al_eth_tx_buffer *tx_info; struct mbuf *mbuf; tx_info = &tx_ring->tx_buffer_info[next_to_clean]; /* stop if not all descriptors of the packet are completed */ if (tx_info->tx_descs > total_done) break; mbuf = tx_info->m; tx_info->m = NULL; device_printf_dbg(tx_ring->dev, "tx_poll: q %d mbuf %p completed\n", qid, mbuf); /* map is no longer required */ bus_dmamap_unload(tx_ring->dma_buf_tag, tx_info->dma_map); m_freem(mbuf); total_done -= tx_info->tx_descs; next_to_clean = AL_ETH_TX_RING_IDX_NEXT(tx_ring, next_to_clean); } tx_ring->next_to_clean = next_to_clean; device_printf_dbg(tx_ring->dev, "tx_poll: q %d done next to clean %x\n", qid, next_to_clean); /* * need to make the rings circular update visible to * al_eth_start_xmit() before checking for netif_queue_stopped(). 
*/ al_smp_data_memory_barrier(); } static void al_eth_tx_csum(struct al_eth_ring *tx_ring, struct al_eth_tx_buffer *tx_info, struct al_eth_pkt *hal_pkt, struct mbuf *m) { uint32_t mss = m->m_pkthdr.tso_segsz; struct ether_vlan_header *eh; uint16_t etype; #ifdef INET struct ip *ip; #endif #ifdef INET6 struct ip6_hdr *ip6; #endif struct tcphdr *th = NULL; int ehdrlen, ip_hlen = 0; uint8_t ipproto = 0; uint32_t offload = 0; if (mss != 0) offload = 1; if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) offload = 1; if ((m->m_pkthdr.csum_flags & CSUM_OFFLOAD) != 0) offload = 1; if (offload != 0) { struct al_eth_meta_data *meta = &tx_ring->hal_meta; if (mss != 0) hal_pkt->flags |= (AL_ETH_TX_FLAGS_TSO | AL_ETH_TX_FLAGS_L4_CSUM); else hal_pkt->flags |= (AL_ETH_TX_FLAGS_L4_CSUM | AL_ETH_TX_FLAGS_L4_PARTIAL_CSUM); /* * Determine where frame payload starts. * Jump over vlan headers if already present, * helpful for QinQ too. */ eh = mtod(m, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { etype = ntohs(eh->evl_proto); ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; } else { etype = ntohs(eh->evl_encap_proto); ehdrlen = ETHER_HDR_LEN; } switch (etype) { #ifdef INET case ETHERTYPE_IP: ip = (struct ip *)(m->m_data + ehdrlen); ip_hlen = ip->ip_hl << 2; ipproto = ip->ip_p; hal_pkt->l3_proto_idx = AL_ETH_PROTO_ID_IPv4; th = (struct tcphdr *)((caddr_t)ip + ip_hlen); if (mss != 0) hal_pkt->flags |= AL_ETH_TX_FLAGS_IPV4_L3_CSUM; if (ipproto == IPPROTO_TCP) hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_TCP; else hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_UDP; break; #endif /* INET */ #ifdef INET6 case ETHERTYPE_IPV6: ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen); hal_pkt->l3_proto_idx = AL_ETH_PROTO_ID_IPv6; ip_hlen = sizeof(struct ip6_hdr); th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen); ipproto = ip6->ip6_nxt; if (ipproto == IPPROTO_TCP) hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_TCP; else hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_UDP; break; #endif /* INET6 */ default: break; } meta->words_valid = 4; meta->l3_header_len = ip_hlen; meta->l3_header_offset = ehdrlen; if (th != NULL) meta->l4_header_len = th->th_off; /* this param needed only for TSO */ meta->mss_idx_sel = 0; /* check how to select MSS */ meta->mss_val = mss; hal_pkt->meta = meta; } else hal_pkt->meta = NULL; } #define XMIT_QUEUE_TIMEOUT 100 static void al_eth_xmit_mbuf(struct al_eth_ring *tx_ring, struct mbuf *m) { struct al_eth_tx_buffer *tx_info; int error; int nsegs, a; uint16_t next_to_use; bus_dma_segment_t segs[AL_ETH_PKT_MAX_BUFS + 1]; struct al_eth_pkt *hal_pkt; struct al_buf *al_buf; boolean_t remap; /* Check if queue is ready */ if (unlikely(tx_ring->stall) != 0) { for (a = 0; a < XMIT_QUEUE_TIMEOUT; a++) { if (al_udma_available_get(tx_ring->dma_q) >= (AL_ETH_DEFAULT_TX_HW_DESCS - AL_ETH_TX_WAKEUP_THRESH)) { tx_ring->stall = 0; break; } pause("stall", 1); } if (a == XMIT_QUEUE_TIMEOUT) { device_printf(tx_ring->dev, "timeout waiting for queue %d ready!\n", tx_ring->ring_id); return; } else { device_printf_dbg(tx_ring->dev, "queue %d is ready!\n", tx_ring->ring_id); } } next_to_use = tx_ring->next_to_use; tx_info = &tx_ring->tx_buffer_info[next_to_use]; tx_info->m = m; hal_pkt = &tx_info->hal_pkt; if (m == NULL) { device_printf(tx_ring->dev, "mbuf is NULL\n"); return; } remap = TRUE; /* Map packets for DMA */ retry: error = bus_dmamap_load_mbuf_sg(tx_ring->dma_buf_tag, tx_info->dma_map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (__predict_false(error)) { struct mbuf *m_new; if (error == EFBIG) { /* Try it again? 
- one try */ if (remap == TRUE) { remap = FALSE; m_new = m_defrag(m, M_NOWAIT); if (m_new == NULL) { device_printf(tx_ring->dev, "failed to defrag mbuf\n"); goto exit; } m = m_new; goto retry; } else { device_printf(tx_ring->dev, "failed to map mbuf, error %d\n", error); goto exit; } } else { device_printf(tx_ring->dev, "failed to map mbuf, error %d\n", error); goto exit; } } /* set flags and meta data */ hal_pkt->flags = AL_ETH_TX_FLAGS_INT; al_eth_tx_csum(tx_ring, tx_info, hal_pkt, m); al_buf = hal_pkt->bufs; for (a = 0; a < nsegs; a++) { al_buf->addr = segs[a].ds_addr; al_buf->len = segs[a].ds_len; al_buf++; } hal_pkt->num_of_bufs = nsegs; /* prepare the packet's descriptors to dma engine */ tx_info->tx_descs = al_eth_tx_pkt_prepare(tx_ring->dma_q, hal_pkt); if (tx_info->tx_descs == 0) goto exit; /* * stop the queue when no more space available, the packet can have up * to AL_ETH_PKT_MAX_BUFS + 1 buffers and a meta descriptor */ if (unlikely(al_udma_available_get(tx_ring->dma_q) < (AL_ETH_PKT_MAX_BUFS + 2))) { tx_ring->stall = 1; device_printf_dbg(tx_ring->dev, "stall, stopping queue %d...\n", tx_ring->ring_id); al_data_memory_barrier(); } tx_ring->next_to_use = AL_ETH_TX_RING_IDX_NEXT(tx_ring, next_to_use); /* trigger the dma engine */ al_eth_tx_dma_action(tx_ring->dma_q, tx_info->tx_descs); return; exit: m_freem(m); } static void al_eth_tx_cmpl_work(void *arg, int pending) { struct al_eth_ring *tx_ring = arg; if (napi != 0) { tx_ring->cmpl_is_running = 1; al_data_memory_barrier(); } al_eth_tx_do_cleanup(tx_ring); if (napi != 0) { tx_ring->cmpl_is_running = 0; al_data_memory_barrier(); } /* all work done, enable IRQs */ al_eth_irq_config(tx_ring->unmask_reg_offset, tx_ring->unmask_val); } static int al_eth_tx_cmlp_irq_filter(void *arg) { struct al_eth_ring *tx_ring = arg; /* Interrupt should be auto-masked upon arrival */ device_printf_dbg(tx_ring->dev, "%s for ring ID = %d\n", __func__, tx_ring->ring_id); /* * For napi, if work is not running, schedule it. Always schedule * for casual (non-napi) packet handling. */ if ((napi == 0) || (napi && tx_ring->cmpl_is_running == 0)) taskqueue_enqueue(tx_ring->cmpl_tq, &tx_ring->cmpl_task); /* Do not run bottom half */ return (FILTER_HANDLED); } static int al_eth_rx_recv_irq_filter(void *arg) { struct al_eth_ring *rx_ring = arg; /* Interrupt should be auto-masked upon arrival */ device_printf_dbg(rx_ring->dev, "%s for ring ID = %d\n", __func__, rx_ring->ring_id); /* * For napi, if work is not running, schedule it. Always schedule * for casual (non-napi) packet handling. 
*/ if ((napi == 0) || (napi && rx_ring->enqueue_is_running == 0)) taskqueue_enqueue(rx_ring->enqueue_tq, &rx_ring->enqueue_task); /* Do not run bottom half */ return (FILTER_HANDLED); } /* * al_eth_rx_checksum - indicate in mbuf if hw indicated a good cksum * @adapter: structure containing adapter specific data * @hal_pkt: HAL structure for the packet * @mbuf: mbuf currently being received and modified */ static inline void al_eth_rx_checksum(struct al_eth_adapter *adapter, struct al_eth_pkt *hal_pkt, struct mbuf *mbuf) { /* if IPv4 and error */ if (unlikely((adapter->netdev->if_capenable & IFCAP_RXCSUM) && (hal_pkt->l3_proto_idx == AL_ETH_PROTO_ID_IPv4) && (hal_pkt->flags & AL_ETH_RX_FLAGS_L3_CSUM_ERR))) { device_printf(adapter->dev,"rx ipv4 header checksum error\n"); return; } /* if IPv6 and error */ if (unlikely((adapter->netdev->if_capenable & IFCAP_RXCSUM_IPV6) && (hal_pkt->l3_proto_idx == AL_ETH_PROTO_ID_IPv6) && (hal_pkt->flags & AL_ETH_RX_FLAGS_L3_CSUM_ERR))) { device_printf(adapter->dev,"rx ipv6 header checksum error\n"); return; } /* if TCP/UDP */ if (likely((hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_TCP) || (hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_UDP))) { if (unlikely(hal_pkt->flags & AL_ETH_RX_FLAGS_L4_CSUM_ERR)) { device_printf_dbg(adapter->dev, "rx L4 checksum error\n"); /* TCP/UDP checksum error */ mbuf->m_pkthdr.csum_flags = 0; } else { device_printf_dbg(adapter->dev, "rx checksum correct\n"); /* IP Checksum Good */ mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED; mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID; } } } static struct mbuf* al_eth_rx_mbuf(struct al_eth_adapter *adapter, struct al_eth_ring *rx_ring, struct al_eth_pkt *hal_pkt, unsigned int descs, uint16_t *next_to_clean) { struct mbuf *mbuf; struct al_eth_rx_buffer *rx_info = &rx_ring->rx_buffer_info[*next_to_clean]; unsigned int len; len = hal_pkt->bufs[0].len; device_printf_dbg(adapter->dev, "rx_info %p data %p\n", rx_info, rx_info->m); if (rx_info->m == NULL) { *next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring, *next_to_clean); return (NULL); } mbuf = rx_info->m; mbuf->m_pkthdr.len = len; mbuf->m_len = len; mbuf->m_pkthdr.rcvif = rx_ring->netdev; mbuf->m_flags |= M_PKTHDR; if (len <= adapter->small_copy_len) { struct mbuf *smbuf; device_printf_dbg(adapter->dev, "rx small packet. 
len %d\n", len); AL_RX_LOCK(adapter); smbuf = m_gethdr(M_NOWAIT, MT_DATA); AL_RX_UNLOCK(adapter); if (__predict_false(smbuf == NULL)) { device_printf(adapter->dev, "smbuf is NULL\n"); return (NULL); } smbuf->m_data = smbuf->m_data + AL_IP_ALIGNMENT_OFFSET; memcpy(smbuf->m_data, mbuf->m_data + AL_IP_ALIGNMENT_OFFSET, len); smbuf->m_len = len; smbuf->m_pkthdr.rcvif = rx_ring->netdev; /* first desc of a non-ps chain */ smbuf->m_flags |= M_PKTHDR; smbuf->m_pkthdr.len = smbuf->m_len; *next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring, *next_to_clean); return (smbuf); } mbuf->m_data = mbuf->m_data + AL_IP_ALIGNMENT_OFFSET; /* Unmap the buffer */ bus_dmamap_unload(rx_ring->dma_buf_tag, rx_info->dma_map); rx_info->m = NULL; *next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring, *next_to_clean); return (mbuf); } static void al_eth_rx_recv_work(void *arg, int pending) { struct al_eth_ring *rx_ring = arg; struct mbuf *mbuf; struct lro_entry *queued; unsigned int qid = rx_ring->ring_id; struct al_eth_pkt *hal_pkt = &rx_ring->hal_pkt; uint16_t next_to_clean = rx_ring->next_to_clean; uint32_t refill_required; uint32_t refill_actual; uint32_t do_if_input; if (napi != 0) { rx_ring->enqueue_is_running = 1; al_data_memory_barrier(); } do { unsigned int descs; descs = al_eth_pkt_rx(rx_ring->dma_q, hal_pkt); if (unlikely(descs == 0)) break; device_printf_dbg(rx_ring->dev, "rx_poll: q %d got packet " "from hal. descs %d\n", qid, descs); device_printf_dbg(rx_ring->dev, "rx_poll: q %d flags %x. " "l3 proto %d l4 proto %d\n", qid, hal_pkt->flags, hal_pkt->l3_proto_idx, hal_pkt->l4_proto_idx); /* ignore if detected dma or eth controller errors */ if ((hal_pkt->flags & (AL_ETH_RX_ERROR | AL_UDMA_CDESC_ERROR)) != 0) { device_printf(rx_ring->dev, "receive packet with error. " "flags = 0x%x\n", hal_pkt->flags); next_to_clean = AL_ETH_RX_RING_IDX_ADD(rx_ring, next_to_clean, descs); continue; } /* allocate mbuf and fill it */ mbuf = al_eth_rx_mbuf(rx_ring->adapter, rx_ring, hal_pkt, descs, &next_to_clean); /* exit if we failed to retrieve a buffer */ if (unlikely(mbuf == NULL)) { next_to_clean = AL_ETH_RX_RING_IDX_ADD(rx_ring, next_to_clean, descs); break; } if (__predict_true(rx_ring->netdev->if_capenable & IFCAP_RXCSUM || rx_ring->netdev->if_capenable & IFCAP_RXCSUM_IPV6)) { al_eth_rx_checksum(rx_ring->adapter, hal_pkt, mbuf); } #if __FreeBSD_version >= 800000 mbuf->m_pkthdr.flowid = qid; M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE); #endif /* * LRO is only for IP/TCP packets and TCP checksum of the packet * should be computed by hardware. 
*/ do_if_input = 1; if ((rx_ring->lro_enabled != 0) && ((mbuf->m_pkthdr.csum_flags & CSUM_IP_VALID) != 0) && hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_TCP) { /* * Send to the stack if: * - LRO not enabled, or * - no LRO resources, or * - lro enqueue fails */ if (rx_ring->lro.lro_cnt != 0) { if (tcp_lro_rx(&rx_ring->lro, mbuf, 0) == 0) do_if_input = 0; } } if (do_if_input) (*rx_ring->netdev->if_input)(rx_ring->netdev, mbuf); } while (1); rx_ring->next_to_clean = next_to_clean; refill_required = al_udma_available_get(rx_ring->dma_q); refill_actual = al_eth_refill_rx_bufs(rx_ring->adapter, qid, refill_required); if (unlikely(refill_actual < refill_required)) { device_printf_dbg(rx_ring->dev, "%s: not filling rx queue %d\n", __func__, qid); } while (((queued = LIST_FIRST(&rx_ring->lro.lro_active)) != NULL)) { LIST_REMOVE(queued, next); tcp_lro_flush(&rx_ring->lro, queued); } if (napi != 0) { rx_ring->enqueue_is_running = 0; al_data_memory_barrier(); } /* unmask irq */ al_eth_irq_config(rx_ring->unmask_reg_offset, rx_ring->unmask_val); } static void al_eth_start_xmit(void *arg, int pending) { struct al_eth_ring *tx_ring = arg; struct mbuf *mbuf; if (napi != 0) { tx_ring->enqueue_is_running = 1; al_data_memory_barrier(); } while (1) { mtx_lock(&tx_ring->br_mtx); mbuf = drbr_dequeue(NULL, tx_ring->br); mtx_unlock(&tx_ring->br_mtx); if (mbuf == NULL) break; al_eth_xmit_mbuf(tx_ring, mbuf); } if (napi != 0) { tx_ring->enqueue_is_running = 0; al_data_memory_barrier(); while (1) { mtx_lock(&tx_ring->br_mtx); mbuf = drbr_dequeue(NULL, tx_ring->br); mtx_unlock(&tx_ring->br_mtx); if (mbuf == NULL) break; al_eth_xmit_mbuf(tx_ring, mbuf); } } } static int al_mq_start(struct ifnet *ifp, struct mbuf *m) { struct al_eth_adapter *adapter = ifp->if_softc; struct al_eth_ring *tx_ring; int i; int ret; /* Which queue to use */ if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) i = m->m_pkthdr.flowid % adapter->num_tx_queues; else i = curcpu % adapter->num_tx_queues; if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) { return (EFAULT); } tx_ring = &adapter->tx_ring[i]; device_printf_dbg(adapter->dev, "dgb start() - assuming link is active, " "sending packet to queue %d\n", i); ret = drbr_enqueue(ifp, tx_ring->br, m); /* * For napi, if work is not running, schedule it. Always schedule * for casual (non-napi) packet handling. 
*/ if ((napi == 0) || ((napi != 0) && (tx_ring->enqueue_is_running == 0))) taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task); return (ret); } static void al_qflush(struct ifnet * ifp) { /* unused */ } static inline void al_eth_flow_ctrl_init(struct al_eth_adapter *adapter) { uint8_t default_flow_ctrl; default_flow_ctrl = AL_ETH_FLOW_CTRL_TX_PAUSE; default_flow_ctrl |= AL_ETH_FLOW_CTRL_RX_PAUSE; adapter->link_config.flow_ctrl_supported = default_flow_ctrl; } static int al_eth_flow_ctrl_config(struct al_eth_adapter *adapter) { struct al_eth_flow_control_params *flow_ctrl_params; uint8_t active = adapter->link_config.flow_ctrl_active; int i; flow_ctrl_params = &adapter->flow_ctrl_params; flow_ctrl_params->type = AL_ETH_FLOW_CONTROL_TYPE_LINK_PAUSE; flow_ctrl_params->obay_enable = ((active & AL_ETH_FLOW_CTRL_RX_PAUSE) != 0); flow_ctrl_params->gen_enable = ((active & AL_ETH_FLOW_CTRL_TX_PAUSE) != 0); flow_ctrl_params->rx_fifo_th_high = AL_ETH_FLOW_CTRL_RX_FIFO_TH_HIGH; flow_ctrl_params->rx_fifo_th_low = AL_ETH_FLOW_CTRL_RX_FIFO_TH_LOW; flow_ctrl_params->quanta = AL_ETH_FLOW_CTRL_QUANTA; flow_ctrl_params->quanta_th = AL_ETH_FLOW_CTRL_QUANTA_TH; /* map priority to queue index, queue id = priority/2 */ for (i = 0; i < AL_ETH_FWD_PRIO_TABLE_NUM; i++) flow_ctrl_params->prio_q_map[0][i] = 1 << (i >> 1); al_eth_flow_control_config(&adapter->hal_adapter, flow_ctrl_params); return (0); } static void al_eth_flow_ctrl_enable(struct al_eth_adapter *adapter) { /* * change the active configuration to the default / force by ethtool * and call to configure */ adapter->link_config.flow_ctrl_active = adapter->link_config.flow_ctrl_supported; al_eth_flow_ctrl_config(adapter); } static void al_eth_flow_ctrl_disable(struct al_eth_adapter *adapter) { adapter->link_config.flow_ctrl_active = 0; al_eth_flow_ctrl_config(adapter); } static int al_eth_hw_init(struct al_eth_adapter *adapter) { int rc; rc = al_eth_hw_init_adapter(adapter); if (rc != 0) return (rc); rc = al_eth_mac_config(&adapter->hal_adapter, adapter->mac_mode); if (rc < 0) { device_printf(adapter->dev, "%s failed to configure mac!\n", __func__); return (rc); } if ((adapter->mac_mode == AL_ETH_MAC_MODE_SGMII) || (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII && adapter->phy_exist == FALSE)) { rc = al_eth_mac_link_config(&adapter->hal_adapter, adapter->link_config.force_1000_base_x, adapter->link_config.autoneg, adapter->link_config.active_speed, adapter->link_config.active_duplex); if (rc != 0) { device_printf(adapter->dev, "%s failed to configure link parameters!\n", __func__); return (rc); } } rc = al_eth_mdio_config(&adapter->hal_adapter, AL_ETH_MDIO_TYPE_CLAUSE_22, TRUE /* shared_mdio_if */, adapter->ref_clk_freq, adapter->mdio_freq); if (rc != 0) { device_printf(adapter->dev, "%s failed at mdio config!\n", __func__); return (rc); } al_eth_flow_ctrl_init(adapter); return (rc); } static int al_eth_hw_stop(struct al_eth_adapter *adapter) { al_eth_mac_stop(&adapter->hal_adapter); /* * wait till pending rx packets written and UDMA becomes idle, * the MAC has ~10KB fifo, 10us should be enought time for the * UDMA to write to the memory */ DELAY(10); al_eth_adapter_stop(&adapter->hal_adapter); adapter->flags |= AL_ETH_FLAG_RESET_REQUESTED; /* disable flow ctrl to avoid pause packets*/ al_eth_flow_ctrl_disable(adapter); return (0); } /* * al_eth_intr_intx_all - Legacy Interrupt Handler for all interrupts * @irq: interrupt number * @data: pointer to a network interface device structure */ static int al_eth_intr_intx_all(void *data) { struct 
al_eth_adapter *adapter = data; struct unit_regs __iomem *regs_base = (struct unit_regs __iomem *)adapter->udma_base; uint32_t reg; reg = al_udma_iofic_read_cause(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_A); if (likely(reg)) device_printf_dbg(adapter->dev, "%s group A cause %x\n", __func__, reg); if (unlikely(reg & AL_INT_GROUP_A_GROUP_D_SUM)) { struct al_iofic_grp_ctrl __iomem *sec_ints_base; uint32_t cause_d = al_udma_iofic_read_cause(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_D); sec_ints_base = ®s_base->gen.interrupt_regs.secondary_iofic_ctrl[0]; if (cause_d != 0) { device_printf_dbg(adapter->dev, "got interrupt from group D. cause %x\n", cause_d); cause_d = al_iofic_read_cause(sec_ints_base, AL_INT_GROUP_A); device_printf(adapter->dev, "secondary A cause %x\n", cause_d); cause_d = al_iofic_read_cause(sec_ints_base, AL_INT_GROUP_B); device_printf_dbg(adapter->dev, "secondary B cause %x\n", cause_d); } } if ((reg & AL_INT_GROUP_A_GROUP_B_SUM) != 0 ) { uint32_t cause_b = al_udma_iofic_read_cause(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B); int qid; device_printf_dbg(adapter->dev, "secondary B cause %x\n", cause_b); for (qid = 0; qid < adapter->num_rx_queues; qid++) { if (cause_b & (1 << qid)) { /* mask */ al_udma_iofic_mask( (struct unit_regs __iomem *)adapter->udma_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B, 1 << qid); } } } if ((reg & AL_INT_GROUP_A_GROUP_C_SUM) != 0) { uint32_t cause_c = al_udma_iofic_read_cause(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_C); int qid; device_printf_dbg(adapter->dev, "secondary C cause %x\n", cause_c); for (qid = 0; qid < adapter->num_tx_queues; qid++) { if ((cause_c & (1 << qid)) != 0) { al_udma_iofic_mask( (struct unit_regs __iomem *)adapter->udma_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_C, 1 << qid); } } } al_eth_tx_cmlp_irq_filter(adapter->tx_ring); return (0); } static int al_eth_intr_msix_all(void *data) { struct al_eth_adapter *adapter = data; device_printf_dbg(adapter->dev, "%s\n", __func__); return (0); } static int al_eth_intr_msix_mgmt(void *data) { struct al_eth_adapter *adapter = data; device_printf_dbg(adapter->dev, "%s\n", __func__); return (0); } static int al_eth_enable_msix(struct al_eth_adapter *adapter) { int i, msix_vecs, rc, count; device_printf_dbg(adapter->dev, "%s\n", __func__); msix_vecs = 1 + adapter->num_rx_queues + adapter->num_tx_queues; device_printf_dbg(adapter->dev, "Try to enable MSIX, vector numbers = %d\n", msix_vecs); adapter->msix_entries = malloc(msix_vecs*sizeof(*adapter->msix_entries), M_IFAL, M_ZERO | M_WAITOK); if (adapter->msix_entries == NULL) { device_printf_dbg(adapter->dev, "failed to allocate" " msix_entries %d\n", msix_vecs); rc = ENOMEM; goto exit; } /* management vector (GROUP_A) @2*/ adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].entry = 2; adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector = 0; /* rx queues start @3 */ for (i = 0; i < adapter->num_rx_queues; i++) { int irq_idx = AL_ETH_RXQ_IRQ_IDX(adapter, i); adapter->msix_entries[irq_idx].entry = 3 + i; adapter->msix_entries[irq_idx].vector = 0; } /* tx queues start @7 */ for (i = 0; i < adapter->num_tx_queues; i++) { int irq_idx = AL_ETH_TXQ_IRQ_IDX(adapter, i); adapter->msix_entries[irq_idx].entry = 3 + AL_ETH_MAX_HW_QUEUES + i; adapter->msix_entries[irq_idx].vector = 0; } count = msix_vecs + 2; /* entries start from 2 */ rc = pci_alloc_msix(adapter->dev, &count); if (rc != 0) { device_printf_dbg(adapter->dev, "failed to allocate MSIX " "vectors %d\n", msix_vecs+2); 
device_printf_dbg(adapter->dev, "ret = %d\n", rc); goto msix_entries_exit; } if (count != msix_vecs + 2) { device_printf_dbg(adapter->dev, "failed to allocate all MSIX " "vectors %d, allocated %d\n", msix_vecs+2, count); rc = ENOSPC; goto msix_entries_exit; } for (i = 0; i < msix_vecs; i++) adapter->msix_entries[i].vector = 2 + 1 + i; device_printf_dbg(adapter->dev, "successfully enabled MSIX," " vectors %d\n", msix_vecs); adapter->msix_vecs = msix_vecs; adapter->flags |= AL_ETH_FLAG_MSIX_ENABLED; goto exit; msix_entries_exit: adapter->msix_vecs = 0; free(adapter->msix_entries, M_IFAL); adapter->msix_entries = NULL; exit: return (rc); } static int al_eth_setup_int_mode(struct al_eth_adapter *adapter) { int i, rc; rc = al_eth_enable_msix(adapter); if (rc != 0) { device_printf(adapter->dev, "Failed to enable MSIX mode.\n"); return (rc); } adapter->irq_vecs = max(1, adapter->msix_vecs); /* single INTX mode */ if (adapter->msix_vecs == 0) { snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name, AL_ETH_IRQNAME_SIZE, "al-eth-intx-all@pci:%s", device_get_name(adapter->dev)); adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler = al_eth_intr_intx_all; /* IRQ vector will be resolved from device resources */ adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector = 0; adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter; device_printf(adapter->dev, "%s and vector %d \n", __func__, adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector); return (0); } /* single MSI-X mode */ if (adapter->msix_vecs == 1) { snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name, AL_ETH_IRQNAME_SIZE, "al-eth-msix-all@pci:%s", device_get_name(adapter->dev)); adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler = al_eth_intr_msix_all; adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector = adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector; adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter; return (0); } /* MSI-X per queue */ snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name, AL_ETH_IRQNAME_SIZE, "al-eth-msix-mgmt@pci:%s", device_get_name(adapter->dev)); adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler = al_eth_intr_msix_mgmt; adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter; adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector = adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector; for (i = 0; i < adapter->num_rx_queues; i++) { int irq_idx = AL_ETH_RXQ_IRQ_IDX(adapter, i); snprintf(adapter->irq_tbl[irq_idx].name, AL_ETH_IRQNAME_SIZE, "al-eth-rx-comp-%d@pci:%s", i, device_get_name(adapter->dev)); adapter->irq_tbl[irq_idx].handler = al_eth_rx_recv_irq_filter; adapter->irq_tbl[irq_idx].data = &adapter->rx_ring[i]; adapter->irq_tbl[irq_idx].vector = adapter->msix_entries[irq_idx].vector; } for (i = 0; i < adapter->num_tx_queues; i++) { int irq_idx = AL_ETH_TXQ_IRQ_IDX(adapter, i); snprintf(adapter->irq_tbl[irq_idx].name, AL_ETH_IRQNAME_SIZE, "al-eth-tx-comp-%d@pci:%s", i, device_get_name(adapter->dev)); adapter->irq_tbl[irq_idx].handler = al_eth_tx_cmlp_irq_filter; adapter->irq_tbl[irq_idx].data = &adapter->tx_ring[i]; adapter->irq_tbl[irq_idx].vector = adapter->msix_entries[irq_idx].vector; } return (0); } static void __al_eth_free_irq(struct al_eth_adapter *adapter) { struct al_eth_irq *irq; int i, rc; for (i = 0; i < adapter->irq_vecs; i++) { irq = &adapter->irq_tbl[i]; if (irq->requested != 0) { device_printf_dbg(adapter->dev, "tear down irq: %d\n", irq->vector); rc = bus_teardown_intr(adapter->dev, irq->res, irq->cookie); if (rc != 0) device_printf(adapter->dev, "failed to tear " "down irq: %d\n", irq->vector); } irq->requested = 0; } } static void al_eth_free_irq(struct 
al_eth_adapter *adapter) { struct al_eth_irq *irq; int i, rc; #ifdef CONFIG_RFS_ACCEL if (adapter->msix_vecs >= 1) { free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap); adapter->netdev->rx_cpu_rmap = NULL; } #endif __al_eth_free_irq(adapter); for (i = 0; i < adapter->irq_vecs; i++) { irq = &adapter->irq_tbl[i]; if (irq->res == NULL) continue; device_printf_dbg(adapter->dev, "release resource irq: %d\n", irq->vector); rc = bus_release_resource(adapter->dev, SYS_RES_IRQ, irq->vector, irq->res); irq->res = NULL; if (rc != 0) device_printf(adapter->dev, "dev has no parent while " "releasing res for irq: %d\n", irq->vector); } pci_release_msi(adapter->dev); adapter->flags &= ~AL_ETH_FLAG_MSIX_ENABLED; adapter->msix_vecs = 0; free(adapter->msix_entries, M_IFAL); adapter->msix_entries = NULL; } static int al_eth_request_irq(struct al_eth_adapter *adapter) { unsigned long flags; struct al_eth_irq *irq; int rc = 0, i, v; if ((adapter->flags & AL_ETH_FLAG_MSIX_ENABLED) != 0) flags = RF_ACTIVE; else flags = RF_ACTIVE | RF_SHAREABLE; for (i = 0; i < adapter->irq_vecs; i++) { irq = &adapter->irq_tbl[i]; if (irq->requested != 0) continue; irq->res = bus_alloc_resource_any(adapter->dev, SYS_RES_IRQ, &irq->vector, flags); if (irq->res == NULL) { device_printf(adapter->dev, "could not allocate " "irq vector=%d\n", irq->vector); rc = ENXIO; goto exit_res; } if ((rc = bus_setup_intr(adapter->dev, irq->res, INTR_TYPE_NET | INTR_MPSAFE, irq->handler, NULL, irq->data, &irq->cookie)) != 0) { device_printf(adapter->dev, "failed to register " "interrupt handler for irq %ju: %d\n", (uintmax_t)rman_get_start(irq->res), rc); goto exit_intr; } irq->requested = 1; } goto exit; exit_intr: v = i - 1; /* -1 because we omit the operation that failed */ while (v-- >= 0) { int bti; irq = &adapter->irq_tbl[v]; bti = bus_teardown_intr(adapter->dev, irq->res, irq->cookie); if (bti != 0) { device_printf(adapter->dev, "failed to tear " "down irq: %d\n", irq->vector); } irq->requested = 0; device_printf_dbg(adapter->dev, "exit_intr: releasing irq %d\n", irq->vector); } exit_res: v = i - 1; /* -1 because we omit the operation that failed */ while (v-- >= 0) { int brr; irq = &adapter->irq_tbl[v]; device_printf_dbg(adapter->dev, "exit_res: releasing resource" " for irq %d\n", irq->vector); brr = bus_release_resource(adapter->dev, SYS_RES_IRQ, irq->vector, irq->res); if (brr != 0) device_printf(adapter->dev, "dev has no parent while " "releasing res for irq: %d\n", irq->vector); irq->res = NULL; } exit: return (rc); } /** * al_eth_setup_tx_resources - allocate Tx resources (Descriptors) * @adapter: network interface device structure * @qid: queue index * * Return 0 on success, negative on failure **/ static int al_eth_setup_tx_resources(struct al_eth_adapter *adapter, int qid) { struct al_eth_ring *tx_ring = &adapter->tx_ring[qid]; struct device *dev = tx_ring->dev; struct al_udma_q_params *q_params = &tx_ring->q_params; int size; int ret; if (adapter->up) return (0); size = sizeof(struct al_eth_tx_buffer) * tx_ring->sw_count; tx_ring->tx_buffer_info = malloc(size, M_IFAL, M_ZERO | M_WAITOK); if (tx_ring->tx_buffer_info == NULL) return (ENOMEM); tx_ring->descs_size = tx_ring->hw_count * sizeof(union al_udma_desc); q_params->size = tx_ring->hw_count; ret = al_dma_alloc_coherent(dev, &q_params->desc_phy_base_tag, (bus_dmamap_t *)&q_params->desc_phy_base_map, (bus_addr_t *)&q_params->desc_phy_base, (void**)&q_params->desc_base, tx_ring->descs_size); if (ret != 0) { device_printf(dev, "failed to al_dma_alloc_coherent," " ret = %d\n", ret); 
return (ENOMEM); } if (q_params->desc_base == NULL) return (ENOMEM); device_printf_dbg(dev, "Initializing ring queues %d\n", qid); /* Allocate Ring Queue */ mtx_init(&tx_ring->br_mtx, "AlRingMtx", NULL, MTX_DEF); tx_ring->br = buf_ring_alloc(AL_BR_SIZE, M_DEVBUF, M_WAITOK, &tx_ring->br_mtx); if (tx_ring->br == NULL) { device_printf(dev, "Critical Failure setting up buf ring\n"); return (ENOMEM); } /* Allocate taskqueues */ TASK_INIT(&tx_ring->enqueue_task, 0, al_eth_start_xmit, tx_ring); tx_ring->enqueue_tq = taskqueue_create_fast("al_tx_enque", M_NOWAIT, taskqueue_thread_enqueue, &tx_ring->enqueue_tq); taskqueue_start_threads(&tx_ring->enqueue_tq, 1, PI_NET, "%s txeq", device_get_nameunit(adapter->dev)); TASK_INIT(&tx_ring->cmpl_task, 0, al_eth_tx_cmpl_work, tx_ring); tx_ring->cmpl_tq = taskqueue_create_fast("al_tx_cmpl", M_NOWAIT, taskqueue_thread_enqueue, &tx_ring->cmpl_tq); taskqueue_start_threads(&tx_ring->cmpl_tq, 1, PI_REALTIME, "%s txcq", device_get_nameunit(adapter->dev)); /* Setup DMA descriptor areas. */ ret = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ AL_TSO_SIZE, /* maxsize */ AL_ETH_PKT_MAX_BUFS, /* nsegments */ PAGE_SIZE, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &tx_ring->dma_buf_tag); if (ret != 0) { device_printf(dev,"Unable to allocate dma_buf_tag, ret = %d\n", ret); return (ret); } for (size = 0; size < tx_ring->sw_count; size++) { ret = bus_dmamap_create(tx_ring->dma_buf_tag, 0, &tx_ring->tx_buffer_info[size].dma_map); if (ret != 0) { device_printf(dev, "Unable to map DMA TX " "buffer memory [iter=%d]\n", size); return (ret); } } /* completion queue not used for tx */ q_params->cdesc_base = NULL; /* size in bytes of the udma completion ring descriptor */ q_params->cdesc_size = 8; tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; return (0); } /* * al_eth_free_tx_resources - Free Tx Resources per Queue * @adapter: network interface device structure * @qid: queue index * * Free all transmit software resources */ static void al_eth_free_tx_resources(struct al_eth_adapter *adapter, int qid) { struct al_eth_ring *tx_ring = &adapter->tx_ring[qid]; struct al_udma_q_params *q_params = &tx_ring->q_params; int size; /* At this point interrupts' handlers must be deactivated */ while (taskqueue_cancel(tx_ring->cmpl_tq, &tx_ring->cmpl_task, NULL)) taskqueue_drain(tx_ring->cmpl_tq, &tx_ring->cmpl_task); taskqueue_free(tx_ring->cmpl_tq); while (taskqueue_cancel(tx_ring->enqueue_tq, &tx_ring->enqueue_task, NULL)) { taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task); } taskqueue_free(tx_ring->enqueue_tq); if (tx_ring->br != NULL) { drbr_flush(adapter->netdev, tx_ring->br); buf_ring_free(tx_ring->br, M_DEVBUF); } for (size = 0; size < tx_ring->sw_count; size++) { m_freem(tx_ring->tx_buffer_info[size].m); tx_ring->tx_buffer_info[size].m = NULL; bus_dmamap_unload(tx_ring->dma_buf_tag, tx_ring->tx_buffer_info[size].dma_map); bus_dmamap_destroy(tx_ring->dma_buf_tag, tx_ring->tx_buffer_info[size].dma_map); } bus_dma_tag_destroy(tx_ring->dma_buf_tag); free(tx_ring->tx_buffer_info, M_IFAL); tx_ring->tx_buffer_info = NULL; mtx_destroy(&tx_ring->br_mtx); /* if not set, then don't free */ if (q_params->desc_base == NULL) return; al_dma_free_coherent(q_params->desc_phy_base_tag, q_params->desc_phy_base_map, q_params->desc_base); q_params->desc_base = NULL; } /* * al_eth_free_all_tx_resources - Free Tx Resources for All 
Queues * @adapter: board private structure * * Free all transmit software resources */ static void al_eth_free_all_tx_resources(struct al_eth_adapter *adapter) { int i; for (i = 0; i < adapter->num_tx_queues; i++) if (adapter->tx_ring[i].q_params.desc_base) al_eth_free_tx_resources(adapter, i); } /* * al_eth_setup_rx_resources - allocate Rx resources (Descriptors) * @adapter: network interface device structure * @qid: queue index * * Returns 0 on success, negative on failure */ static int al_eth_setup_rx_resources(struct al_eth_adapter *adapter, unsigned int qid) { struct al_eth_ring *rx_ring = &adapter->rx_ring[qid]; struct device *dev = rx_ring->dev; struct al_udma_q_params *q_params = &rx_ring->q_params; int size; int ret; size = sizeof(struct al_eth_rx_buffer) * rx_ring->sw_count; /* alloc extra element so in rx path we can always prefetch rx_info + 1 */ size += 1; rx_ring->rx_buffer_info = malloc(size, M_IFAL, M_ZERO | M_WAITOK); if (rx_ring->rx_buffer_info == NULL) return (ENOMEM); rx_ring->descs_size = rx_ring->hw_count * sizeof(union al_udma_desc); q_params->size = rx_ring->hw_count; ret = al_dma_alloc_coherent(dev, &q_params->desc_phy_base_tag, &q_params->desc_phy_base_map, (bus_addr_t *)&q_params->desc_phy_base, (void**)&q_params->desc_base, rx_ring->descs_size); if ((q_params->desc_base == NULL) || (ret != 0)) return (ENOMEM); /* size in bytes of the udma completion ring descriptor */ q_params->cdesc_size = 16; rx_ring->cdescs_size = rx_ring->hw_count * q_params->cdesc_size; ret = al_dma_alloc_coherent(dev, &q_params->cdesc_phy_base_tag, &q_params->cdesc_phy_base_map, (bus_addr_t *)&q_params->cdesc_phy_base, (void**)&q_params->cdesc_base, rx_ring->cdescs_size); if ((q_params->cdesc_base == NULL) || (ret != 0)) return (ENOMEM); /* Allocate taskqueues */ - TASK_INIT(&rx_ring->enqueue_task, 0, al_eth_rx_recv_work, rx_ring); + NET_TASK_INIT(&rx_ring->enqueue_task, 0, al_eth_rx_recv_work, rx_ring); rx_ring->enqueue_tq = taskqueue_create_fast("al_rx_enque", M_NOWAIT, taskqueue_thread_enqueue, &rx_ring->enqueue_tq); taskqueue_start_threads(&rx_ring->enqueue_tq, 1, PI_NET, "%s rxeq", device_get_nameunit(adapter->dev)); /* Setup DMA descriptor areas. 
*/ ret = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ AL_TSO_SIZE, /* maxsize */ 1, /* nsegments */ AL_TSO_SIZE, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &rx_ring->dma_buf_tag); if (ret != 0) { device_printf(dev,"Unable to allocate RX dma_buf_tag\n"); return (ret); } for (size = 0; size < rx_ring->sw_count; size++) { ret = bus_dmamap_create(rx_ring->dma_buf_tag, 0, &rx_ring->rx_buffer_info[size].dma_map); if (ret != 0) { device_printf(dev,"Unable to map DMA RX buffer memory\n"); return (ret); } } /* Zero out the descriptor ring */ memset(q_params->cdesc_base, 0, rx_ring->cdescs_size); /* Create LRO for the ring */ if ((adapter->netdev->if_capenable & IFCAP_LRO) != 0) { int err = tcp_lro_init(&rx_ring->lro); if (err != 0) { device_printf(adapter->dev, "LRO[%d] Initialization failed!\n", qid); } else { device_printf_dbg(adapter->dev, "RX Soft LRO[%d] Initialized\n", qid); rx_ring->lro_enabled = TRUE; rx_ring->lro.ifp = adapter->netdev; } } rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; return (0); } /* * al_eth_free_rx_resources - Free Rx Resources * @adapter: network interface device structure * @qid: queue index * * Free all receive software resources */ static void al_eth_free_rx_resources(struct al_eth_adapter *adapter, unsigned int qid) { struct al_eth_ring *rx_ring = &adapter->rx_ring[qid]; struct al_udma_q_params *q_params = &rx_ring->q_params; int size; /* At this point interrupts' handlers must be deactivated */ while (taskqueue_cancel(rx_ring->enqueue_tq, &rx_ring->enqueue_task, NULL)) { taskqueue_drain(rx_ring->enqueue_tq, &rx_ring->enqueue_task); } taskqueue_free(rx_ring->enqueue_tq); for (size = 0; size < rx_ring->sw_count; size++) { m_freem(rx_ring->rx_buffer_info[size].m); rx_ring->rx_buffer_info[size].m = NULL; bus_dmamap_unload(rx_ring->dma_buf_tag, rx_ring->rx_buffer_info[size].dma_map); bus_dmamap_destroy(rx_ring->dma_buf_tag, rx_ring->rx_buffer_info[size].dma_map); } bus_dma_tag_destroy(rx_ring->dma_buf_tag); free(rx_ring->rx_buffer_info, M_IFAL); rx_ring->rx_buffer_info = NULL; /* if not set, then don't free */ if (q_params->desc_base == NULL) return; al_dma_free_coherent(q_params->desc_phy_base_tag, q_params->desc_phy_base_map, q_params->desc_base); q_params->desc_base = NULL; /* if not set, then don't free */ if (q_params->cdesc_base == NULL) return; al_dma_free_coherent(q_params->cdesc_phy_base_tag, q_params->cdesc_phy_base_map, q_params->cdesc_base); q_params->cdesc_phy_base = 0; /* Free LRO resources */ tcp_lro_free(&rx_ring->lro); } /* * al_eth_free_all_rx_resources - Free Rx Resources for All Queues * @adapter: board private structure * * Free all receive software resources */ static void al_eth_free_all_rx_resources(struct al_eth_adapter *adapter) { int i; for (i = 0; i < adapter->num_rx_queues; i++) if (adapter->rx_ring[i].q_params.desc_base != 0) al_eth_free_rx_resources(adapter, i); } /* * al_eth_setup_all_rx_resources - allocate all queues Rx resources * @adapter: board private structure * * Return 0 on success, negative on failure */ static int al_eth_setup_all_rx_resources(struct al_eth_adapter *adapter) { int i, rc = 0; for (i = 0; i < adapter->num_rx_queues; i++) { rc = al_eth_setup_rx_resources(adapter, i); if (rc == 0) continue; device_printf(adapter->dev, "Allocation for Rx Queue %u failed\n", i); goto err_setup_rx; } return (0); err_setup_rx: /* rewind the index freeing the rings as 
we go */ while (i--) al_eth_free_rx_resources(adapter, i); return (rc); } /* * al_eth_setup_all_tx_resources - allocate all queues Tx resources * @adapter: private structure * * Return 0 on success, negative on failure */ static int al_eth_setup_all_tx_resources(struct al_eth_adapter *adapter) { int i, rc = 0; for (i = 0; i < adapter->num_tx_queues; i++) { rc = al_eth_setup_tx_resources(adapter, i); if (rc == 0) continue; device_printf(adapter->dev, "Allocation for Tx Queue %u failed\n", i); goto err_setup_tx; } return (0); err_setup_tx: /* rewind the index freeing the rings as we go */ while (i--) al_eth_free_tx_resources(adapter, i); return (rc); } static void al_eth_disable_int_sync(struct al_eth_adapter *adapter) { /* disable forwarding interrupts from eth through pci end point */ if ((adapter->board_type == ALPINE_FPGA_NIC) || (adapter->board_type == ALPINE_NIC)) { al_eth_forward_int_config((uint32_t*)adapter->internal_pcie_base + AL_REG_OFFSET_FORWARD_INTR, AL_DIS_FORWARD_INTR); } /* mask hw interrupts */ al_eth_interrupts_mask(adapter); } static void al_eth_interrupts_unmask(struct al_eth_adapter *adapter) { uint32_t group_a_mask = AL_INT_GROUP_A_GROUP_D_SUM; /* enable group D summary */ uint32_t group_b_mask = (1 << adapter->num_rx_queues) - 1;/* bit per Rx q*/ uint32_t group_c_mask = (1 << adapter->num_tx_queues) - 1;/* bit per Tx q*/ uint32_t group_d_mask = 3 << 8; struct unit_regs __iomem *regs_base = (struct unit_regs __iomem *)adapter->udma_base; if (adapter->int_mode == AL_IOFIC_MODE_LEGACY) group_a_mask |= AL_INT_GROUP_A_GROUP_B_SUM | AL_INT_GROUP_A_GROUP_C_SUM | AL_INT_GROUP_A_GROUP_D_SUM; al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_A, group_a_mask); al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B, group_b_mask); al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_C, group_c_mask); al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_D, group_d_mask); } static void al_eth_interrupts_mask(struct al_eth_adapter *adapter) { struct unit_regs __iomem *regs_base = (struct unit_regs __iomem *)adapter->udma_base; /* mask all interrupts */ al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_A, AL_MASK_GROUP_A_INT); al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B, AL_MASK_GROUP_B_INT); al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_C, AL_MASK_GROUP_C_INT); al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_D, AL_MASK_GROUP_D_INT); } static int al_eth_configure_int_mode(struct al_eth_adapter *adapter) { enum al_iofic_mode int_mode; uint32_t m2s_errors_disable = AL_M2S_MASK_INIT; uint32_t m2s_aborts_disable = AL_M2S_MASK_INIT; uint32_t s2m_errors_disable = AL_S2M_MASK_INIT; uint32_t s2m_aborts_disable = AL_S2M_MASK_INIT; /* single INTX mode */ if (adapter->msix_vecs == 0) int_mode = AL_IOFIC_MODE_LEGACY; else if (adapter->msix_vecs > 1) int_mode = AL_IOFIC_MODE_MSIX_PER_Q; else { device_printf(adapter->dev, "udma doesn't support single MSI-X mode yet.\n"); return (EIO); } if (adapter->board_type != ALPINE_INTEGRATED) { m2s_errors_disable |= AL_M2S_S2M_MASK_NOT_INT; m2s_aborts_disable |= AL_M2S_S2M_MASK_NOT_INT; s2m_errors_disable |= AL_M2S_S2M_MASK_NOT_INT; s2m_aborts_disable |= AL_M2S_S2M_MASK_NOT_INT; } if (al_udma_iofic_config((struct unit_regs __iomem *)adapter->udma_base, int_mode, m2s_errors_disable, m2s_aborts_disable, s2m_errors_disable, s2m_aborts_disable)) { device_printf(adapter->dev,
"al_udma_unit_int_config failed!.\n"); return (EIO); } adapter->int_mode = int_mode; device_printf_dbg(adapter->dev, "using %s interrupt mode\n", int_mode == AL_IOFIC_MODE_LEGACY ? "INTx" : int_mode == AL_IOFIC_MODE_MSIX_PER_Q ? "MSI-X per Queue" : "Unknown"); /* set interrupt moderation resolution to 15us */ al_iofic_moder_res_config(&((struct unit_regs *)(adapter->udma_base))->gen.interrupt_regs.main_iofic, AL_INT_GROUP_B, 15); al_iofic_moder_res_config(&((struct unit_regs *)(adapter->udma_base))->gen.interrupt_regs.main_iofic, AL_INT_GROUP_C, 15); /* by default interrupt coalescing is disabled */ adapter->tx_usecs = 0; adapter->rx_usecs = 0; return (0); } /* * ethtool_rxfh_indir_default - get default value for RX flow hash indirection * @index: Index in RX flow hash indirection table * @n_rx_rings: Number of RX rings to use * * This function provides the default policy for RX flow hash indirection. */ static inline uint32_t ethtool_rxfh_indir_default(uint32_t index, uint32_t n_rx_rings) { return (index % n_rx_rings); } static void* al_eth_update_stats(struct al_eth_adapter *adapter) { struct al_eth_mac_stats *mac_stats = &adapter->mac_stats; if (adapter->up == 0) return (NULL); al_eth_mac_stats_get(&adapter->hal_adapter, mac_stats); return (NULL); } static uint64_t al_get_counter(struct ifnet *ifp, ift_counter cnt) { struct al_eth_adapter *adapter; struct al_eth_mac_stats *mac_stats; uint64_t rv; adapter = if_getsoftc(ifp); mac_stats = &adapter->mac_stats; switch (cnt) { case IFCOUNTER_IPACKETS: return (mac_stats->aFramesReceivedOK); /* including pause frames */ case IFCOUNTER_OPACKETS: return (mac_stats->aFramesTransmittedOK); case IFCOUNTER_IBYTES: return (mac_stats->aOctetsReceivedOK); case IFCOUNTER_OBYTES: return (mac_stats->aOctetsTransmittedOK); case IFCOUNTER_IMCASTS: return (mac_stats->ifInMulticastPkts); case IFCOUNTER_OMCASTS: return (mac_stats->ifOutMulticastPkts); case IFCOUNTER_COLLISIONS: return (0); case IFCOUNTER_IQDROPS: return (mac_stats->etherStatsDropEvents); case IFCOUNTER_IERRORS: rv = mac_stats->ifInErrors + mac_stats->etherStatsUndersizePkts + /* good but short */ mac_stats->etherStatsFragments + /* short and bad*/ mac_stats->etherStatsJabbers + /* with crc errors */ mac_stats->etherStatsOversizePkts + mac_stats->aFrameCheckSequenceErrors + mac_stats->aAlignmentErrors; return (rv); case IFCOUNTER_OERRORS: return (mac_stats->ifOutErrors); default: return (if_get_counter_default(ifp, cnt)); } } static u_int al_count_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { unsigned char *mac; mac = LLADDR(sdl); /* default mc address inside mac address */ if (mac[3] != 0 && mac[4] != 0 && mac[5] != 1) return (1); else return (0); } static u_int al_program_addr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { struct al_eth_adapter *adapter = arg; al_eth_mac_table_unicast_add(adapter, AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + 1 + cnt, 1); return (1); } /* * Unicast, Multicast and Promiscuous mode set * * The set_rx_mode entry point is called whenever the unicast or multicast * address lists or the network interface flags are updated. This routine is * responsible for configuring the hardware for proper unicast, multicast, * promiscuous mode, and all-multi behavior. */ static void al_eth_set_rx_mode(struct al_eth_adapter *adapter) { struct ifnet *ifp = adapter->netdev; int mc, uc; uint8_t i; /* XXXGL: why generic count won't work? 
*/ mc = if_foreach_llmaddr(ifp, al_count_maddr, NULL); uc = if_lladdr_count(ifp); if ((ifp->if_flags & IFF_PROMISC) != 0) { al_eth_mac_table_promiscuous_set(adapter, true); } else { if ((ifp->if_flags & IFF_ALLMULTI) != 0) { /* This interface is in all-multicast mode (used by multicast routers). */ al_eth_mac_table_all_multicast_add(adapter, AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX, 1); } else { if (mc == 0) { al_eth_mac_table_entry_clear(adapter, AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX); } else { al_eth_mac_table_all_multicast_add(adapter, AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX, 1); } } if (uc != 0) { i = AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + 1; if (uc > AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT) { /* * In this case there are more addresses than * entries in the mac table - set promiscuous */ al_eth_mac_table_promiscuous_set(adapter, true); return; } /* clear the last configuration */ while (i < (AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT)) { al_eth_mac_table_entry_clear(adapter, i); i++; } /* set new addresses */ if_foreach_lladdr(ifp, al_program_addr, adapter); } al_eth_mac_table_promiscuous_set(adapter, false); } } static void al_eth_config_rx_fwd(struct al_eth_adapter *adapter) { struct al_eth_fwd_ctrl_table_entry entry; int i; /* let priority be equal to pbits */ for (i = 0; i < AL_ETH_FWD_PBITS_TABLE_NUM; i++) al_eth_fwd_pbits_table_set(&adapter->hal_adapter, i, i); /* map priority to queue index, queue id = priority/2 */ for (i = 0; i < AL_ETH_FWD_PRIO_TABLE_NUM; i++) al_eth_fwd_priority_table_set(&adapter->hal_adapter, i, i >> 1); entry.prio_sel = AL_ETH_CTRL_TABLE_PRIO_SEL_VAL_0; entry.queue_sel_1 = AL_ETH_CTRL_TABLE_QUEUE_SEL_1_THASH_TABLE; entry.queue_sel_2 = AL_ETH_CTRL_TABLE_QUEUE_SEL_2_NO_PRIO; entry.udma_sel = AL_ETH_CTRL_TABLE_UDMA_SEL_MAC_TABLE; entry.filter = FALSE; al_eth_ctrl_table_def_set(&adapter->hal_adapter, FALSE, &entry); /* * By default set the mac table to forward all unicast packets to our * MAC address and all broadcasts. All the rest will be dropped.
*/ al_eth_mac_table_unicast_add(adapter, AL_ETH_MAC_TABLE_UNICAST_IDX_BASE, 1); al_eth_mac_table_broadcast_add(adapter, AL_ETH_MAC_TABLE_BROADCAST_IDX, 1); al_eth_mac_table_promiscuous_set(adapter, false); /* set toeplitz hash keys */ for (i = 0; i < sizeof(adapter->toeplitz_hash_key); i++) *((uint8_t*)adapter->toeplitz_hash_key + i) = (uint8_t)random(); for (i = 0; i < AL_ETH_RX_HASH_KEY_NUM; i++) al_eth_hash_key_set(&adapter->hal_adapter, i, htonl(adapter->toeplitz_hash_key[i])); for (i = 0; i < AL_ETH_RX_RSS_TABLE_SIZE; i++) { adapter->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, AL_ETH_NUM_QUEUES); al_eth_set_thash_table_entry(adapter, i, 0, adapter->rss_ind_tbl[i]); } al_eth_fsm_table_init(adapter); } static void al_eth_req_rx_buff_size(struct al_eth_adapter *adapter, int size) { /* * Determine the correct mbuf pool * for doing jumbo frames * Try from the smallest up to maximum supported */ adapter->rx_mbuf_sz = MCLBYTES; if (size > 2048) { if (adapter->max_rx_buff_alloc_size > 2048) adapter->rx_mbuf_sz = MJUMPAGESIZE; else return; } if (size > 4096) { if (adapter->max_rx_buff_alloc_size > 4096) adapter->rx_mbuf_sz = MJUM9BYTES; else return; } if (size > 9216) { if (adapter->max_rx_buff_alloc_size > 9216) adapter->rx_mbuf_sz = MJUM16BYTES; else return; } } static int al_eth_change_mtu(struct al_eth_adapter *adapter, int new_mtu) { int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN; al_eth_req_rx_buff_size(adapter, new_mtu); device_printf_dbg(adapter->dev, "set MTU to %d\n", new_mtu); al_eth_rx_pkt_limit_config(&adapter->hal_adapter, AL_ETH_MIN_FRAME_LEN, max_frame); al_eth_tso_mss_config(&adapter->hal_adapter, 0, new_mtu - 100); return (0); } static int al_eth_check_mtu(struct al_eth_adapter *adapter, int new_mtu) { int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN; if ((new_mtu < AL_ETH_MIN_FRAME_LEN) || (max_frame > AL_ETH_MAX_FRAME_LEN)) { return (EINVAL); } return (0); } static int al_eth_udma_queue_enable(struct al_eth_adapter *adapter, enum al_udma_type type, int qid) { int rc = 0; char *name = (type == UDMA_TX) ? 
"Tx" : "Rx"; struct al_udma_q_params *q_params; if (type == UDMA_TX) q_params = &adapter->tx_ring[qid].q_params; else q_params = &adapter->rx_ring[qid].q_params; rc = al_eth_queue_config(&adapter->hal_adapter, type, qid, q_params); if (rc < 0) { device_printf(adapter->dev, "config %s queue %u failed\n", name, qid); return (rc); } return (rc); } static int al_eth_udma_queues_enable_all(struct al_eth_adapter *adapter) { int i; for (i = 0; i < adapter->num_tx_queues; i++) al_eth_udma_queue_enable(adapter, UDMA_TX, i); for (i = 0; i < adapter->num_rx_queues; i++) al_eth_udma_queue_enable(adapter, UDMA_RX, i); return (0); } static void al_eth_up_complete(struct al_eth_adapter *adapter) { al_eth_configure_int_mode(adapter); al_eth_config_rx_fwd(adapter); al_eth_change_mtu(adapter, adapter->netdev->if_mtu); al_eth_udma_queues_enable_all(adapter); al_eth_refill_all_rx_bufs(adapter); al_eth_interrupts_unmask(adapter); /* enable forwarding interrupts from eth through pci end point */ if ((adapter->board_type == ALPINE_FPGA_NIC) || (adapter->board_type == ALPINE_NIC)) { al_eth_forward_int_config((uint32_t*)adapter->internal_pcie_base + AL_REG_OFFSET_FORWARD_INTR, AL_EN_FORWARD_INTR); } al_eth_flow_ctrl_enable(adapter); mtx_lock(&adapter->stats_mtx); callout_reset(&adapter->stats_callout, hz, al_tick_stats, (void*)adapter); mtx_unlock(&adapter->stats_mtx); al_eth_mac_start(&adapter->hal_adapter); } static int al_media_update(struct ifnet *ifp) { struct al_eth_adapter *adapter = ifp->if_softc; if ((ifp->if_flags & IFF_UP) != 0) mii_mediachg(adapter->mii); return (0); } static void al_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) { struct al_eth_adapter *sc = ifp->if_softc; struct mii_data *mii; if (sc->mii == NULL) { ifmr->ifm_active = IFM_ETHER | IFM_NONE; ifmr->ifm_status = 0; return; } mii = sc->mii; mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } static void al_tick(void *arg) { struct al_eth_adapter *adapter = arg; mii_tick(adapter->mii); /* Schedule another timeout one second from now */ callout_schedule(&adapter->wd_callout, hz); } static void al_tick_stats(void *arg) { struct al_eth_adapter *adapter = arg; al_eth_update_stats(adapter); callout_schedule(&adapter->stats_callout, hz); } static int al_eth_up(struct al_eth_adapter *adapter) { struct ifnet *ifp = adapter->netdev; int rc; if (adapter->up) return (0); if ((adapter->flags & AL_ETH_FLAG_RESET_REQUESTED) != 0) { al_eth_function_reset(adapter); adapter->flags &= ~AL_ETH_FLAG_RESET_REQUESTED; } ifp->if_hwassist = 0; if ((ifp->if_capenable & IFCAP_TSO) != 0) ifp->if_hwassist |= CSUM_TSO; if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP); if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6) != 0) ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6); al_eth_serdes_init(adapter); rc = al_eth_hw_init(adapter); if (rc != 0) goto err_hw_init_open; rc = al_eth_setup_int_mode(adapter); if (rc != 0) { device_printf(adapter->dev, "%s failed at setup interrupt mode!\n", __func__); goto err_setup_int; } /* allocate transmit descriptors */ rc = al_eth_setup_all_tx_resources(adapter); if (rc != 0) goto err_setup_tx; /* allocate receive descriptors */ rc = al_eth_setup_all_rx_resources(adapter); if (rc != 0) goto err_setup_rx; rc = al_eth_request_irq(adapter); if (rc != 0) goto err_req_irq; al_eth_up_complete(adapter); adapter->up = true; if (adapter->mac_mode == AL_ETH_MAC_MODE_10GbE_Serial) adapter->netdev->if_link_state = LINK_STATE_UP; if (adapter->mac_mode 
== AL_ETH_MAC_MODE_RGMII) { mii_mediachg(adapter->mii); /* Schedule watchdog timeout */ mtx_lock(&adapter->wd_mtx); callout_reset(&adapter->wd_callout, hz, al_tick, adapter); mtx_unlock(&adapter->wd_mtx); mii_pollstat(adapter->mii); } return (rc); err_req_irq: al_eth_free_all_rx_resources(adapter); err_setup_rx: al_eth_free_all_tx_resources(adapter); err_setup_tx: al_eth_free_irq(adapter); err_setup_int: al_eth_hw_stop(adapter); err_hw_init_open: al_eth_function_reset(adapter); return (rc); } static int al_shutdown(device_t dev) { struct al_eth_adapter *adapter = device_get_softc(dev); al_eth_down(adapter); return (0); } static void al_eth_down(struct al_eth_adapter *adapter) { device_printf_dbg(adapter->dev, "al_eth_down: begin\n"); adapter->up = false; mtx_lock(&adapter->wd_mtx); callout_stop(&adapter->wd_callout); mtx_unlock(&adapter->wd_mtx); al_eth_disable_int_sync(adapter); mtx_lock(&adapter->stats_mtx); callout_stop(&adapter->stats_callout); mtx_unlock(&adapter->stats_mtx); al_eth_free_irq(adapter); al_eth_hw_stop(adapter); al_eth_free_all_tx_resources(adapter); al_eth_free_all_rx_resources(adapter); } static int al_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct al_eth_adapter *adapter = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; int error = 0; switch (command) { case SIOCSIFMTU: { error = al_eth_check_mtu(adapter, ifr->ifr_mtu); if (error != 0) { device_printf(adapter->dev, "ioctl wrong mtu %u\n", adapter->netdev->if_mtu); break; } ifp->if_drv_flags &= ~IFF_DRV_RUNNING; adapter->netdev->if_mtu = ifr->ifr_mtu; al_init(adapter); break; } case SIOCSIFFLAGS: if ((ifp->if_flags & IFF_UP) != 0) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { if (((ifp->if_flags ^ adapter->if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { device_printf_dbg(adapter->dev, "ioctl promisc/allmulti\n"); al_eth_set_rx_mode(adapter); } } else { error = al_eth_up(adapter); if (error == 0) ifp->if_drv_flags |= IFF_DRV_RUNNING; } } else { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { al_eth_down(adapter); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; } } adapter->if_flags = ifp->if_flags; break; case SIOCADDMULTI: case SIOCDELMULTI: if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { device_printf_dbg(adapter->dev, "ioctl add/del multi before\n"); al_eth_set_rx_mode(adapter); #ifdef DEVICE_POLLING if ((ifp->if_capenable & IFCAP_POLLING) == 0) #endif } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: if (adapter->mii != NULL) error = ifmedia_ioctl(ifp, ifr, &adapter->mii->mii_media, command); else error = ifmedia_ioctl(ifp, ifr, &adapter->media, command); break; case SIOCSIFCAP: { int mask, reinit; reinit = 0; mask = ifr->ifr_reqcap ^ ifp->if_capenable; #ifdef DEVICE_POLLING if ((mask & IFCAP_POLLING) != 0) { if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) { if (error != 0) return (error); ifp->if_capenable |= IFCAP_POLLING; } else { error = ether_poll_deregister(ifp); /* Enable interrupt even in error case */ ifp->if_capenable &= ~IFCAP_POLLING; } } #endif if ((mask & IFCAP_HWCSUM) != 0) { /* apply to both rx and tx */ ifp->if_capenable ^= IFCAP_HWCSUM; reinit = 1; } if ((mask & IFCAP_HWCSUM_IPV6) != 0) { ifp->if_capenable ^= IFCAP_HWCSUM_IPV6; reinit = 1; } if ((mask & IFCAP_TSO) != 0) { ifp->if_capenable ^= IFCAP_TSO; reinit = 1; } if ((mask & IFCAP_LRO) != 0) { ifp->if_capenable ^= IFCAP_LRO; } if ((mask & IFCAP_VLAN_HWTAGGING) != 0) { ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; reinit = 1; } if ((mask & IFCAP_VLAN_HWFILTER) != 0) { ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; reinit = 1; } if 
((mask & IFCAP_VLAN_HWTSO) != 0) { ifp->if_capenable ^= IFCAP_VLAN_HWTSO; reinit = 1; } if ((reinit != 0) && ((ifp->if_drv_flags & IFF_DRV_RUNNING)) != 0) { al_init(adapter); } break; } default: error = ether_ioctl(ifp, command, data); break; } return (error); } static int al_is_device_supported(device_t dev) { uint16_t pci_vendor_id = pci_get_vendor(dev); uint16_t pci_device_id = pci_get_device(dev); return (pci_vendor_id == PCI_VENDOR_ID_ANNAPURNA_LABS && (pci_device_id == PCI_DEVICE_ID_AL_ETH || pci_device_id == PCI_DEVICE_ID_AL_ETH_ADVANCED || pci_device_id == PCI_DEVICE_ID_AL_ETH_NIC || pci_device_id == PCI_DEVICE_ID_AL_ETH_FPGA_NIC)); } /* Time in mSec to keep trying to read / write from MDIO in case of error */ #define MDIO_TIMEOUT_MSEC 100 #define MDIO_PAUSE_MSEC 10 static int al_miibus_readreg(device_t dev, int phy, int reg) { struct al_eth_adapter *adapter = device_get_softc(dev); uint16_t value = 0; int rc; int timeout = MDIO_TIMEOUT_MSEC; while (timeout > 0) { rc = al_eth_mdio_read(&adapter->hal_adapter, adapter->phy_addr, -1, reg, &value); if (rc == 0) return (value); device_printf_dbg(adapter->dev, "mdio read failed. try again in 10 msec\n"); timeout -= MDIO_PAUSE_MSEC; pause("readred pause", MDIO_PAUSE_MSEC); } if (rc != 0) device_printf(adapter->dev, "MDIO read failed on timeout\n"); return (value); } static int al_miibus_writereg(device_t dev, int phy, int reg, int value) { struct al_eth_adapter *adapter = device_get_softc(dev); int rc; int timeout = MDIO_TIMEOUT_MSEC; while (timeout > 0) { rc = al_eth_mdio_write(&adapter->hal_adapter, adapter->phy_addr, -1, reg, value); if (rc == 0) return (0); device_printf(adapter->dev, "mdio write failed. try again in 10 msec\n"); timeout -= MDIO_PAUSE_MSEC; pause("miibus writereg", MDIO_PAUSE_MSEC); } if (rc != 0) device_printf(adapter->dev, "MDIO write failed on timeout\n"); return (rc); } static void al_miibus_statchg(device_t dev) { struct al_eth_adapter *adapter = device_get_softc(dev); device_printf_dbg(adapter->dev, "al_miibus_statchg: state has changed!\n"); device_printf_dbg(adapter->dev, "al_miibus_statchg: active = 0x%x status = 0x%x\n", adapter->mii->mii_media_active, adapter->mii->mii_media_status); if (adapter->up == 0) return; if ((adapter->mii->mii_media_status & IFM_AVALID) != 0) { if (adapter->mii->mii_media_status & IFM_ACTIVE) { device_printf(adapter->dev, "link is UP\n"); adapter->netdev->if_link_state = LINK_STATE_UP; } else { device_printf(adapter->dev, "link is DOWN\n"); adapter->netdev->if_link_state = LINK_STATE_DOWN; } } } static void al_miibus_linkchg(device_t dev) { struct al_eth_adapter *adapter = device_get_softc(dev); uint8_t duplex = 0; uint8_t speed = 0; if (adapter->mii == NULL) return; if ((adapter->netdev->if_flags & IFF_UP) == 0) return; /* Ignore link changes when link is not ready */ if ((adapter->mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) != (IFM_AVALID | IFM_ACTIVE)) { return; } if ((adapter->mii->mii_media_active & IFM_FDX) != 0) duplex = 1; speed = IFM_SUBTYPE(adapter->mii->mii_media_active); if (speed == IFM_10_T) { al_eth_mac_link_config(&adapter->hal_adapter, 0, 1, AL_10BASE_T_SPEED, duplex); return; } if (speed == IFM_100_TX) { al_eth_mac_link_config(&adapter->hal_adapter, 0, 1, AL_100BASE_TX_SPEED, duplex); return; } if (speed == IFM_1000_T) { al_eth_mac_link_config(&adapter->hal_adapter, 0, 1, AL_1000BASE_T_SPEED, duplex); return; } device_printf(adapter->dev, "ERROR: unknown MII media active 0x%08x\n", adapter->mii->mii_media_active); } diff --git a/sys/dev/alc/if_alc.c 
b/sys/dev/alc/if_alc.c index 172b8fab4d46..531c9f18fc8f 100644 --- a/sys/dev/alc/if_alc.c +++ b/sys/dev/alc/if_alc.c @@ -1,4712 +1,4712 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009, Pyun YongHyeon * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* Driver for Atheros AR813x/AR815x PCIe Ethernet. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* "device miibus" required. See GENERIC if you get errors here. */ #include "miibus_if.h" #undef ALC_USE_CUSTOM_CSUM #ifdef ALC_USE_CUSTOM_CSUM #define ALC_CSUM_FEATURES (CSUM_TCP | CSUM_UDP) #else #define ALC_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) #endif MODULE_DEPEND(alc, pci, 1, 1, 1); MODULE_DEPEND(alc, ether, 1, 1, 1); MODULE_DEPEND(alc, miibus, 1, 1, 1); /* Tunables. */ static int msi_disable = 0; static int msix_disable = 0; TUNABLE_INT("hw.alc.msi_disable", &msi_disable); TUNABLE_INT("hw.alc.msix_disable", &msix_disable); /* * Devices supported by this driver. 
*/ static struct alc_ident alc_ident_table[] = { { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8131, 9 * 1024, "Atheros AR8131 PCIe Gigabit Ethernet" }, { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8132, 9 * 1024, "Atheros AR8132 PCIe Fast Ethernet" }, { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8151, 6 * 1024, "Atheros AR8151 v1.0 PCIe Gigabit Ethernet" }, { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8151_V2, 6 * 1024, "Atheros AR8151 v2.0 PCIe Gigabit Ethernet" }, { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8152_B, 6 * 1024, "Atheros AR8152 v1.1 PCIe Fast Ethernet" }, { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8152_B2, 6 * 1024, "Atheros AR8152 v2.0 PCIe Fast Ethernet" }, { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8161, 9 * 1024, "Atheros AR8161 PCIe Gigabit Ethernet" }, { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8162, 9 * 1024, "Atheros AR8162 PCIe Fast Ethernet" }, { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8171, 9 * 1024, "Atheros AR8171 PCIe Gigabit Ethernet" }, { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8172, 9 * 1024, "Atheros AR8172 PCIe Fast Ethernet" }, { VENDORID_ATHEROS, DEVICEID_ATHEROS_E2200, 9 * 1024, "Killer E2200 Gigabit Ethernet" }, { VENDORID_ATHEROS, DEVICEID_ATHEROS_E2400, 9 * 1024, "Killer E2400 Gigabit Ethernet" }, { VENDORID_ATHEROS, DEVICEID_ATHEROS_E2500, 9 * 1024, "Killer E2500 Gigabit Ethernet" }, { 0, 0, 0, NULL} }; static void alc_aspm(struct alc_softc *, int, int); static void alc_aspm_813x(struct alc_softc *, int); static void alc_aspm_816x(struct alc_softc *, int); static int alc_attach(device_t); static int alc_check_boundary(struct alc_softc *); static void alc_config_msi(struct alc_softc *); static int alc_detach(device_t); static void alc_disable_l0s_l1(struct alc_softc *); static int alc_dma_alloc(struct alc_softc *); static void alc_dma_free(struct alc_softc *); static void alc_dmamap_cb(void *, bus_dma_segment_t *, int, int); static void alc_dsp_fixup(struct alc_softc *, int); static int alc_encap(struct alc_softc *, struct mbuf **); static struct alc_ident * alc_find_ident(device_t); #ifndef __NO_STRICT_ALIGNMENT static struct mbuf * alc_fixup_rx(struct ifnet *, struct mbuf *); #endif static void alc_get_macaddr(struct alc_softc *); static void alc_get_macaddr_813x(struct alc_softc *); static void alc_get_macaddr_816x(struct alc_softc *); static void alc_get_macaddr_par(struct alc_softc *); static void alc_init(void *); static void alc_init_cmb(struct alc_softc *); static void alc_init_locked(struct alc_softc *); static void alc_init_rr_ring(struct alc_softc *); static int alc_init_rx_ring(struct alc_softc *); static void alc_init_smb(struct alc_softc *); static void alc_init_tx_ring(struct alc_softc *); static void alc_int_task(void *, int); static int alc_intr(void *); static int alc_ioctl(struct ifnet *, u_long, caddr_t); static void alc_mac_config(struct alc_softc *); static uint32_t alc_mii_readreg_813x(struct alc_softc *, int, int); static uint32_t alc_mii_readreg_816x(struct alc_softc *, int, int); static uint32_t alc_mii_writereg_813x(struct alc_softc *, int, int, int); static uint32_t alc_mii_writereg_816x(struct alc_softc *, int, int, int); static int alc_miibus_readreg(device_t, int, int); static void alc_miibus_statchg(device_t); static int alc_miibus_writereg(device_t, int, int, int); static uint32_t alc_miidbg_readreg(struct alc_softc *, int); static uint32_t alc_miidbg_writereg(struct alc_softc *, int, int); static uint32_t alc_miiext_readreg(struct alc_softc *, int, int); static uint32_t alc_miiext_writereg(struct alc_softc *, int, int, int); static int alc_mediachange(struct 
ifnet *); static int alc_mediachange_locked(struct alc_softc *); static void alc_mediastatus(struct ifnet *, struct ifmediareq *); static int alc_newbuf(struct alc_softc *, struct alc_rxdesc *); static void alc_osc_reset(struct alc_softc *); static void alc_phy_down(struct alc_softc *); static void alc_phy_reset(struct alc_softc *); static void alc_phy_reset_813x(struct alc_softc *); static void alc_phy_reset_816x(struct alc_softc *); static int alc_probe(device_t); static void alc_reset(struct alc_softc *); static int alc_resume(device_t); static void alc_rxeof(struct alc_softc *, struct rx_rdesc *); static int alc_rxintr(struct alc_softc *, int); static void alc_rxfilter(struct alc_softc *); static void alc_rxvlan(struct alc_softc *); static void alc_setlinkspeed(struct alc_softc *); static void alc_setwol(struct alc_softc *); static void alc_setwol_813x(struct alc_softc *); static void alc_setwol_816x(struct alc_softc *); static int alc_shutdown(device_t); static void alc_start(struct ifnet *); static void alc_start_locked(struct ifnet *); static void alc_start_queue(struct alc_softc *); static void alc_start_tx(struct alc_softc *); static void alc_stats_clear(struct alc_softc *); static void alc_stats_update(struct alc_softc *); static void alc_stop(struct alc_softc *); static void alc_stop_mac(struct alc_softc *); static void alc_stop_queue(struct alc_softc *); static int alc_suspend(device_t); static void alc_sysctl_node(struct alc_softc *); static void alc_tick(void *); static void alc_txeof(struct alc_softc *); static void alc_watchdog(struct alc_softc *); static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int); static int sysctl_hw_alc_proc_limit(SYSCTL_HANDLER_ARGS); static int sysctl_hw_alc_int_mod(SYSCTL_HANDLER_ARGS); DEBUGNET_DEFINE(alc); static device_method_t alc_methods[] = { /* Device interface. */ DEVMETHOD(device_probe, alc_probe), DEVMETHOD(device_attach, alc_attach), DEVMETHOD(device_detach, alc_detach), DEVMETHOD(device_shutdown, alc_shutdown), DEVMETHOD(device_suspend, alc_suspend), DEVMETHOD(device_resume, alc_resume), /* MII interface. 
*/ DEVMETHOD(miibus_readreg, alc_miibus_readreg), DEVMETHOD(miibus_writereg, alc_miibus_writereg), DEVMETHOD(miibus_statchg, alc_miibus_statchg), DEVMETHOD_END }; static driver_t alc_driver = { "alc", alc_methods, sizeof(struct alc_softc) }; static devclass_t alc_devclass; DRIVER_MODULE(alc, pci, alc_driver, alc_devclass, 0, 0); MODULE_PNP_INFO("U16:vendor;U16:device", pci, alc, alc_ident_table, nitems(alc_ident_table) - 1); DRIVER_MODULE(miibus, alc, miibus_driver, miibus_devclass, 0, 0); static struct resource_spec alc_res_spec_mem[] = { { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE }, { -1, 0, 0 } }; static struct resource_spec alc_irq_spec_legacy[] = { { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { -1, 0, 0 } }; static struct resource_spec alc_irq_spec_msi[] = { { SYS_RES_IRQ, 1, RF_ACTIVE }, { -1, 0, 0 } }; static struct resource_spec alc_irq_spec_msix[] = { { SYS_RES_IRQ, 1, RF_ACTIVE }, { -1, 0, 0 } }; static uint32_t alc_dma_burst[] = { 128, 256, 512, 1024, 2048, 4096, 0, 0 }; static int alc_miibus_readreg(device_t dev, int phy, int reg) { struct alc_softc *sc; int v; sc = device_get_softc(dev); if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) v = alc_mii_readreg_816x(sc, phy, reg); else v = alc_mii_readreg_813x(sc, phy, reg); return (v); } static uint32_t alc_mii_readreg_813x(struct alc_softc *sc, int phy, int reg) { uint32_t v; int i; /* * For AR8132 fast ethernet controller, do not report 1000baseT * capability to mii(4). Even though AR8132 uses the same * model/revision number of F1 gigabit PHY, the PHY has no * ability to establish 1000baseT link. */ if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0 && reg == MII_EXTSR) return (0); CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ | MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg)); for (i = ALC_PHY_TIMEOUT; i > 0; i--) { DELAY(5); v = CSR_READ_4(sc, ALC_MDIO); if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0) break; } if (i == 0) { device_printf(sc->alc_dev, "phy read timeout : %d\n", reg); return (0); } return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT); } static uint32_t alc_mii_readreg_816x(struct alc_softc *sc, int phy, int reg) { uint32_t clk, v; int i; if ((sc->alc_flags & ALC_FLAG_LINK) != 0) clk = MDIO_CLK_25_128; else clk = MDIO_CLK_25_4; CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ | MDIO_SUP_PREAMBLE | clk | MDIO_REG_ADDR(reg)); for (i = ALC_PHY_TIMEOUT; i > 0; i--) { DELAY(5); v = CSR_READ_4(sc, ALC_MDIO); if ((v & MDIO_OP_BUSY) == 0) break; } if (i == 0) { device_printf(sc->alc_dev, "phy read timeout : %d\n", reg); return (0); } return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT); } static int alc_miibus_writereg(device_t dev, int phy, int reg, int val) { struct alc_softc *sc; int v; sc = device_get_softc(dev); if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) v = alc_mii_writereg_816x(sc, phy, reg, val); else v = alc_mii_writereg_813x(sc, phy, reg, val); return (v); } static uint32_t alc_mii_writereg_813x(struct alc_softc *sc, int phy, int reg, int val) { uint32_t v; int i; CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE | (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT | MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg)); for (i = ALC_PHY_TIMEOUT; i > 0; i--) { DELAY(5); v = CSR_READ_4(sc, ALC_MDIO); if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0) break; } if (i == 0) device_printf(sc->alc_dev, "phy write timeout : %d\n", reg); return (0); } static uint32_t alc_mii_writereg_816x(struct alc_softc *sc, int phy, int reg, int val) { uint32_t clk, v; int i; if ((sc->alc_flags & ALC_FLAG_LINK) 
!= 0) clk = MDIO_CLK_25_128; else clk = MDIO_CLK_25_4; CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE | ((val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT) | MDIO_REG_ADDR(reg) | MDIO_SUP_PREAMBLE | clk); for (i = ALC_PHY_TIMEOUT; i > 0; i--) { DELAY(5); v = CSR_READ_4(sc, ALC_MDIO); if ((v & MDIO_OP_BUSY) == 0) break; } if (i == 0) device_printf(sc->alc_dev, "phy write timeout : %d\n", reg); return (0); } static void alc_miibus_statchg(device_t dev) { struct alc_softc *sc; struct mii_data *mii; struct ifnet *ifp; uint32_t reg; sc = device_get_softc(dev); mii = device_get_softc(sc->alc_miibus); ifp = sc->alc_ifp; if (mii == NULL || ifp == NULL || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; sc->alc_flags &= ~ALC_FLAG_LINK; if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: sc->alc_flags |= ALC_FLAG_LINK; break; case IFM_1000_T: if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0) sc->alc_flags |= ALC_FLAG_LINK; break; default: break; } } /* Stop Rx/Tx MACs. */ alc_stop_mac(sc); /* Program MACs with resolved speed/duplex/flow-control. */ if ((sc->alc_flags & ALC_FLAG_LINK) != 0) { alc_start_queue(sc); alc_mac_config(sc); /* Re-enable Tx/Rx MACs. */ reg = CSR_READ_4(sc, ALC_MAC_CFG); reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB; CSR_WRITE_4(sc, ALC_MAC_CFG, reg); } alc_aspm(sc, 0, IFM_SUBTYPE(mii->mii_media_active)); alc_dsp_fixup(sc, IFM_SUBTYPE(mii->mii_media_active)); } static uint32_t alc_miidbg_readreg(struct alc_softc *sc, int reg) { alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR, reg); return (alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA)); } static uint32_t alc_miidbg_writereg(struct alc_softc *sc, int reg, int val) { alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR, reg); return (alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA, val)); } static uint32_t alc_miiext_readreg(struct alc_softc *sc, int devaddr, int reg) { uint32_t clk, v; int i; CSR_WRITE_4(sc, ALC_EXT_MDIO, EXT_MDIO_REG(reg) | EXT_MDIO_DEVADDR(devaddr)); if ((sc->alc_flags & ALC_FLAG_LINK) != 0) clk = MDIO_CLK_25_128; else clk = MDIO_CLK_25_4; CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ | MDIO_SUP_PREAMBLE | clk | MDIO_MODE_EXT); for (i = ALC_PHY_TIMEOUT; i > 0; i--) { DELAY(5); v = CSR_READ_4(sc, ALC_MDIO); if ((v & MDIO_OP_BUSY) == 0) break; } if (i == 0) { device_printf(sc->alc_dev, "phy ext read timeout : %d, %d\n", devaddr, reg); return (0); } return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT); } static uint32_t alc_miiext_writereg(struct alc_softc *sc, int devaddr, int reg, int val) { uint32_t clk, v; int i; CSR_WRITE_4(sc, ALC_EXT_MDIO, EXT_MDIO_REG(reg) | EXT_MDIO_DEVADDR(devaddr)); if ((sc->alc_flags & ALC_FLAG_LINK) != 0) clk = MDIO_CLK_25_128; else clk = MDIO_CLK_25_4; CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE | ((val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT) | MDIO_SUP_PREAMBLE | clk | MDIO_MODE_EXT); for (i = ALC_PHY_TIMEOUT; i > 0; i--) { DELAY(5); v = CSR_READ_4(sc, ALC_MDIO); if ((v & MDIO_OP_BUSY) == 0) break; } if (i == 0) device_printf(sc->alc_dev, "phy ext write timeout : %d, %d\n", devaddr, reg); return (0); } static void alc_dsp_fixup(struct alc_softc *sc, int media) { uint16_t agc, len, val; if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) return; if (AR816X_REV(sc->alc_rev) >= AR816X_REV_C0) return; /* * Vendor PHY magic. 
* 1000BT/AZ, wrong cable length */ if ((sc->alc_flags & ALC_FLAG_LINK) != 0) { len = alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL6); len = (len >> EXT_CLDCTL6_CAB_LEN_SHIFT) & EXT_CLDCTL6_CAB_LEN_MASK; agc = alc_miidbg_readreg(sc, MII_DBG_AGC); agc = (agc >> DBG_AGC_2_VGA_SHIFT) & DBG_AGC_2_VGA_MASK; if ((media == IFM_1000_T && len > EXT_CLDCTL6_CAB_LEN_SHORT1G && agc > DBG_AGC_LONG1G_LIMT) || (media == IFM_100_TX && len > DBG_AGC_LONG100M_LIMT && agc > DBG_AGC_LONG1G_LIMT)) { alc_miidbg_writereg(sc, MII_DBG_AZ_ANADECT, DBG_AZ_ANADECT_LONG); val = alc_miiext_readreg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE); val |= ANEG_AFEE_10BT_100M_TH; alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE, val); } else { alc_miidbg_writereg(sc, MII_DBG_AZ_ANADECT, DBG_AZ_ANADECT_DEFAULT); val = alc_miiext_readreg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE); val &= ~ANEG_AFEE_10BT_100M_TH; alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE, val); } if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0 && AR816X_REV(sc->alc_rev) == AR816X_REV_B0) { if (media == IFM_1000_T) { /* * Giga link threshold, raise the tolerance of * noise 50%. */ val = alc_miidbg_readreg(sc, MII_DBG_MSE20DB); val &= ~DBG_MSE20DB_TH_MASK; val |= (DBG_MSE20DB_TH_HI << DBG_MSE20DB_TH_SHIFT); alc_miidbg_writereg(sc, MII_DBG_MSE20DB, val); } else if (media == IFM_100_TX) alc_miidbg_writereg(sc, MII_DBG_MSE16DB, DBG_MSE16DB_UP); } } else { val = alc_miiext_readreg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE); val &= ~ANEG_AFEE_10BT_100M_TH; alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE, val); if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0 && AR816X_REV(sc->alc_rev) == AR816X_REV_B0) { alc_miidbg_writereg(sc, MII_DBG_MSE16DB, DBG_MSE16DB_DOWN); val = alc_miidbg_readreg(sc, MII_DBG_MSE20DB); val &= ~DBG_MSE20DB_TH_MASK; val |= (DBG_MSE20DB_TH_DEFAULT << DBG_MSE20DB_TH_SHIFT); alc_miidbg_writereg(sc, MII_DBG_MSE20DB, val); } } } static void alc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) { struct alc_softc *sc; struct mii_data *mii; sc = ifp->if_softc; ALC_LOCK(sc); if ((ifp->if_flags & IFF_UP) == 0) { ALC_UNLOCK(sc); return; } mii = device_get_softc(sc->alc_miibus); mii_pollstat(mii); ifmr->ifm_status = mii->mii_media_status; ifmr->ifm_active = mii->mii_media_active; ALC_UNLOCK(sc); } static int alc_mediachange(struct ifnet *ifp) { struct alc_softc *sc; int error; sc = ifp->if_softc; ALC_LOCK(sc); error = alc_mediachange_locked(sc); ALC_UNLOCK(sc); return (error); } static int alc_mediachange_locked(struct alc_softc *sc) { struct mii_data *mii; struct mii_softc *miisc; int error; ALC_LOCK_ASSERT(sc); mii = device_get_softc(sc->alc_miibus); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) PHY_RESET(miisc); error = mii_mediachg(mii); return (error); } static struct alc_ident * alc_find_ident(device_t dev) { struct alc_ident *ident; uint16_t vendor, devid; vendor = pci_get_vendor(dev); devid = pci_get_device(dev); for (ident = alc_ident_table; ident->name != NULL; ident++) { if (vendor == ident->vendorid && devid == ident->deviceid) return (ident); } return (NULL); } static int alc_probe(device_t dev) { struct alc_ident *ident; ident = alc_find_ident(dev); if (ident != NULL) { device_set_desc(dev, ident->name); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static void alc_get_macaddr(struct alc_softc *sc) { if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) alc_get_macaddr_816x(sc); else alc_get_macaddr_813x(sc); } static void alc_get_macaddr_813x(struct alc_softc *sc) { uint32_t opt; uint16_t val; int eeprom, i; eeprom = 0; opt = 
CSR_READ_4(sc, ALC_OPT_CFG); if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_OTP_SEL) != 0 && (CSR_READ_4(sc, ALC_TWSI_DEBUG) & TWSI_DEBUG_DEV_EXIST) != 0) { /* * EEPROM found, let TWSI reload EEPROM configuration. * This will set ethernet address of controller. */ eeprom++; switch (sc->alc_ident->deviceid) { case DEVICEID_ATHEROS_AR8131: case DEVICEID_ATHEROS_AR8132: if ((opt & OPT_CFG_CLK_ENB) == 0) { opt |= OPT_CFG_CLK_ENB; CSR_WRITE_4(sc, ALC_OPT_CFG, opt); CSR_READ_4(sc, ALC_OPT_CFG); DELAY(1000); } break; case DEVICEID_ATHEROS_AR8151: case DEVICEID_ATHEROS_AR8151_V2: case DEVICEID_ATHEROS_AR8152_B: case DEVICEID_ATHEROS_AR8152_B2: alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR, 0x00); val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA); alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA, val & 0xFF7F); alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR, 0x3B); val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA); alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA, val | 0x0008); DELAY(20); break; } CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG, CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB); CSR_WRITE_4(sc, ALC_WOL_CFG, 0); CSR_READ_4(sc, ALC_WOL_CFG); CSR_WRITE_4(sc, ALC_TWSI_CFG, CSR_READ_4(sc, ALC_TWSI_CFG) | TWSI_CFG_SW_LD_START); for (i = 100; i > 0; i--) { DELAY(1000); if ((CSR_READ_4(sc, ALC_TWSI_CFG) & TWSI_CFG_SW_LD_START) == 0) break; } if (i == 0) device_printf(sc->alc_dev, "reloading EEPROM timeout!\n"); } else { if (bootverbose) device_printf(sc->alc_dev, "EEPROM not found!\n"); } if (eeprom != 0) { switch (sc->alc_ident->deviceid) { case DEVICEID_ATHEROS_AR8131: case DEVICEID_ATHEROS_AR8132: if ((opt & OPT_CFG_CLK_ENB) != 0) { opt &= ~OPT_CFG_CLK_ENB; CSR_WRITE_4(sc, ALC_OPT_CFG, opt); CSR_READ_4(sc, ALC_OPT_CFG); DELAY(1000); } break; case DEVICEID_ATHEROS_AR8151: case DEVICEID_ATHEROS_AR8151_V2: case DEVICEID_ATHEROS_AR8152_B: case DEVICEID_ATHEROS_AR8152_B2: alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR, 0x00); val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA); alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA, val | 0x0080); alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR, 0x3B); val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA); alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA, val & 0xFFF7); DELAY(20); break; } } alc_get_macaddr_par(sc); } static void alc_get_macaddr_816x(struct alc_softc *sc) { uint32_t reg; int i, reloaded; reloaded = 0; /* Try to reload station address via TWSI. */ for (i = 100; i > 0; i--) { reg = CSR_READ_4(sc, ALC_SLD); if ((reg & (SLD_PROGRESS | SLD_START)) == 0) break; DELAY(1000); } if (i != 0) { CSR_WRITE_4(sc, ALC_SLD, reg | SLD_START); for (i = 100; i > 0; i--) { DELAY(1000); reg = CSR_READ_4(sc, ALC_SLD); if ((reg & SLD_START) == 0) break; } if (i != 0) reloaded++; else if (bootverbose) device_printf(sc->alc_dev, "reloading station address via TWSI timed out!\n"); } /* Try to reload station address from EEPROM or FLASH. 
*/ if (reloaded == 0) { reg = CSR_READ_4(sc, ALC_EEPROM_LD); if ((reg & (EEPROM_LD_EEPROM_EXIST | EEPROM_LD_FLASH_EXIST)) != 0) { for (i = 100; i > 0; i--) { reg = CSR_READ_4(sc, ALC_EEPROM_LD); if ((reg & (EEPROM_LD_PROGRESS | EEPROM_LD_START)) == 0) break; DELAY(1000); } if (i != 0) { CSR_WRITE_4(sc, ALC_EEPROM_LD, reg | EEPROM_LD_START); for (i = 100; i > 0; i--) { DELAY(1000); reg = CSR_READ_4(sc, ALC_EEPROM_LD); if ((reg & EEPROM_LD_START) == 0) break; } } else if (bootverbose) device_printf(sc->alc_dev, "reloading EEPROM/FLASH timed out!\n"); } } alc_get_macaddr_par(sc); } static void alc_get_macaddr_par(struct alc_softc *sc) { uint32_t ea[2]; ea[0] = CSR_READ_4(sc, ALC_PAR0); ea[1] = CSR_READ_4(sc, ALC_PAR1); sc->alc_eaddr[0] = (ea[1] >> 8) & 0xFF; sc->alc_eaddr[1] = (ea[1] >> 0) & 0xFF; sc->alc_eaddr[2] = (ea[0] >> 24) & 0xFF; sc->alc_eaddr[3] = (ea[0] >> 16) & 0xFF; sc->alc_eaddr[4] = (ea[0] >> 8) & 0xFF; sc->alc_eaddr[5] = (ea[0] >> 0) & 0xFF; } static void alc_disable_l0s_l1(struct alc_softc *sc) { uint32_t pmcfg; if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { /* Another magic from vendor. */ pmcfg = CSR_READ_4(sc, ALC_PM_CFG); pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_CLK_SWH_L1 | PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB | PM_CFG_MAC_ASPM_CHK | PM_CFG_SERDES_PD_EX_L1); pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB | PM_CFG_SERDES_L1_ENB; CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg); } } static void alc_phy_reset(struct alc_softc *sc) { if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) alc_phy_reset_816x(sc); else alc_phy_reset_813x(sc); } static void alc_phy_reset_813x(struct alc_softc *sc) { uint16_t data; /* Reset magic from Linux. */ CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_SEL_ANA_RESET); CSR_READ_2(sc, ALC_GPHY_CFG); DELAY(10 * 1000); CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_EXT_RESET | GPHY_CFG_SEL_ANA_RESET); CSR_READ_2(sc, ALC_GPHY_CFG); DELAY(10 * 1000); /* DSP fixup, Vendor magic. */ if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B) { alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR, 0x000A); data = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA); alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA, data & 0xDFFF); } if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 || sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 || sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B || sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) { alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR, 0x003B); data = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA); alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA, data & 0xFFF7); DELAY(20 * 1000); } if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151) { alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR, 0x0029); alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA, 0x929D); } if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8131 || sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8132 || sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 || sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) { alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR, 0x0029); alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA, 0xB6DD); } /* Load DSP codes, vendor magic. 
*/ data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE | ((1 << ANA_INTERVAL_SEL_TIMER_SHIFT) & ANA_INTERVAL_SEL_TIMER_MASK); alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR, MII_ANA_CFG18); alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA, data); data = ((2 << ANA_SERDES_CDR_BW_SHIFT) & ANA_SERDES_CDR_BW_MASK) | ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL | ANA_SERDES_EN_LCKDT; alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR, MII_ANA_CFG5); alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA, data); data = ((44 << ANA_LONG_CABLE_TH_100_SHIFT) & ANA_LONG_CABLE_TH_100_MASK) | ((33 << ANA_SHORT_CABLE_TH_100_SHIFT) & ANA_SHORT_CABLE_TH_100_SHIFT) | ANA_BP_BAD_LINK_ACCUM | ANA_BP_SMALL_BW; alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR, MII_ANA_CFG54); alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA, data); data = ((11 << ANA_IECHO_ADJ_3_SHIFT) & ANA_IECHO_ADJ_3_MASK) | ((11 << ANA_IECHO_ADJ_2_SHIFT) & ANA_IECHO_ADJ_2_MASK) | ((8 << ANA_IECHO_ADJ_1_SHIFT) & ANA_IECHO_ADJ_1_MASK) | ((8 << ANA_IECHO_ADJ_0_SHIFT) & ANA_IECHO_ADJ_0_MASK); alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR, MII_ANA_CFG4); alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA, data); data = ((7 & ANA_MANUL_SWICH_ON_SHIFT) & ANA_MANUL_SWICH_ON_MASK) | ANA_RESTART_CAL | ANA_MAN_ENABLE | ANA_SEL_HSP | ANA_EN_HB | ANA_OEN_125M; alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR, MII_ANA_CFG0); alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA, data); DELAY(1000); /* Disable hibernation. */ alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR, 0x0029); data = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA); data &= ~0x8000; alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA, data); alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR, 0x000B); data = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA); data &= ~0x8000; alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA, data); } static void alc_phy_reset_816x(struct alc_softc *sc) { uint32_t val; val = CSR_READ_4(sc, ALC_GPHY_CFG); val &= ~(GPHY_CFG_EXT_RESET | GPHY_CFG_LED_MODE | GPHY_CFG_GATE_25M_ENB | GPHY_CFG_PHY_IDDQ | GPHY_CFG_PHY_PLL_ON | GPHY_CFG_PWDOWN_HW | GPHY_CFG_100AB_ENB); val |= GPHY_CFG_SEL_ANA_RESET; #ifdef notyet val |= GPHY_CFG_HIB_PULSE | GPHY_CFG_HIB_EN | GPHY_CFG_SEL_ANA_RESET; #else /* Disable PHY hibernation. */ val &= ~(GPHY_CFG_HIB_PULSE | GPHY_CFG_HIB_EN); #endif CSR_WRITE_4(sc, ALC_GPHY_CFG, val); DELAY(10); CSR_WRITE_4(sc, ALC_GPHY_CFG, val | GPHY_CFG_EXT_RESET); DELAY(800); /* Vendor PHY magic. */ #ifdef notyet alc_miidbg_writereg(sc, MII_DBG_LEGCYPS, DBG_LEGCYPS_DEFAULT); alc_miidbg_writereg(sc, MII_DBG_SYSMODCTL, DBG_SYSMODCTL_DEFAULT); alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_VDRVBIAS, EXT_VDRVBIAS_DEFAULT); #else /* Disable PHY hibernation. */ alc_miidbg_writereg(sc, MII_DBG_LEGCYPS, DBG_LEGCYPS_DEFAULT & ~DBG_LEGCYPS_ENB); alc_miidbg_writereg(sc, MII_DBG_HIBNEG, DBG_HIBNEG_DEFAULT & ~(DBG_HIBNEG_PSHIB_EN | DBG_HIBNEG_HIB_PULSE)); alc_miidbg_writereg(sc, MII_DBG_GREENCFG, DBG_GREENCFG_DEFAULT); #endif /* XXX Disable EEE. */ val = CSR_READ_4(sc, ALC_LPI_CTL); val &= ~LPI_CTL_ENB; CSR_WRITE_4(sc, ALC_LPI_CTL, val); alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_LOCAL_EEEADV, 0); /* PHY power saving. 
*/ alc_miidbg_writereg(sc, MII_DBG_TST10BTCFG, DBG_TST10BTCFG_DEFAULT); alc_miidbg_writereg(sc, MII_DBG_SRDSYSMOD, DBG_SRDSYSMOD_DEFAULT); alc_miidbg_writereg(sc, MII_DBG_TST100BTCFG, DBG_TST100BTCFG_DEFAULT); alc_miidbg_writereg(sc, MII_DBG_ANACTL, DBG_ANACTL_DEFAULT); val = alc_miidbg_readreg(sc, MII_DBG_GREENCFG2); val &= ~DBG_GREENCFG2_GATE_DFSE_EN; alc_miidbg_writereg(sc, MII_DBG_GREENCFG2, val); /* RTL8139C, 120m issue. */ alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_NLP78, ANEG_NLP78_120M_DEFAULT); alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_S3DIG10, ANEG_S3DIG10_DEFAULT); if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0) { /* Turn off half amplitude. */ val = alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL3); val |= EXT_CLDCTL3_BP_CABLE1TH_DET_GT; alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_CLDCTL3, val); /* Turn off Green feature. */ val = alc_miidbg_readreg(sc, MII_DBG_GREENCFG2); val |= DBG_GREENCFG2_BP_GREEN; alc_miidbg_writereg(sc, MII_DBG_GREENCFG2, val); /* Turn off half bias. */ val = alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL5); val |= EXT_CLDCTL5_BP_VD_HLFBIAS; alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_CLDCTL5, val); } } static void alc_phy_down(struct alc_softc *sc) { uint32_t gphy; switch (sc->alc_ident->deviceid) { case DEVICEID_ATHEROS_AR8161: case DEVICEID_ATHEROS_E2200: case DEVICEID_ATHEROS_E2400: case DEVICEID_ATHEROS_E2500: case DEVICEID_ATHEROS_AR8162: case DEVICEID_ATHEROS_AR8171: case DEVICEID_ATHEROS_AR8172: gphy = CSR_READ_4(sc, ALC_GPHY_CFG); gphy &= ~(GPHY_CFG_EXT_RESET | GPHY_CFG_LED_MODE | GPHY_CFG_100AB_ENB | GPHY_CFG_PHY_PLL_ON); gphy |= GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE | GPHY_CFG_SEL_ANA_RESET; gphy |= GPHY_CFG_PHY_IDDQ | GPHY_CFG_PWDOWN_HW; CSR_WRITE_4(sc, ALC_GPHY_CFG, gphy); break; case DEVICEID_ATHEROS_AR8151: case DEVICEID_ATHEROS_AR8151_V2: case DEVICEID_ATHEROS_AR8152_B: case DEVICEID_ATHEROS_AR8152_B2: /* * GPHY power down caused more problems on AR8151 v2.0. * When driver is reloaded after GPHY power down, * accesses to PHY/MAC registers hung the system. Only * cold boot recovered from it. I'm not sure whether * AR8151 v1.0 also requires this one though. I don't * have AR8151 v1.0 controller in hand. * The only option left is to isolate the PHY and * initiates power down the PHY which in turn saves * more power when driver is unloaded. */ alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, MII_BMCR, BMCR_ISO | BMCR_PDOWN); break; default: /* Force PHY down. 
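* For the remaining controllers the GPHY is put into IDDQ/hardware power-down through ALC_GPHY_CFG, with the external and analog reset bits kept asserted, followed by a short settle delay.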
*/ CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_EXT_RESET | GPHY_CFG_SEL_ANA_RESET | GPHY_CFG_PHY_IDDQ | GPHY_CFG_PWDOWN_HW); DELAY(1000); break; } } static void alc_aspm(struct alc_softc *sc, int init, int media) { if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) alc_aspm_816x(sc, init); else alc_aspm_813x(sc, media); } static void alc_aspm_813x(struct alc_softc *sc, int media) { uint32_t pmcfg; uint16_t linkcfg; if ((sc->alc_flags & ALC_FLAG_LINK) == 0) return; pmcfg = CSR_READ_4(sc, ALC_PM_CFG); if ((sc->alc_flags & (ALC_FLAG_APS | ALC_FLAG_PCIE)) == (ALC_FLAG_APS | ALC_FLAG_PCIE)) linkcfg = CSR_READ_2(sc, sc->alc_expcap + PCIER_LINK_CTL); else linkcfg = 0; pmcfg &= ~PM_CFG_SERDES_PD_EX_L1; pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_LCKDET_TIMER_MASK); pmcfg |= PM_CFG_MAC_ASPM_CHK; pmcfg |= (PM_CFG_LCKDET_TIMER_DEFAULT << PM_CFG_LCKDET_TIMER_SHIFT); pmcfg &= ~(PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB); if ((sc->alc_flags & ALC_FLAG_APS) != 0) { /* Disable extended sync except AR8152 B v1.0 */ linkcfg &= ~PCIEM_LINK_CTL_EXTENDED_SYNC; if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B && sc->alc_rev == ATHEROS_AR8152_B_V10) linkcfg |= PCIEM_LINK_CTL_EXTENDED_SYNC; CSR_WRITE_2(sc, sc->alc_expcap + PCIER_LINK_CTL, linkcfg); pmcfg &= ~(PM_CFG_EN_BUFS_RX_L0S | PM_CFG_SA_DLY_ENB | PM_CFG_HOTRST); pmcfg |= (PM_CFG_L1_ENTRY_TIMER_DEFAULT << PM_CFG_L1_ENTRY_TIMER_SHIFT); pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK; pmcfg |= (PM_CFG_PM_REQ_TIMER_DEFAULT << PM_CFG_PM_REQ_TIMER_SHIFT); pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_PCIE_RECV; } if ((sc->alc_flags & ALC_FLAG_LINK) != 0) { if ((sc->alc_flags & ALC_FLAG_L0S) != 0) pmcfg |= PM_CFG_ASPM_L0S_ENB; if ((sc->alc_flags & ALC_FLAG_L1S) != 0) pmcfg |= PM_CFG_ASPM_L1_ENB; if ((sc->alc_flags & ALC_FLAG_APS) != 0) { if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B) pmcfg &= ~PM_CFG_ASPM_L0S_ENB; pmcfg &= ~(PM_CFG_SERDES_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB | PM_CFG_SERDES_BUDS_RX_L1_ENB); pmcfg |= PM_CFG_CLK_SWH_L1; if (media == IFM_100_TX || media == IFM_1000_T) { pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_MASK; switch (sc->alc_ident->deviceid) { case DEVICEID_ATHEROS_AR8152_B: pmcfg |= (7 << PM_CFG_L1_ENTRY_TIMER_SHIFT); break; case DEVICEID_ATHEROS_AR8152_B2: case DEVICEID_ATHEROS_AR8151_V2: pmcfg |= (4 << PM_CFG_L1_ENTRY_TIMER_SHIFT); break; default: pmcfg |= (15 << PM_CFG_L1_ENTRY_TIMER_SHIFT); break; } } } else { pmcfg |= PM_CFG_SERDES_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB | PM_CFG_SERDES_BUDS_RX_L1_ENB; pmcfg &= ~(PM_CFG_CLK_SWH_L1 | PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB); } } else { pmcfg &= ~(PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB); pmcfg |= PM_CFG_CLK_SWH_L1; if ((sc->alc_flags & ALC_FLAG_L1S) != 0) pmcfg |= PM_CFG_ASPM_L1_ENB; } CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg); } static void alc_aspm_816x(struct alc_softc *sc, int init) { uint32_t pmcfg; pmcfg = CSR_READ_4(sc, ALC_PM_CFG); pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_816X_MASK; pmcfg |= PM_CFG_L1_ENTRY_TIMER_816X_DEFAULT; pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK; pmcfg |= PM_CFG_PM_REQ_TIMER_816X_DEFAULT; pmcfg &= ~PM_CFG_LCKDET_TIMER_MASK; pmcfg |= PM_CFG_LCKDET_TIMER_DEFAULT; pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_CLK_SWH_L1 | PM_CFG_PCIE_RECV; pmcfg &= ~(PM_CFG_RX_L1_AFTER_L0S | PM_CFG_TX_L1_AFTER_L0S | PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB | PM_CFG_SERDES_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB | PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SA_DLY_ENB | PM_CFG_MAC_ASPM_CHK | PM_CFG_HOTRST); if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 && (sc->alc_rev & 0x01) != 0) pmcfg |= 
PM_CFG_SERDES_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB; if ((sc->alc_flags & ALC_FLAG_LINK) != 0) { /* Link up, enable both L0s, L1s. */ pmcfg |= PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB | PM_CFG_MAC_ASPM_CHK; } else { if (init != 0) pmcfg |= PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB | PM_CFG_MAC_ASPM_CHK; else if ((sc->alc_ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) pmcfg |= PM_CFG_ASPM_L1_ENB | PM_CFG_MAC_ASPM_CHK; } CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg); } static void alc_init_pcie(struct alc_softc *sc) { const char *aspm_state[] = { "L0s/L1", "L0s", "L1", "L0s/L1" }; uint32_t cap, ctl, val; int state; /* Clear data link and flow-control protocol error. */ val = CSR_READ_4(sc, ALC_PEX_UNC_ERR_SEV); val &= ~(PEX_UNC_ERR_SEV_DLP | PEX_UNC_ERR_SEV_FCP); CSR_WRITE_4(sc, ALC_PEX_UNC_ERR_SEV, val); if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG, CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB); CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, CSR_READ_4(sc, ALC_PCIE_PHYMISC) | PCIE_PHYMISC_FORCE_RCV_DET); if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B && sc->alc_rev == ATHEROS_AR8152_B_V10) { val = CSR_READ_4(sc, ALC_PCIE_PHYMISC2); val &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK | PCIE_PHYMISC2_SERDES_TH_MASK); val |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT; val |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT; CSR_WRITE_4(sc, ALC_PCIE_PHYMISC2, val); } /* Disable ASPM L0S and L1. */ cap = CSR_READ_2(sc, sc->alc_expcap + PCIER_LINK_CAP); if ((cap & PCIEM_LINK_CAP_ASPM) != 0) { ctl = CSR_READ_2(sc, sc->alc_expcap + PCIER_LINK_CTL); if ((ctl & PCIEM_LINK_CTL_RCB) != 0) sc->alc_rcb = DMA_CFG_RCB_128; if (bootverbose) device_printf(sc->alc_dev, "RCB %u bytes\n", sc->alc_rcb == DMA_CFG_RCB_64 ? 64 : 128); state = ctl & PCIEM_LINK_CTL_ASPMC; if (state & PCIEM_LINK_CTL_ASPMC_L0S) sc->alc_flags |= ALC_FLAG_L0S; if (state & PCIEM_LINK_CTL_ASPMC_L1) sc->alc_flags |= ALC_FLAG_L1S; if (bootverbose) device_printf(sc->alc_dev, "ASPM %s %s\n", aspm_state[state], state == 0 ? "disabled" : "enabled"); alc_disable_l0s_l1(sc); } else { if (bootverbose) device_printf(sc->alc_dev, "no ASPM support\n"); } } else { val = CSR_READ_4(sc, ALC_PDLL_TRNS1); val &= ~PDLL_TRNS1_D3PLLOFF_ENB; CSR_WRITE_4(sc, ALC_PDLL_TRNS1, val); val = CSR_READ_4(sc, ALC_MASTER_CFG); if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 && (sc->alc_rev & 0x01) != 0) { if ((val & MASTER_WAKEN_25M) == 0 || (val & MASTER_CLK_SEL_DIS) == 0) { val |= MASTER_WAKEN_25M | MASTER_CLK_SEL_DIS; CSR_WRITE_4(sc, ALC_MASTER_CFG, val); } } else { if ((val & MASTER_WAKEN_25M) == 0 || (val & MASTER_CLK_SEL_DIS) != 0) { val |= MASTER_WAKEN_25M; val &= ~MASTER_CLK_SEL_DIS; CSR_WRITE_4(sc, ALC_MASTER_CFG, val); } } } alc_aspm(sc, 1, IFM_UNKNOWN); } static void alc_config_msi(struct alc_softc *sc) { uint32_t ctl, mod; if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { /* * It seems interrupt moderation is controlled by * ALC_MSI_RETRANS_TIMER register if MSI/MSIX is active. * Driver uses RX interrupt moderation parameter to * program ALC_MSI_RETRANS_TIMER register. 
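* The timer value programmed below comes from the Rx moderation tunable (converted with ALC_USECS() and clamped to a minimum of 1); the mask-select bit picks standard masking for MSI-X and line masking for plain MSI, while legacy INTx simply clears the register.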
*/ ctl = CSR_READ_4(sc, ALC_MSI_RETRANS_TIMER); ctl &= ~MSI_RETRANS_TIMER_MASK; ctl &= ~MSI_RETRANS_MASK_SEL_LINE; mod = ALC_USECS(sc->alc_int_rx_mod); if (mod == 0) mod = 1; ctl |= mod; if ((sc->alc_flags & ALC_FLAG_MSIX) != 0) CSR_WRITE_4(sc, ALC_MSI_RETRANS_TIMER, ctl | MSI_RETRANS_MASK_SEL_STD); else if ((sc->alc_flags & ALC_FLAG_MSI) != 0) CSR_WRITE_4(sc, ALC_MSI_RETRANS_TIMER, ctl | MSI_RETRANS_MASK_SEL_LINE); else CSR_WRITE_4(sc, ALC_MSI_RETRANS_TIMER, 0); } } static int alc_attach(device_t dev) { struct alc_softc *sc; struct ifnet *ifp; int base, error, i, msic, msixc; uint16_t burst; error = 0; sc = device_get_softc(dev); sc->alc_dev = dev; sc->alc_rev = pci_get_revid(dev); mtx_init(&sc->alc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->alc_tick_ch, &sc->alc_mtx, 0); - TASK_INIT(&sc->alc_int_task, 0, alc_int_task, sc); + NET_TASK_INIT(&sc->alc_int_task, 0, alc_int_task, sc); sc->alc_ident = alc_find_ident(dev); /* Map the device. */ pci_enable_busmaster(dev); sc->alc_res_spec = alc_res_spec_mem; sc->alc_irq_spec = alc_irq_spec_legacy; error = bus_alloc_resources(dev, sc->alc_res_spec, sc->alc_res); if (error != 0) { device_printf(dev, "cannot allocate memory resources.\n"); goto fail; } /* Set PHY address. */ sc->alc_phyaddr = ALC_PHY_ADDR; /* * One odd thing is AR8132 uses the same PHY hardware(F1 * gigabit PHY) of AR8131. So atphy(4) of AR8132 reports * the PHY supports 1000Mbps but that's not true. The PHY * used in AR8132 can't establish gigabit link even if it * shows the same PHY model/revision number of AR8131. */ switch (sc->alc_ident->deviceid) { case DEVICEID_ATHEROS_E2200: case DEVICEID_ATHEROS_E2400: case DEVICEID_ATHEROS_E2500: sc->alc_flags |= ALC_FLAG_E2X00; /* FALLTHROUGH */ case DEVICEID_ATHEROS_AR8161: if (pci_get_subvendor(dev) == VENDORID_ATHEROS && pci_get_subdevice(dev) == 0x0091 && sc->alc_rev == 0) sc->alc_flags |= ALC_FLAG_LINK_WAR; /* FALLTHROUGH */ case DEVICEID_ATHEROS_AR8171: sc->alc_flags |= ALC_FLAG_AR816X_FAMILY; break; case DEVICEID_ATHEROS_AR8162: case DEVICEID_ATHEROS_AR8172: sc->alc_flags |= ALC_FLAG_FASTETHER | ALC_FLAG_AR816X_FAMILY; break; case DEVICEID_ATHEROS_AR8152_B: case DEVICEID_ATHEROS_AR8152_B2: sc->alc_flags |= ALC_FLAG_APS; /* FALLTHROUGH */ case DEVICEID_ATHEROS_AR8132: sc->alc_flags |= ALC_FLAG_FASTETHER; break; case DEVICEID_ATHEROS_AR8151: case DEVICEID_ATHEROS_AR8151_V2: sc->alc_flags |= ALC_FLAG_APS; /* FALLTHROUGH */ default: break; } sc->alc_flags |= ALC_FLAG_JUMBO; /* * It seems that AR813x/AR815x has silicon bug for SMB. In * addition, Atheros said that enabling SMB wouldn't improve * performance. However I think it's bad to access lots of * registers to extract MAC statistics. */ sc->alc_flags |= ALC_FLAG_SMB_BUG; /* * Don't use Tx CMB. It is known to have silicon bug. */ sc->alc_flags |= ALC_FLAG_CMB_BUG; sc->alc_chip_rev = CSR_READ_4(sc, ALC_MASTER_CFG) >> MASTER_CHIP_REV_SHIFT; if (bootverbose) { device_printf(dev, "PCI device revision : 0x%04x\n", sc->alc_rev); device_printf(dev, "Chip id/revision : 0x%04x\n", sc->alc_chip_rev); if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) device_printf(dev, "AR816x revision : 0x%x\n", AR816X_REV(sc->alc_rev)); } device_printf(dev, "%u Tx FIFO, %u Rx FIFO\n", CSR_READ_4(sc, ALC_SRAM_TX_FIFO_LEN) * 8, CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN) * 8); /* Initialize DMA parameters. 
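* The maximum read request and payload sizes are read from the PCIe device control register and used as indices into alc_dma_burst[]; both are clamped so they do not exceed 1024 bytes, and E2200/E2400/E2500 parts are further forced to a 128-byte payload (see the DMA write error note below).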
*/ sc->alc_dma_rd_burst = 0; sc->alc_dma_wr_burst = 0; sc->alc_rcb = DMA_CFG_RCB_64; if (pci_find_cap(dev, PCIY_EXPRESS, &base) == 0) { sc->alc_flags |= ALC_FLAG_PCIE; sc->alc_expcap = base; burst = CSR_READ_2(sc, base + PCIER_DEVICE_CTL); sc->alc_dma_rd_burst = (burst & PCIEM_CTL_MAX_READ_REQUEST) >> 12; sc->alc_dma_wr_burst = (burst & PCIEM_CTL_MAX_PAYLOAD) >> 5; if (bootverbose) { device_printf(dev, "Read request size : %u bytes.\n", alc_dma_burst[sc->alc_dma_rd_burst]); device_printf(dev, "TLP payload size : %u bytes.\n", alc_dma_burst[sc->alc_dma_wr_burst]); } if (alc_dma_burst[sc->alc_dma_rd_burst] > 1024) sc->alc_dma_rd_burst = 3; if (alc_dma_burst[sc->alc_dma_wr_burst] > 1024) sc->alc_dma_wr_burst = 3; /* * Force maximum payload size to 128 bytes for * E2200/E2400/E2500. * Otherwise it triggers DMA write error. */ if ((sc->alc_flags & ALC_FLAG_E2X00) != 0) sc->alc_dma_wr_burst = 0; alc_init_pcie(sc); } /* Reset PHY. */ alc_phy_reset(sc); /* Reset the ethernet controller. */ alc_stop_mac(sc); alc_reset(sc); /* Allocate IRQ resources. */ msixc = pci_msix_count(dev); msic = pci_msi_count(dev); if (bootverbose) { device_printf(dev, "MSIX count : %d\n", msixc); device_printf(dev, "MSI count : %d\n", msic); } if (msixc > 1) msixc = 1; if (msic > 1) msic = 1; /* * Prefer MSIX over MSI. * AR816x controller has a silicon bug that MSI interrupt * does not assert if PCIM_CMD_INTxDIS bit of command * register is set. pci(4) was taught to handle that case. */ if (msix_disable == 0 || msi_disable == 0) { if (msix_disable == 0 && msixc > 0 && pci_alloc_msix(dev, &msixc) == 0) { if (msic == 1) { device_printf(dev, "Using %d MSIX message(s).\n", msixc); sc->alc_flags |= ALC_FLAG_MSIX; sc->alc_irq_spec = alc_irq_spec_msix; } else pci_release_msi(dev); } if (msi_disable == 0 && (sc->alc_flags & ALC_FLAG_MSIX) == 0 && msic > 0 && pci_alloc_msi(dev, &msic) == 0) { if (msic == 1) { device_printf(dev, "Using %d MSI message(s).\n", msic); sc->alc_flags |= ALC_FLAG_MSI; sc->alc_irq_spec = alc_irq_spec_msi; } else pci_release_msi(dev); } } error = bus_alloc_resources(dev, sc->alc_irq_spec, sc->alc_irq); if (error != 0) { device_printf(dev, "cannot allocate IRQ resources.\n"); goto fail; } /* Create device sysctl node. */ alc_sysctl_node(sc); if ((error = alc_dma_alloc(sc)) != 0) goto fail; /* Load station address. */ alc_get_macaddr(sc); ifp = sc->alc_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "cannot allocate ifnet structure.\n"); error = ENXIO; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = alc_ioctl; ifp->if_start = alc_start; ifp->if_init = alc_init; ifp->if_snd.ifq_drv_maxlen = ALC_TX_RING_CNT - 1; IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); IFQ_SET_READY(&ifp->if_snd); ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TSO4; ifp->if_hwassist = ALC_CSUM_FEATURES | CSUM_TSO; if (pci_find_cap(dev, PCIY_PMG, &base) == 0) { ifp->if_capabilities |= IFCAP_WOL_MAGIC | IFCAP_WOL_MCAST; sc->alc_flags |= ALC_FLAG_PM; sc->alc_pmcap = base; } ifp->if_capenable = ifp->if_capabilities; /* Set up MII bus. */ error = mii_attach(dev, &sc->alc_miibus, ifp, alc_mediachange, alc_mediastatus, BMSR_DEFCAPMASK, sc->alc_phyaddr, MII_OFFSET_ANY, MIIF_DOPAUSE); if (error != 0) { device_printf(dev, "attaching PHYs failed\n"); goto fail; } ether_ifattach(ifp, sc->alc_eaddr); /* VLAN capability setup. 
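* Hardware VLAN tagging, VLAN checksum and VLAN TSO are advertised here; plain Tx checksum offload is then turned back off by default for pre-AR816x parts, per the workaround described just below.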
*/ ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO; ifp->if_capenable = ifp->if_capabilities; /* * XXX * It seems enabling Tx checksum offloading makes more trouble. * Sometimes the controller does not receive any frames when * Tx checksum offloading is enabled. I'm not sure whether this * is a bug in Tx checksum offloading logic or I got broken * sample boards. To safety, don't enable Tx checksum offloading * by default but give chance to users to toggle it if they know * their controllers work without problems. * Fortunately, Tx checksum offloading for AR816x family * seems to work. */ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { ifp->if_capenable &= ~IFCAP_TXCSUM; ifp->if_hwassist &= ~ALC_CSUM_FEATURES; } /* Tell the upper layer(s) we support long frames. */ ifp->if_hdrlen = sizeof(struct ether_vlan_header); /* Create local taskq. */ sc->alc_tq = taskqueue_create_fast("alc_taskq", M_WAITOK, taskqueue_thread_enqueue, &sc->alc_tq); if (sc->alc_tq == NULL) { device_printf(dev, "could not create taskqueue.\n"); ether_ifdetach(ifp); error = ENXIO; goto fail; } taskqueue_start_threads(&sc->alc_tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->alc_dev)); alc_config_msi(sc); if ((sc->alc_flags & ALC_FLAG_MSIX) != 0) msic = ALC_MSIX_MESSAGES; else if ((sc->alc_flags & ALC_FLAG_MSI) != 0) msic = ALC_MSI_MESSAGES; else msic = 1; for (i = 0; i < msic; i++) { error = bus_setup_intr(dev, sc->alc_irq[i], INTR_TYPE_NET | INTR_MPSAFE, alc_intr, NULL, sc, &sc->alc_intrhand[i]); if (error != 0) break; } if (error != 0) { device_printf(dev, "could not set up interrupt handler.\n"); taskqueue_free(sc->alc_tq); sc->alc_tq = NULL; ether_ifdetach(ifp); goto fail; } /* Attach driver debugnet methods. */ DEBUGNET_SET(ifp, alc); fail: if (error != 0) alc_detach(dev); return (error); } static int alc_detach(device_t dev) { struct alc_softc *sc; struct ifnet *ifp; int i, msic; sc = device_get_softc(dev); ifp = sc->alc_ifp; if (device_is_attached(dev)) { ether_ifdetach(ifp); ALC_LOCK(sc); alc_stop(sc); ALC_UNLOCK(sc); callout_drain(&sc->alc_tick_ch); taskqueue_drain(sc->alc_tq, &sc->alc_int_task); } if (sc->alc_tq != NULL) { taskqueue_drain(sc->alc_tq, &sc->alc_int_task); taskqueue_free(sc->alc_tq); sc->alc_tq = NULL; } if (sc->alc_miibus != NULL) { device_delete_child(dev, sc->alc_miibus); sc->alc_miibus = NULL; } bus_generic_detach(dev); alc_dma_free(sc); if (ifp != NULL) { if_free(ifp); sc->alc_ifp = NULL; } if ((sc->alc_flags & ALC_FLAG_MSIX) != 0) msic = ALC_MSIX_MESSAGES; else if ((sc->alc_flags & ALC_FLAG_MSI) != 0) msic = ALC_MSI_MESSAGES; else msic = 1; for (i = 0; i < msic; i++) { if (sc->alc_intrhand[i] != NULL) { bus_teardown_intr(dev, sc->alc_irq[i], sc->alc_intrhand[i]); sc->alc_intrhand[i] = NULL; } } if (sc->alc_res[0] != NULL) alc_phy_down(sc); bus_release_resources(dev, sc->alc_irq_spec, sc->alc_irq); if ((sc->alc_flags & (ALC_FLAG_MSI | ALC_FLAG_MSIX)) != 0) pci_release_msi(dev); bus_release_resources(dev, sc->alc_res_spec, sc->alc_res); mtx_destroy(&sc->alc_mtx); return (0); } #define ALC_SYSCTL_STAT_ADD32(c, h, n, p, d) \ SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) #define ALC_SYSCTL_STAT_ADD64(c, h, n, p, d) \ SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d) static void alc_sysctl_node(struct alc_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *child, *parent; struct sysctl_oid *tree; struct alc_hw_stats *stats; int error; stats = &sc->alc_stats; ctx = device_get_sysctl_ctx(sc->alc_dev); child = 
SYSCTL_CHILDREN(device_get_sysctl_tree(sc->alc_dev)); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_rx_mod", CTLTYPE_INT | CTLFLAG_RW, &sc->alc_int_rx_mod, 0, sysctl_hw_alc_int_mod, "I", "alc Rx interrupt moderation"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_tx_mod", CTLTYPE_INT | CTLFLAG_RW, &sc->alc_int_tx_mod, 0, sysctl_hw_alc_int_mod, "I", "alc Tx interrupt moderation"); /* Pull in device tunables. */ sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT; error = resource_int_value(device_get_name(sc->alc_dev), device_get_unit(sc->alc_dev), "int_rx_mod", &sc->alc_int_rx_mod); if (error == 0) { if (sc->alc_int_rx_mod < ALC_IM_TIMER_MIN || sc->alc_int_rx_mod > ALC_IM_TIMER_MAX) { device_printf(sc->alc_dev, "int_rx_mod value out of " "range; using default: %d\n", ALC_IM_RX_TIMER_DEFAULT); sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT; } } sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT; error = resource_int_value(device_get_name(sc->alc_dev), device_get_unit(sc->alc_dev), "int_tx_mod", &sc->alc_int_tx_mod); if (error == 0) { if (sc->alc_int_tx_mod < ALC_IM_TIMER_MIN || sc->alc_int_tx_mod > ALC_IM_TIMER_MAX) { device_printf(sc->alc_dev, "int_tx_mod value out of " "range; using default: %d\n", ALC_IM_TX_TIMER_DEFAULT); sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT; } } SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW, &sc->alc_process_limit, 0, sysctl_hw_alc_proc_limit, "I", "max number of Rx events to process"); /* Pull in device tunables. */ sc->alc_process_limit = ALC_PROC_DEFAULT; error = resource_int_value(device_get_name(sc->alc_dev), device_get_unit(sc->alc_dev), "process_limit", &sc->alc_process_limit); if (error == 0) { if (sc->alc_process_limit < ALC_PROC_MIN || sc->alc_process_limit > ALC_PROC_MAX) { device_printf(sc->alc_dev, "process_limit value out of range; " "using default: %d\n", ALC_PROC_DEFAULT); sc->alc_process_limit = ALC_PROC_DEFAULT; } } tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, NULL, "ALC statistics"); parent = SYSCTL_CHILDREN(tree); /* Rx statistics. 
*/ tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD, NULL, "Rx MAC statistics"); child = SYSCTL_CHILDREN(tree); ALC_SYSCTL_STAT_ADD32(ctx, child, "good_frames", &stats->rx_frames, "Good frames"); ALC_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames", &stats->rx_bcast_frames, "Good broadcast frames"); ALC_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames", &stats->rx_mcast_frames, "Good multicast frames"); ALC_SYSCTL_STAT_ADD32(ctx, child, "pause_frames", &stats->rx_pause_frames, "Pause control frames"); ALC_SYSCTL_STAT_ADD32(ctx, child, "control_frames", &stats->rx_control_frames, "Control frames"); ALC_SYSCTL_STAT_ADD32(ctx, child, "crc_errs", &stats->rx_crcerrs, "CRC errors"); ALC_SYSCTL_STAT_ADD32(ctx, child, "len_errs", &stats->rx_lenerrs, "Frames with length mismatched"); ALC_SYSCTL_STAT_ADD64(ctx, child, "good_octets", &stats->rx_bytes, "Good octets"); ALC_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets", &stats->rx_bcast_bytes, "Good broadcast octets"); ALC_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets", &stats->rx_mcast_bytes, "Good multicast octets"); ALC_SYSCTL_STAT_ADD32(ctx, child, "runts", &stats->rx_runts, "Too short frames"); ALC_SYSCTL_STAT_ADD32(ctx, child, "fragments", &stats->rx_fragments, "Fragmented frames"); ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_64", &stats->rx_pkts_64, "64 bytes frames"); ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127", &stats->rx_pkts_65_127, "65 to 127 bytes frames"); ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255", &stats->rx_pkts_128_255, "128 to 255 bytes frames"); ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511", &stats->rx_pkts_256_511, "256 to 511 bytes frames"); ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023", &stats->rx_pkts_512_1023, "512 to 1023 bytes frames"); ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518", &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames"); ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max", &stats->rx_pkts_1519_max, "1519 to max frames"); ALC_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs", &stats->rx_pkts_truncated, "Truncated frames due to MTU size"); ALC_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows", &stats->rx_fifo_oflows, "FIFO overflows"); ALC_SYSCTL_STAT_ADD32(ctx, child, "rrs_errs", &stats->rx_rrs_errs, "Return status write-back errors"); ALC_SYSCTL_STAT_ADD32(ctx, child, "align_errs", &stats->rx_alignerrs, "Alignment errors"); ALC_SYSCTL_STAT_ADD32(ctx, child, "filtered", &stats->rx_pkts_filtered, "Frames dropped due to address filtering"); /* Tx statistics. 
*/ tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD, NULL, "Tx MAC statistics"); child = SYSCTL_CHILDREN(tree); ALC_SYSCTL_STAT_ADD32(ctx, child, "good_frames", &stats->tx_frames, "Good frames"); ALC_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames", &stats->tx_bcast_frames, "Good broadcast frames"); ALC_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames", &stats->tx_mcast_frames, "Good multicast frames"); ALC_SYSCTL_STAT_ADD32(ctx, child, "pause_frames", &stats->tx_pause_frames, "Pause control frames"); ALC_SYSCTL_STAT_ADD32(ctx, child, "control_frames", &stats->tx_control_frames, "Control frames"); ALC_SYSCTL_STAT_ADD32(ctx, child, "excess_defers", &stats->tx_excess_defer, "Frames with excessive derferrals"); ALC_SYSCTL_STAT_ADD32(ctx, child, "defers", &stats->tx_excess_defer, "Frames with derferrals"); ALC_SYSCTL_STAT_ADD64(ctx, child, "good_octets", &stats->tx_bytes, "Good octets"); ALC_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets", &stats->tx_bcast_bytes, "Good broadcast octets"); ALC_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets", &stats->tx_mcast_bytes, "Good multicast octets"); ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_64", &stats->tx_pkts_64, "64 bytes frames"); ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127", &stats->tx_pkts_65_127, "65 to 127 bytes frames"); ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255", &stats->tx_pkts_128_255, "128 to 255 bytes frames"); ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511", &stats->tx_pkts_256_511, "256 to 511 bytes frames"); ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023", &stats->tx_pkts_512_1023, "512 to 1023 bytes frames"); ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518", &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames"); ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max", &stats->tx_pkts_1519_max, "1519 to max frames"); ALC_SYSCTL_STAT_ADD32(ctx, child, "single_colls", &stats->tx_single_colls, "Single collisions"); ALC_SYSCTL_STAT_ADD32(ctx, child, "multi_colls", &stats->tx_multi_colls, "Multiple collisions"); ALC_SYSCTL_STAT_ADD32(ctx, child, "late_colls", &stats->tx_late_colls, "Late collisions"); ALC_SYSCTL_STAT_ADD32(ctx, child, "excess_colls", &stats->tx_excess_colls, "Excessive collisions"); ALC_SYSCTL_STAT_ADD32(ctx, child, "underruns", &stats->tx_underrun, "FIFO underruns"); ALC_SYSCTL_STAT_ADD32(ctx, child, "desc_underruns", &stats->tx_desc_underrun, "Descriptor write-back errors"); ALC_SYSCTL_STAT_ADD32(ctx, child, "len_errs", &stats->tx_lenerrs, "Frames with length mismatched"); ALC_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs", &stats->tx_pkts_truncated, "Truncated frames due to MTU size"); } #undef ALC_SYSCTL_STAT_ADD32 #undef ALC_SYSCTL_STAT_ADD64 struct alc_dmamap_arg { bus_addr_t alc_busaddr; }; static void alc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct alc_dmamap_arg *ctx; if (error != 0) return; KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); ctx = (struct alc_dmamap_arg *)arg; ctx->alc_busaddr = segs[0].ds_addr; } /* * Normal and high Tx descriptors shares single Tx high address. * Four Rx descriptor/return rings and CMB shares the same Rx * high address. 
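* alc_check_boundary() below enforces this: it returns EFBIG if any ring or the CMB crosses a 4GB boundary, or if the Rx ring, Rx return ring and CMB end up with different upper 32 address bits, in which case the caller falls back to 32-bit DMA addressing.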
*/ static int alc_check_boundary(struct alc_softc *sc) { bus_addr_t cmb_end, rx_ring_end, rr_ring_end, tx_ring_end; rx_ring_end = sc->alc_rdata.alc_rx_ring_paddr + ALC_RX_RING_SZ; rr_ring_end = sc->alc_rdata.alc_rr_ring_paddr + ALC_RR_RING_SZ; cmb_end = sc->alc_rdata.alc_cmb_paddr + ALC_CMB_SZ; tx_ring_end = sc->alc_rdata.alc_tx_ring_paddr + ALC_TX_RING_SZ; /* 4GB boundary crossing is not allowed. */ if ((ALC_ADDR_HI(rx_ring_end) != ALC_ADDR_HI(sc->alc_rdata.alc_rx_ring_paddr)) || (ALC_ADDR_HI(rr_ring_end) != ALC_ADDR_HI(sc->alc_rdata.alc_rr_ring_paddr)) || (ALC_ADDR_HI(cmb_end) != ALC_ADDR_HI(sc->alc_rdata.alc_cmb_paddr)) || (ALC_ADDR_HI(tx_ring_end) != ALC_ADDR_HI(sc->alc_rdata.alc_tx_ring_paddr))) return (EFBIG); /* * Make sure Rx return descriptor/Rx descriptor/CMB use * the same high address. */ if ((ALC_ADDR_HI(rx_ring_end) != ALC_ADDR_HI(rr_ring_end)) || (ALC_ADDR_HI(rx_ring_end) != ALC_ADDR_HI(cmb_end))) return (EFBIG); return (0); } static int alc_dma_alloc(struct alc_softc *sc) { struct alc_txdesc *txd; struct alc_rxdesc *rxd; bus_addr_t lowaddr; struct alc_dmamap_arg ctx; int error, i; lowaddr = BUS_SPACE_MAXADDR; again: /* Create parent DMA tag. */ error = bus_dma_tag_create( bus_get_dma_tag(sc->alc_dev), /* parent */ 1, 0, /* alignment, boundary */ lowaddr, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 0, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->alc_cdata.alc_parent_tag); if (error != 0) { device_printf(sc->alc_dev, "could not create parent DMA tag.\n"); goto fail; } /* Create DMA tag for Tx descriptor ring. */ error = bus_dma_tag_create( sc->alc_cdata.alc_parent_tag, /* parent */ ALC_TX_RING_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ ALC_TX_RING_SZ, /* maxsize */ 1, /* nsegments */ ALC_TX_RING_SZ, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->alc_cdata.alc_tx_ring_tag); if (error != 0) { device_printf(sc->alc_dev, "could not create Tx ring DMA tag.\n"); goto fail; } /* Create DMA tag for Rx free descriptor ring. */ error = bus_dma_tag_create( sc->alc_cdata.alc_parent_tag, /* parent */ ALC_RX_RING_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ ALC_RX_RING_SZ, /* maxsize */ 1, /* nsegments */ ALC_RX_RING_SZ, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->alc_cdata.alc_rx_ring_tag); if (error != 0) { device_printf(sc->alc_dev, "could not create Rx ring DMA tag.\n"); goto fail; } /* Create DMA tag for Rx return descriptor ring. */ error = bus_dma_tag_create( sc->alc_cdata.alc_parent_tag, /* parent */ ALC_RR_RING_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ ALC_RR_RING_SZ, /* maxsize */ 1, /* nsegments */ ALC_RR_RING_SZ, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->alc_cdata.alc_rr_ring_tag); if (error != 0) { device_printf(sc->alc_dev, "could not create Rx return ring DMA tag.\n"); goto fail; } /* Create DMA tag for coalescing message block. 
*/ error = bus_dma_tag_create( sc->alc_cdata.alc_parent_tag, /* parent */ ALC_CMB_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ ALC_CMB_SZ, /* maxsize */ 1, /* nsegments */ ALC_CMB_SZ, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->alc_cdata.alc_cmb_tag); if (error != 0) { device_printf(sc->alc_dev, "could not create CMB DMA tag.\n"); goto fail; } /* Create DMA tag for status message block. */ error = bus_dma_tag_create( sc->alc_cdata.alc_parent_tag, /* parent */ ALC_SMB_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ ALC_SMB_SZ, /* maxsize */ 1, /* nsegments */ ALC_SMB_SZ, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->alc_cdata.alc_smb_tag); if (error != 0) { device_printf(sc->alc_dev, "could not create SMB DMA tag.\n"); goto fail; } /* Allocate DMA'able memory and load the DMA map for Tx ring. */ error = bus_dmamem_alloc(sc->alc_cdata.alc_tx_ring_tag, (void **)&sc->alc_rdata.alc_tx_ring, BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->alc_cdata.alc_tx_ring_map); if (error != 0) { device_printf(sc->alc_dev, "could not allocate DMA'able memory for Tx ring.\n"); goto fail; } ctx.alc_busaddr = 0; error = bus_dmamap_load(sc->alc_cdata.alc_tx_ring_tag, sc->alc_cdata.alc_tx_ring_map, sc->alc_rdata.alc_tx_ring, ALC_TX_RING_SZ, alc_dmamap_cb, &ctx, 0); if (error != 0 || ctx.alc_busaddr == 0) { device_printf(sc->alc_dev, "could not load DMA'able memory for Tx ring.\n"); goto fail; } sc->alc_rdata.alc_tx_ring_paddr = ctx.alc_busaddr; /* Allocate DMA'able memory and load the DMA map for Rx ring. */ error = bus_dmamem_alloc(sc->alc_cdata.alc_rx_ring_tag, (void **)&sc->alc_rdata.alc_rx_ring, BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->alc_cdata.alc_rx_ring_map); if (error != 0) { device_printf(sc->alc_dev, "could not allocate DMA'able memory for Rx ring.\n"); goto fail; } ctx.alc_busaddr = 0; error = bus_dmamap_load(sc->alc_cdata.alc_rx_ring_tag, sc->alc_cdata.alc_rx_ring_map, sc->alc_rdata.alc_rx_ring, ALC_RX_RING_SZ, alc_dmamap_cb, &ctx, 0); if (error != 0 || ctx.alc_busaddr == 0) { device_printf(sc->alc_dev, "could not load DMA'able memory for Rx ring.\n"); goto fail; } sc->alc_rdata.alc_rx_ring_paddr = ctx.alc_busaddr; /* Allocate DMA'able memory and load the DMA map for Rx return ring. */ error = bus_dmamem_alloc(sc->alc_cdata.alc_rr_ring_tag, (void **)&sc->alc_rdata.alc_rr_ring, BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->alc_cdata.alc_rr_ring_map); if (error != 0) { device_printf(sc->alc_dev, "could not allocate DMA'able memory for Rx return ring.\n"); goto fail; } ctx.alc_busaddr = 0; error = bus_dmamap_load(sc->alc_cdata.alc_rr_ring_tag, sc->alc_cdata.alc_rr_ring_map, sc->alc_rdata.alc_rr_ring, ALC_RR_RING_SZ, alc_dmamap_cb, &ctx, 0); if (error != 0 || ctx.alc_busaddr == 0) { device_printf(sc->alc_dev, "could not load DMA'able memory for Tx ring.\n"); goto fail; } sc->alc_rdata.alc_rr_ring_paddr = ctx.alc_busaddr; /* Allocate DMA'able memory and load the DMA map for CMB. 
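* The CMB and SMB follow the same allocate/load pattern as the descriptor rings; once everything is loaded, alc_check_boundary() is consulted and, on failure, all DMA memory is freed and the allocation restarts with the parent tag restricted to 32-bit addresses.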
*/ error = bus_dmamem_alloc(sc->alc_cdata.alc_cmb_tag, (void **)&sc->alc_rdata.alc_cmb, BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->alc_cdata.alc_cmb_map); if (error != 0) { device_printf(sc->alc_dev, "could not allocate DMA'able memory for CMB.\n"); goto fail; } ctx.alc_busaddr = 0; error = bus_dmamap_load(sc->alc_cdata.alc_cmb_tag, sc->alc_cdata.alc_cmb_map, sc->alc_rdata.alc_cmb, ALC_CMB_SZ, alc_dmamap_cb, &ctx, 0); if (error != 0 || ctx.alc_busaddr == 0) { device_printf(sc->alc_dev, "could not load DMA'able memory for CMB.\n"); goto fail; } sc->alc_rdata.alc_cmb_paddr = ctx.alc_busaddr; /* Allocate DMA'able memory and load the DMA map for SMB. */ error = bus_dmamem_alloc(sc->alc_cdata.alc_smb_tag, (void **)&sc->alc_rdata.alc_smb, BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->alc_cdata.alc_smb_map); if (error != 0) { device_printf(sc->alc_dev, "could not allocate DMA'able memory for SMB.\n"); goto fail; } ctx.alc_busaddr = 0; error = bus_dmamap_load(sc->alc_cdata.alc_smb_tag, sc->alc_cdata.alc_smb_map, sc->alc_rdata.alc_smb, ALC_SMB_SZ, alc_dmamap_cb, &ctx, 0); if (error != 0 || ctx.alc_busaddr == 0) { device_printf(sc->alc_dev, "could not load DMA'able memory for CMB.\n"); goto fail; } sc->alc_rdata.alc_smb_paddr = ctx.alc_busaddr; /* Make sure we've not crossed 4GB boundary. */ if (lowaddr != BUS_SPACE_MAXADDR_32BIT && (error = alc_check_boundary(sc)) != 0) { device_printf(sc->alc_dev, "4GB boundary crossed, " "switching to 32bit DMA addressing mode.\n"); alc_dma_free(sc); /* * Limit max allowable DMA address space to 32bit * and try again. */ lowaddr = BUS_SPACE_MAXADDR_32BIT; goto again; } /* * Create Tx buffer parent tag. * AR81[3567]x allows 64bit DMA addressing of Tx/Rx buffers * so it needs separate parent DMA tag as parent DMA address * space could be restricted to be within 32bit address space * by 4GB boundary crossing. */ error = bus_dma_tag_create( bus_get_dma_tag(sc->alc_dev), /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 0, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->alc_cdata.alc_buffer_tag); if (error != 0) { device_printf(sc->alc_dev, "could not create parent buffer DMA tag.\n"); goto fail; } /* Create DMA tag for Tx buffers. */ error = bus_dma_tag_create( sc->alc_cdata.alc_buffer_tag, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ ALC_TSO_MAXSIZE, /* maxsize */ ALC_MAXTXSEGS, /* nsegments */ ALC_TSO_MAXSEGSIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->alc_cdata.alc_tx_tag); if (error != 0) { device_printf(sc->alc_dev, "could not create Tx DMA tag.\n"); goto fail; } /* Create DMA tag for Rx buffers. */ error = bus_dma_tag_create( sc->alc_cdata.alc_buffer_tag, /* parent */ ALC_RX_BUF_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES, /* maxsize */ 1, /* nsegments */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->alc_cdata.alc_rx_tag); if (error != 0) { device_printf(sc->alc_dev, "could not create Rx DMA tag.\n"); goto fail; } /* Create DMA maps for Tx buffers. 
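* One map is created per Tx descriptor slot and per Rx descriptor slot, plus a spare Rx map, presumably so a failed Rx buffer refill does not lose the existing mapping.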
*/ for (i = 0; i < ALC_TX_RING_CNT; i++) { txd = &sc->alc_cdata.alc_txdesc[i]; txd->tx_m = NULL; txd->tx_dmamap = NULL; error = bus_dmamap_create(sc->alc_cdata.alc_tx_tag, 0, &txd->tx_dmamap); if (error != 0) { device_printf(sc->alc_dev, "could not create Tx dmamap.\n"); goto fail; } } /* Create DMA maps for Rx buffers. */ if ((error = bus_dmamap_create(sc->alc_cdata.alc_rx_tag, 0, &sc->alc_cdata.alc_rx_sparemap)) != 0) { device_printf(sc->alc_dev, "could not create spare Rx dmamap.\n"); goto fail; } for (i = 0; i < ALC_RX_RING_CNT; i++) { rxd = &sc->alc_cdata.alc_rxdesc[i]; rxd->rx_m = NULL; rxd->rx_dmamap = NULL; error = bus_dmamap_create(sc->alc_cdata.alc_rx_tag, 0, &rxd->rx_dmamap); if (error != 0) { device_printf(sc->alc_dev, "could not create Rx dmamap.\n"); goto fail; } } fail: return (error); } static void alc_dma_free(struct alc_softc *sc) { struct alc_txdesc *txd; struct alc_rxdesc *rxd; int i; /* Tx buffers. */ if (sc->alc_cdata.alc_tx_tag != NULL) { for (i = 0; i < ALC_TX_RING_CNT; i++) { txd = &sc->alc_cdata.alc_txdesc[i]; if (txd->tx_dmamap != NULL) { bus_dmamap_destroy(sc->alc_cdata.alc_tx_tag, txd->tx_dmamap); txd->tx_dmamap = NULL; } } bus_dma_tag_destroy(sc->alc_cdata.alc_tx_tag); sc->alc_cdata.alc_tx_tag = NULL; } /* Rx buffers */ if (sc->alc_cdata.alc_rx_tag != NULL) { for (i = 0; i < ALC_RX_RING_CNT; i++) { rxd = &sc->alc_cdata.alc_rxdesc[i]; if (rxd->rx_dmamap != NULL) { bus_dmamap_destroy(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap); rxd->rx_dmamap = NULL; } } if (sc->alc_cdata.alc_rx_sparemap != NULL) { bus_dmamap_destroy(sc->alc_cdata.alc_rx_tag, sc->alc_cdata.alc_rx_sparemap); sc->alc_cdata.alc_rx_sparemap = NULL; } bus_dma_tag_destroy(sc->alc_cdata.alc_rx_tag); sc->alc_cdata.alc_rx_tag = NULL; } /* Tx descriptor ring. */ if (sc->alc_cdata.alc_tx_ring_tag != NULL) { if (sc->alc_rdata.alc_tx_ring_paddr != 0) bus_dmamap_unload(sc->alc_cdata.alc_tx_ring_tag, sc->alc_cdata.alc_tx_ring_map); if (sc->alc_rdata.alc_tx_ring != NULL) bus_dmamem_free(sc->alc_cdata.alc_tx_ring_tag, sc->alc_rdata.alc_tx_ring, sc->alc_cdata.alc_tx_ring_map); sc->alc_rdata.alc_tx_ring_paddr = 0; sc->alc_rdata.alc_tx_ring = NULL; bus_dma_tag_destroy(sc->alc_cdata.alc_tx_ring_tag); sc->alc_cdata.alc_tx_ring_tag = NULL; } /* Rx ring. */ if (sc->alc_cdata.alc_rx_ring_tag != NULL) { if (sc->alc_rdata.alc_rx_ring_paddr != 0) bus_dmamap_unload(sc->alc_cdata.alc_rx_ring_tag, sc->alc_cdata.alc_rx_ring_map); if (sc->alc_rdata.alc_rx_ring != NULL) bus_dmamem_free(sc->alc_cdata.alc_rx_ring_tag, sc->alc_rdata.alc_rx_ring, sc->alc_cdata.alc_rx_ring_map); sc->alc_rdata.alc_rx_ring_paddr = 0; sc->alc_rdata.alc_rx_ring = NULL; bus_dma_tag_destroy(sc->alc_cdata.alc_rx_ring_tag); sc->alc_cdata.alc_rx_ring_tag = NULL; } /* Rx return ring. 
*/ if (sc->alc_cdata.alc_rr_ring_tag != NULL) { if (sc->alc_rdata.alc_rr_ring_paddr != 0) bus_dmamap_unload(sc->alc_cdata.alc_rr_ring_tag, sc->alc_cdata.alc_rr_ring_map); if (sc->alc_rdata.alc_rr_ring != NULL) bus_dmamem_free(sc->alc_cdata.alc_rr_ring_tag, sc->alc_rdata.alc_rr_ring, sc->alc_cdata.alc_rr_ring_map); sc->alc_rdata.alc_rr_ring_paddr = 0; sc->alc_rdata.alc_rr_ring = NULL; bus_dma_tag_destroy(sc->alc_cdata.alc_rr_ring_tag); sc->alc_cdata.alc_rr_ring_tag = NULL; } /* CMB block */ if (sc->alc_cdata.alc_cmb_tag != NULL) { if (sc->alc_rdata.alc_cmb_paddr != 0) bus_dmamap_unload(sc->alc_cdata.alc_cmb_tag, sc->alc_cdata.alc_cmb_map); if (sc->alc_rdata.alc_cmb != NULL) bus_dmamem_free(sc->alc_cdata.alc_cmb_tag, sc->alc_rdata.alc_cmb, sc->alc_cdata.alc_cmb_map); sc->alc_rdata.alc_cmb_paddr = 0; sc->alc_rdata.alc_cmb = NULL; bus_dma_tag_destroy(sc->alc_cdata.alc_cmb_tag); sc->alc_cdata.alc_cmb_tag = NULL; } /* SMB block */ if (sc->alc_cdata.alc_smb_tag != NULL) { if (sc->alc_rdata.alc_smb_paddr != 0) bus_dmamap_unload(sc->alc_cdata.alc_smb_tag, sc->alc_cdata.alc_smb_map); if (sc->alc_rdata.alc_smb != NULL) bus_dmamem_free(sc->alc_cdata.alc_smb_tag, sc->alc_rdata.alc_smb, sc->alc_cdata.alc_smb_map); sc->alc_rdata.alc_smb_paddr = 0; sc->alc_rdata.alc_smb = NULL; bus_dma_tag_destroy(sc->alc_cdata.alc_smb_tag); sc->alc_cdata.alc_smb_tag = NULL; } if (sc->alc_cdata.alc_buffer_tag != NULL) { bus_dma_tag_destroy(sc->alc_cdata.alc_buffer_tag); sc->alc_cdata.alc_buffer_tag = NULL; } if (sc->alc_cdata.alc_parent_tag != NULL) { bus_dma_tag_destroy(sc->alc_cdata.alc_parent_tag); sc->alc_cdata.alc_parent_tag = NULL; } } static int alc_shutdown(device_t dev) { return (alc_suspend(dev)); } /* * Note, this driver resets the link speed to 10/100Mbps by * restarting auto-negotiation in suspend/shutdown phase but we * don't know whether that auto-negotiation would succeed or not * as driver has no control after powering off/suspend operation. * If the renegotiation fail WOL may not work. Running at 1Gbps * will draw more power than 375mA at 3.3V which is specified in * PCI specification and that would result in complete * shutdowning power to ethernet controller. * * TODO * Save current negotiated media speed/duplex/flow-control to * softc and restore the same link again after resuming. PHY * handling such as power down/resetting to 100Mbps may be better * handled in suspend method in phy driver. */ static void alc_setlinkspeed(struct alc_softc *sc) { struct mii_data *mii; int aneg, i; mii = device_get_softc(sc->alc_miibus); mii_pollstat(mii); aneg = 0; if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch IFM_SUBTYPE(mii->mii_media_active) { case IFM_10_T: case IFM_100_TX: return; case IFM_1000_T: aneg++; break; default: break; } } alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, MII_100T2CR, 0); alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA); alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG); DELAY(1000); if (aneg != 0) { /* * Poll link state until alc(4) get a 10/100Mbps link. 
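* The loop below polls roughly once a second, dropping the driver lock around the sleep, for up to MII_ANEGTICKS_GIGE iterations; if no 10/100 link is seen it falls through to forcing a 100Mbps full-duplex MAC configuration as a last resort.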
*/ for (i = 0; i < MII_ANEGTICKS_GIGE; i++) { mii_pollstat(mii); if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE( mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: alc_mac_config(sc); return; default: break; } } ALC_UNLOCK(sc); pause("alclnk", hz); ALC_LOCK(sc); } if (i == MII_ANEGTICKS_GIGE) device_printf(sc->alc_dev, "establishing a link failed, WOL may not work!"); } /* * No link, force MAC to have 100Mbps, full-duplex link. * This is the last resort and may/may not work. */ mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; alc_mac_config(sc); } static void alc_setwol(struct alc_softc *sc) { if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) alc_setwol_816x(sc); else alc_setwol_813x(sc); } static void alc_setwol_813x(struct alc_softc *sc) { struct ifnet *ifp; uint32_t reg, pmcs; uint16_t pmstat; ALC_LOCK_ASSERT(sc); alc_disable_l0s_l1(sc); ifp = sc->alc_ifp; if ((sc->alc_flags & ALC_FLAG_PM) == 0) { /* Disable WOL. */ CSR_WRITE_4(sc, ALC_WOL_CFG, 0); reg = CSR_READ_4(sc, ALC_PCIE_PHYMISC); reg |= PCIE_PHYMISC_FORCE_RCV_DET; CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, reg); /* Force PHY power down. */ alc_phy_down(sc); CSR_WRITE_4(sc, ALC_MASTER_CFG, CSR_READ_4(sc, ALC_MASTER_CFG) | MASTER_CLK_SEL_DIS); return; } if ((ifp->if_capenable & IFCAP_WOL) != 0) { if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0) alc_setlinkspeed(sc); CSR_WRITE_4(sc, ALC_MASTER_CFG, CSR_READ_4(sc, ALC_MASTER_CFG) & ~MASTER_CLK_SEL_DIS); } pmcs = 0; if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB; CSR_WRITE_4(sc, ALC_WOL_CFG, pmcs); reg = CSR_READ_4(sc, ALC_MAC_CFG); reg &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC | MAC_CFG_ALLMULTI | MAC_CFG_BCAST); if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) reg |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST; if ((ifp->if_capenable & IFCAP_WOL) != 0) reg |= MAC_CFG_RX_ENB; CSR_WRITE_4(sc, ALC_MAC_CFG, reg); reg = CSR_READ_4(sc, ALC_PCIE_PHYMISC); reg |= PCIE_PHYMISC_FORCE_RCV_DET; CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, reg); if ((ifp->if_capenable & IFCAP_WOL) == 0) { /* WOL disabled, PHY power down. */ alc_phy_down(sc); CSR_WRITE_4(sc, ALC_MASTER_CFG, CSR_READ_4(sc, ALC_MASTER_CFG) | MASTER_CLK_SEL_DIS); } /* Request PME. 
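* PME and PME-enable are set in the power management status register only when WOL is enabled on the interface; otherwise both bits are cleared.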
*/ pmstat = pci_read_config(sc->alc_dev, sc->alc_pmcap + PCIR_POWER_STATUS, 2); pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); if ((ifp->if_capenable & IFCAP_WOL) != 0) pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; pci_write_config(sc->alc_dev, sc->alc_pmcap + PCIR_POWER_STATUS, pmstat, 2); } static void alc_setwol_816x(struct alc_softc *sc) { struct ifnet *ifp; uint32_t gphy, mac, master, pmcs, reg; uint16_t pmstat; ALC_LOCK_ASSERT(sc); ifp = sc->alc_ifp; master = CSR_READ_4(sc, ALC_MASTER_CFG); master &= ~MASTER_CLK_SEL_DIS; gphy = CSR_READ_4(sc, ALC_GPHY_CFG); gphy &= ~(GPHY_CFG_EXT_RESET | GPHY_CFG_LED_MODE | GPHY_CFG_100AB_ENB | GPHY_CFG_PHY_PLL_ON); gphy |= GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE | GPHY_CFG_SEL_ANA_RESET; if ((sc->alc_flags & ALC_FLAG_PM) == 0) { CSR_WRITE_4(sc, ALC_WOL_CFG, 0); gphy |= GPHY_CFG_PHY_IDDQ | GPHY_CFG_PWDOWN_HW; mac = CSR_READ_4(sc, ALC_MAC_CFG); } else { if ((ifp->if_capenable & IFCAP_WOL) != 0) { gphy |= GPHY_CFG_EXT_RESET; if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0) alc_setlinkspeed(sc); } pmcs = 0; if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB; CSR_WRITE_4(sc, ALC_WOL_CFG, pmcs); mac = CSR_READ_4(sc, ALC_MAC_CFG); mac &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC | MAC_CFG_ALLMULTI | MAC_CFG_BCAST); if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) mac |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST; if ((ifp->if_capenable & IFCAP_WOL) != 0) mac |= MAC_CFG_RX_ENB; alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_S3DIG10, ANEG_S3DIG10_SL); } /* Enable OSC. */ reg = CSR_READ_4(sc, ALC_MISC); reg &= ~MISC_INTNLOSC_OPEN; CSR_WRITE_4(sc, ALC_MISC, reg); reg |= MISC_INTNLOSC_OPEN; CSR_WRITE_4(sc, ALC_MISC, reg); CSR_WRITE_4(sc, ALC_MASTER_CFG, master); CSR_WRITE_4(sc, ALC_MAC_CFG, mac); CSR_WRITE_4(sc, ALC_GPHY_CFG, gphy); reg = CSR_READ_4(sc, ALC_PDLL_TRNS1); reg |= PDLL_TRNS1_D3PLLOFF_ENB; CSR_WRITE_4(sc, ALC_PDLL_TRNS1, reg); if ((sc->alc_flags & ALC_FLAG_PM) != 0) { /* Request PME. */ pmstat = pci_read_config(sc->alc_dev, sc->alc_pmcap + PCIR_POWER_STATUS, 2); pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); if ((ifp->if_capenable & IFCAP_WOL) != 0) pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; pci_write_config(sc->alc_dev, sc->alc_pmcap + PCIR_POWER_STATUS, pmstat, 2); } } static int alc_suspend(device_t dev) { struct alc_softc *sc; sc = device_get_softc(dev); ALC_LOCK(sc); alc_stop(sc); alc_setwol(sc); ALC_UNLOCK(sc); return (0); } static int alc_resume(device_t dev) { struct alc_softc *sc; struct ifnet *ifp; uint16_t pmstat; sc = device_get_softc(dev); ALC_LOCK(sc); if ((sc->alc_flags & ALC_FLAG_PM) != 0) { /* Disable PME and clear PME status. */ pmstat = pci_read_config(sc->alc_dev, sc->alc_pmcap + PCIR_POWER_STATUS, 2); if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) { pmstat &= ~PCIM_PSTAT_PMEENABLE; pci_write_config(sc->alc_dev, sc->alc_pmcap + PCIR_POWER_STATUS, pmstat, 2); } } /* Reset PHY. 
*/ alc_phy_reset(sc); ifp = sc->alc_ifp; if ((ifp->if_flags & IFF_UP) != 0) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; alc_init_locked(sc); } ALC_UNLOCK(sc); return (0); } static int alc_encap(struct alc_softc *sc, struct mbuf **m_head) { struct alc_txdesc *txd, *txd_last; struct tx_desc *desc; struct mbuf *m; struct ip *ip; struct tcphdr *tcp; bus_dma_segment_t txsegs[ALC_MAXTXSEGS]; bus_dmamap_t map; uint32_t cflags, hdrlen, ip_off, poff, vtag; int error, idx, nsegs, prod; ALC_LOCK_ASSERT(sc); M_ASSERTPKTHDR((*m_head)); m = *m_head; ip = NULL; tcp = NULL; ip_off = poff = 0; if ((m->m_pkthdr.csum_flags & (ALC_CSUM_FEATURES | CSUM_TSO)) != 0) { /* * AR81[3567]x requires offset of TCP/UDP header in its * Tx descriptor to perform Tx checksum offloading. TSO * also requires TCP header offset and modification of * IP/TCP header. This kind of operation takes many CPU * cycles on FreeBSD so fast host CPU is required to get * smooth TSO performance. */ struct ether_header *eh; if (M_WRITABLE(m) == 0) { /* Get a writable copy. */ m = m_dup(*m_head, M_NOWAIT); /* Release original mbufs. */ m_freem(*m_head); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } *m_head = m; } ip_off = sizeof(struct ether_header); m = m_pullup(m, ip_off); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } eh = mtod(m, struct ether_header *); /* * Check if hardware VLAN insertion is off. * Additional check for LLC/SNAP frame? */ if (eh->ether_type == htons(ETHERTYPE_VLAN)) { ip_off = sizeof(struct ether_vlan_header); m = m_pullup(m, ip_off); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } } m = m_pullup(m, ip_off + sizeof(struct ip)); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } ip = (struct ip *)(mtod(m, char *) + ip_off); poff = ip_off + (ip->ip_hl << 2); if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { m = m_pullup(m, poff + sizeof(struct tcphdr)); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } tcp = (struct tcphdr *)(mtod(m, char *) + poff); m = m_pullup(m, poff + (tcp->th_off << 2)); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } /* * Due to strict adherence of Microsoft NDIS * Large Send specification, hardware expects * a pseudo TCP checksum inserted by upper * stack. Unfortunately the pseudo TCP * checksum that NDIS refers to does not include * TCP payload length so driver should recompute * the pseudo checksum here. Hopefully this * wouldn't be much burden on modern CPUs. * * Reset IP checksum and recompute TCP pseudo * checksum as NDIS specification said. */ ip = (struct ip *)(mtod(m, char *) + ip_off); tcp = (struct tcphdr *)(mtod(m, char *) + poff); ip->ip_sum = 0; tcp->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htons(IPPROTO_TCP)); } *m_head = m; } prod = sc->alc_cdata.alc_tx_prod; txd = &sc->alc_cdata.alc_txdesc[prod]; txd_last = txd; map = txd->tx_dmamap; error = bus_dmamap_load_mbuf_sg(sc->alc_cdata.alc_tx_tag, map, *m_head, txsegs, &nsegs, 0); if (error == EFBIG) { m = m_collapse(*m_head, M_NOWAIT, ALC_MAXTXSEGS); if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOMEM); } *m_head = m; error = bus_dmamap_load_mbuf_sg(sc->alc_cdata.alc_tx_tag, map, *m_head, txsegs, &nsegs, 0); if (error != 0) { m_freem(*m_head); *m_head = NULL; return (error); } } else if (error != 0) return (error); if (nsegs == 0) { m_freem(*m_head); *m_head = NULL; return (EIO); } /* Check descriptor overrun. 
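* If queueing this frame would leave fewer than three free Tx descriptors, the DMA mapping is undone and ENOBUFS is returned, presumably to keep some slack between the producer and consumer indices.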
*/ if (sc->alc_cdata.alc_tx_cnt + nsegs >= ALC_TX_RING_CNT - 3) { bus_dmamap_unload(sc->alc_cdata.alc_tx_tag, map); return (ENOBUFS); } bus_dmamap_sync(sc->alc_cdata.alc_tx_tag, map, BUS_DMASYNC_PREWRITE); m = *m_head; cflags = TD_ETHERNET; vtag = 0; desc = NULL; idx = 0; /* Configure VLAN hardware tag insertion. */ if ((m->m_flags & M_VLANTAG) != 0) { vtag = htons(m->m_pkthdr.ether_vtag); vtag = (vtag << TD_VLAN_SHIFT) & TD_VLAN_MASK; cflags |= TD_INS_VLAN_TAG; } if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { /* Request TSO and set MSS. */ cflags |= TD_TSO | TD_TSO_DESCV1; cflags |= ((uint32_t)m->m_pkthdr.tso_segsz << TD_MSS_SHIFT) & TD_MSS_MASK; /* Set TCP header offset. */ cflags |= (poff << TD_TCPHDR_OFFSET_SHIFT) & TD_TCPHDR_OFFSET_MASK; /* * AR81[3567]x requires the first buffer should * only hold IP/TCP header data. Payload should * be handled in other descriptors. */ hdrlen = poff + (tcp->th_off << 2); desc = &sc->alc_rdata.alc_tx_ring[prod]; desc->len = htole32(TX_BYTES(hdrlen | vtag)); desc->flags = htole32(cflags); desc->addr = htole64(txsegs[0].ds_addr); sc->alc_cdata.alc_tx_cnt++; ALC_DESC_INC(prod, ALC_TX_RING_CNT); if (m->m_len - hdrlen > 0) { /* Handle remaining payload of the first fragment. */ desc = &sc->alc_rdata.alc_tx_ring[prod]; desc->len = htole32(TX_BYTES((m->m_len - hdrlen) | vtag)); desc->flags = htole32(cflags); desc->addr = htole64(txsegs[0].ds_addr + hdrlen); sc->alc_cdata.alc_tx_cnt++; ALC_DESC_INC(prod, ALC_TX_RING_CNT); } /* Handle remaining fragments. */ idx = 1; } else if ((m->m_pkthdr.csum_flags & ALC_CSUM_FEATURES) != 0) { /* Configure Tx checksum offload. */ #ifdef ALC_USE_CUSTOM_CSUM cflags |= TD_CUSTOM_CSUM; /* Set checksum start offset. */ cflags |= ((poff >> 1) << TD_PLOAD_OFFSET_SHIFT) & TD_PLOAD_OFFSET_MASK; /* Set checksum insertion position of TCP/UDP. */ cflags |= (((poff + m->m_pkthdr.csum_data) >> 1) << TD_CUSTOM_CSUM_OFFSET_SHIFT) & TD_CUSTOM_CSUM_OFFSET_MASK; #else if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) cflags |= TD_IPCSUM; if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0) cflags |= TD_TCPCSUM; if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) cflags |= TD_UDPCSUM; /* Set TCP/UDP header offset. */ cflags |= (poff << TD_L4HDR_OFFSET_SHIFT) & TD_L4HDR_OFFSET_MASK; #endif } for (; idx < nsegs; idx++) { desc = &sc->alc_rdata.alc_tx_ring[prod]; desc->len = htole32(TX_BYTES(txsegs[idx].ds_len) | vtag); desc->flags = htole32(cflags); desc->addr = htole64(txsegs[idx].ds_addr); sc->alc_cdata.alc_tx_cnt++; ALC_DESC_INC(prod, ALC_TX_RING_CNT); } /* Update producer index. */ sc->alc_cdata.alc_tx_prod = prod; /* Finally set EOP on the last descriptor. */ prod = (prod + ALC_TX_RING_CNT - 1) % ALC_TX_RING_CNT; desc = &sc->alc_rdata.alc_tx_ring[prod]; desc->flags |= htole32(TD_EOP); /* Swap dmamap of the first and the last. */ txd = &sc->alc_cdata.alc_txdesc[prod]; map = txd_last->tx_dmamap; txd_last->tx_dmamap = txd->tx_dmamap; txd->tx_dmamap = map; txd->tx_m = m; return (0); } static void alc_start(struct ifnet *ifp) { struct alc_softc *sc; sc = ifp->if_softc; ALC_LOCK(sc); alc_start_locked(ifp); ALC_UNLOCK(sc); } static void alc_start_locked(struct ifnet *ifp) { struct alc_softc *sc; struct mbuf *m_head; int enq; sc = ifp->if_softc; ALC_LOCK_ASSERT(sc); /* Reclaim transmitted frames. 
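* Completed descriptors are reclaimed opportunistically here once ring occupancy reaches ALC_TX_DESC_HIWAT, before new frames are dequeued and encapsulated.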
*/ if (sc->alc_cdata.alc_tx_cnt >= ALC_TX_DESC_HIWAT) alc_txeof(sc); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING || (sc->alc_flags & ALC_FLAG_LINK) == 0) return; for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* * Pack the data into the transmit ring. If we * don't have room, set the OACTIVE flag and wait * for the NIC to drain the ring. */ if (alc_encap(sc, &m_head)) { if (m_head == NULL) break; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } enq++; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ ETHER_BPF_MTAP(ifp, m_head); } if (enq > 0) alc_start_tx(sc); } static void alc_start_tx(struct alc_softc *sc) { /* Sync descriptors. */ bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag, sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_PREWRITE); /* Kick. Assume we're using normal Tx priority queue. */ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) CSR_WRITE_2(sc, ALC_MBOX_TD_PRI0_PROD_IDX, (uint16_t)sc->alc_cdata.alc_tx_prod); else CSR_WRITE_4(sc, ALC_MBOX_TD_PROD_IDX, (sc->alc_cdata.alc_tx_prod << MBOX_TD_PROD_LO_IDX_SHIFT) & MBOX_TD_PROD_LO_IDX_MASK); /* Set a timeout in case the chip goes out to lunch. */ sc->alc_watchdog_timer = ALC_TX_TIMEOUT; } static void alc_watchdog(struct alc_softc *sc) { struct ifnet *ifp; ALC_LOCK_ASSERT(sc); if (sc->alc_watchdog_timer == 0 || --sc->alc_watchdog_timer) return; ifp = sc->alc_ifp; if ((sc->alc_flags & ALC_FLAG_LINK) == 0) { if_printf(sc->alc_ifp, "watchdog timeout (lost link)\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; alc_init_locked(sc); return; } if_printf(sc->alc_ifp, "watchdog timeout -- resetting\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; alc_init_locked(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) alc_start_locked(ifp); } static int alc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct alc_softc *sc; struct ifreq *ifr; struct mii_data *mii; int error, mask; sc = ifp->if_softc; ifr = (struct ifreq *)data; error = 0; switch (cmd) { case SIOCSIFMTU: if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > (sc->alc_ident->max_framelen - sizeof(struct ether_vlan_header) - ETHER_CRC_LEN) || ((sc->alc_flags & ALC_FLAG_JUMBO) == 0 && ifr->ifr_mtu > ETHERMTU)) error = EINVAL; else if (ifp->if_mtu != ifr->ifr_mtu) { ALC_LOCK(sc); ifp->if_mtu = ifr->ifr_mtu; /* AR81[3567]x has 13 bits MSS field. 
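* Because the MSS field cannot describe jumbo-sized segments, TSO is switched off whenever the MTU is raised above ALC_TSO_MTU; the same check is repeated when TSO is toggled through SIOCSIFCAP.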
*/ if (ifp->if_mtu > ALC_TSO_MTU && (ifp->if_capenable & IFCAP_TSO4) != 0) { ifp->if_capenable &= ~IFCAP_TSO4; ifp->if_hwassist &= ~CSUM_TSO; VLAN_CAPABILITIES(ifp); } ALC_UNLOCK(sc); } break; case SIOCSIFFLAGS: ALC_LOCK(sc); if ((ifp->if_flags & IFF_UP) != 0) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 && ((ifp->if_flags ^ sc->alc_if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) alc_rxfilter(sc); else alc_init_locked(sc); } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) alc_stop(sc); sc->alc_if_flags = ifp->if_flags; ALC_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: ALC_LOCK(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) alc_rxfilter(sc); ALC_UNLOCK(sc); break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: mii = device_get_softc(sc->alc_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); break; case SIOCSIFCAP: ALC_LOCK(sc); mask = ifr->ifr_reqcap ^ ifp->if_capenable; if ((mask & IFCAP_TXCSUM) != 0 && (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { ifp->if_capenable ^= IFCAP_TXCSUM; if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) ifp->if_hwassist |= ALC_CSUM_FEATURES; else ifp->if_hwassist &= ~ALC_CSUM_FEATURES; } if ((mask & IFCAP_TSO4) != 0 && (ifp->if_capabilities & IFCAP_TSO4) != 0) { ifp->if_capenable ^= IFCAP_TSO4; if ((ifp->if_capenable & IFCAP_TSO4) != 0) { /* AR81[3567]x has 13 bits MSS field. */ if (ifp->if_mtu > ALC_TSO_MTU) { ifp->if_capenable &= ~IFCAP_TSO4; ifp->if_hwassist &= ~CSUM_TSO; } else ifp->if_hwassist |= CSUM_TSO; } else ifp->if_hwassist &= ~CSUM_TSO; } if ((mask & IFCAP_WOL_MCAST) != 0 && (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0) ifp->if_capenable ^= IFCAP_WOL_MCAST; if ((mask & IFCAP_WOL_MAGIC) != 0 && (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0) ifp->if_capenable ^= IFCAP_WOL_MAGIC; if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) { ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; alc_rxvlan(sc); } if ((mask & IFCAP_VLAN_HWCSUM) != 0 && (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0) ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; if ((mask & IFCAP_VLAN_HWTSO) != 0 && (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0) ifp->if_capenable ^= IFCAP_VLAN_HWTSO; if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) ifp->if_capenable &= ~(IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM); ALC_UNLOCK(sc); VLAN_CAPABILITIES(ifp); break; default: error = ether_ioctl(ifp, cmd, data); break; } return (error); } static void alc_mac_config(struct alc_softc *sc) { struct mii_data *mii; uint32_t reg; ALC_LOCK_ASSERT(sc); mii = device_get_softc(sc->alc_miibus); reg = CSR_READ_4(sc, ALC_MAC_CFG); reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC | MAC_CFG_SPEED_MASK); if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0 || sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 || sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 || sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW; /* Reprogram MAC with resolved speed/duplex. 
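 * Roughly: IFM_10_T and IFM_100_TX select MAC_CFG_SPEED_10_100,
 * IFM_1000_T selects MAC_CFG_SPEED_1000, and the FDX/TXPAUSE/RXPAUSE
 * media options turn on MAC_CFG_FULL_DUPLEX and the Tx/Rx flow-control
 * enables, exactly as the switch below does.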
*/ switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: reg |= MAC_CFG_SPEED_10_100; break; case IFM_1000_T: reg |= MAC_CFG_SPEED_1000; break; } if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { reg |= MAC_CFG_FULL_DUPLEX; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) reg |= MAC_CFG_TX_FC; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) reg |= MAC_CFG_RX_FC; } CSR_WRITE_4(sc, ALC_MAC_CFG, reg); } static void alc_stats_clear(struct alc_softc *sc) { struct smb sb, *smb; uint32_t *reg; int i; if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) { bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, sc->alc_cdata.alc_smb_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); smb = sc->alc_rdata.alc_smb; /* Update done, clear. */ smb->updated = 0; bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, sc->alc_cdata.alc_smb_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } else { for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; reg++) { CSR_READ_4(sc, ALC_RX_MIB_BASE + i); i += sizeof(uint32_t); } /* Read Tx statistics. */ for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; reg++) { CSR_READ_4(sc, ALC_TX_MIB_BASE + i); i += sizeof(uint32_t); } } } static void alc_stats_update(struct alc_softc *sc) { struct alc_hw_stats *stat; struct smb sb, *smb; struct ifnet *ifp; uint32_t *reg; int i; ALC_LOCK_ASSERT(sc); ifp = sc->alc_ifp; stat = &sc->alc_stats; if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) { bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, sc->alc_cdata.alc_smb_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); smb = sc->alc_rdata.alc_smb; if (smb->updated == 0) return; } else { smb = &sb; /* Read Rx statistics. */ for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; reg++) { *reg = CSR_READ_4(sc, ALC_RX_MIB_BASE + i); i += sizeof(uint32_t); } /* Read Tx statistics. */ for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; reg++) { *reg = CSR_READ_4(sc, ALC_TX_MIB_BASE + i); i += sizeof(uint32_t); } } /* Rx stats. */ stat->rx_frames += smb->rx_frames; stat->rx_bcast_frames += smb->rx_bcast_frames; stat->rx_mcast_frames += smb->rx_mcast_frames; stat->rx_pause_frames += smb->rx_pause_frames; stat->rx_control_frames += smb->rx_control_frames; stat->rx_crcerrs += smb->rx_crcerrs; stat->rx_lenerrs += smb->rx_lenerrs; stat->rx_bytes += smb->rx_bytes; stat->rx_runts += smb->rx_runts; stat->rx_fragments += smb->rx_fragments; stat->rx_pkts_64 += smb->rx_pkts_64; stat->rx_pkts_65_127 += smb->rx_pkts_65_127; stat->rx_pkts_128_255 += smb->rx_pkts_128_255; stat->rx_pkts_256_511 += smb->rx_pkts_256_511; stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023; stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518; stat->rx_pkts_1519_max += smb->rx_pkts_1519_max; stat->rx_pkts_truncated += smb->rx_pkts_truncated; stat->rx_fifo_oflows += smb->rx_fifo_oflows; stat->rx_rrs_errs += smb->rx_rrs_errs; stat->rx_alignerrs += smb->rx_alignerrs; stat->rx_bcast_bytes += smb->rx_bcast_bytes; stat->rx_mcast_bytes += smb->rx_mcast_bytes; stat->rx_pkts_filtered += smb->rx_pkts_filtered; /* Tx stats. 
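 * (The Tx counters below are summed into alc_hw_stats and then folded
 * into the ifnet counters; the collision estimate used there is
 *   single + 2 * multi + late + excess * HDPX_CFG_RETRY_DEFAULT,
 * as the hardware only exposes per-category totals.)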
*/ stat->tx_frames += smb->tx_frames; stat->tx_bcast_frames += smb->tx_bcast_frames; stat->tx_mcast_frames += smb->tx_mcast_frames; stat->tx_pause_frames += smb->tx_pause_frames; stat->tx_excess_defer += smb->tx_excess_defer; stat->tx_control_frames += smb->tx_control_frames; stat->tx_deferred += smb->tx_deferred; stat->tx_bytes += smb->tx_bytes; stat->tx_pkts_64 += smb->tx_pkts_64; stat->tx_pkts_65_127 += smb->tx_pkts_65_127; stat->tx_pkts_128_255 += smb->tx_pkts_128_255; stat->tx_pkts_256_511 += smb->tx_pkts_256_511; stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023; stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518; stat->tx_pkts_1519_max += smb->tx_pkts_1519_max; stat->tx_single_colls += smb->tx_single_colls; stat->tx_multi_colls += smb->tx_multi_colls; stat->tx_late_colls += smb->tx_late_colls; stat->tx_excess_colls += smb->tx_excess_colls; stat->tx_underrun += smb->tx_underrun; stat->tx_desc_underrun += smb->tx_desc_underrun; stat->tx_lenerrs += smb->tx_lenerrs; stat->tx_pkts_truncated += smb->tx_pkts_truncated; stat->tx_bcast_bytes += smb->tx_bcast_bytes; stat->tx_mcast_bytes += smb->tx_mcast_bytes; /* Update counters in ifnet. */ if_inc_counter(ifp, IFCOUNTER_OPACKETS, smb->tx_frames); if_inc_counter(ifp, IFCOUNTER_COLLISIONS, smb->tx_single_colls + smb->tx_multi_colls * 2 + smb->tx_late_colls + smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT); if_inc_counter(ifp, IFCOUNTER_OERRORS, smb->tx_late_colls + smb->tx_excess_colls + smb->tx_underrun + smb->tx_pkts_truncated); if_inc_counter(ifp, IFCOUNTER_IPACKETS, smb->rx_frames); if_inc_counter(ifp, IFCOUNTER_IERRORS, smb->rx_crcerrs + smb->rx_lenerrs + smb->rx_runts + smb->rx_pkts_truncated + smb->rx_fifo_oflows + smb->rx_rrs_errs + smb->rx_alignerrs); if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) { /* Update done, clear. */ smb->updated = 0; bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, sc->alc_cdata.alc_smb_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } } static int alc_intr(void *arg) { struct alc_softc *sc; uint32_t status; sc = (struct alc_softc *)arg; status = CSR_READ_4(sc, ALC_INTR_STATUS); if ((status & ALC_INTRS) == 0) return (FILTER_STRAY); /* Disable interrupts. */ CSR_WRITE_4(sc, ALC_INTR_STATUS, INTR_DIS_INT); taskqueue_enqueue(sc->alc_tq, &sc->alc_int_task); return (FILTER_HANDLED); } static void alc_int_task(void *arg, int pending) { struct alc_softc *sc; struct ifnet *ifp; uint32_t status; int more; sc = (struct alc_softc *)arg; ifp = sc->alc_ifp; status = CSR_READ_4(sc, ALC_INTR_STATUS); ALC_LOCK(sc); if (sc->alc_morework != 0) { sc->alc_morework = 0; status |= INTR_RX_PKT; } if ((status & ALC_INTRS) == 0) goto done; /* Acknowledge interrupts but still disable interrupts. */ CSR_WRITE_4(sc, ALC_INTR_STATUS, status | INTR_DIS_INT); more = 0; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { if ((status & INTR_RX_PKT) != 0) { more = alc_rxintr(sc, sc->alc_process_limit); if (more == EAGAIN) sc->alc_morework = 1; else if (more == EIO) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; alc_init_locked(sc); ALC_UNLOCK(sc); return; } } if ((status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST | INTR_TXQ_TO_RST)) != 0) { if ((status & INTR_DMA_RD_TO_RST) != 0) device_printf(sc->alc_dev, "DMA read error! -- resetting\n"); if ((status & INTR_DMA_WR_TO_RST) != 0) device_printf(sc->alc_dev, "DMA write error! -- resetting\n"); if ((status & INTR_TXQ_TO_RST) != 0) device_printf(sc->alc_dev, "TxQ reset! 
-- resetting\n"); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; alc_init_locked(sc); ALC_UNLOCK(sc); return; } if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 && !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) alc_start_locked(ifp); } if (more == EAGAIN || (CSR_READ_4(sc, ALC_INTR_STATUS) & ALC_INTRS) != 0) { ALC_UNLOCK(sc); taskqueue_enqueue(sc->alc_tq, &sc->alc_int_task); return; } done: if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { /* Re-enable interrupts if we're running. */ CSR_WRITE_4(sc, ALC_INTR_STATUS, 0x7FFFFFFF); } ALC_UNLOCK(sc); } static void alc_txeof(struct alc_softc *sc) { struct ifnet *ifp; struct alc_txdesc *txd; uint32_t cons, prod; int prog; ALC_LOCK_ASSERT(sc); ifp = sc->alc_ifp; if (sc->alc_cdata.alc_tx_cnt == 0) return; bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag, sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_POSTWRITE); if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) { bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag, sc->alc_cdata.alc_cmb_map, BUS_DMASYNC_POSTREAD); prod = sc->alc_rdata.alc_cmb->cons; } else { if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) prod = CSR_READ_2(sc, ALC_MBOX_TD_PRI0_CONS_IDX); else { prod = CSR_READ_4(sc, ALC_MBOX_TD_CONS_IDX); /* Assume we're using normal Tx priority queue. */ prod = (prod & MBOX_TD_CONS_LO_IDX_MASK) >> MBOX_TD_CONS_LO_IDX_SHIFT; } } cons = sc->alc_cdata.alc_tx_cons; /* * Go through our Tx list and free mbufs for those * frames which have been transmitted. */ for (prog = 0; cons != prod; prog++, ALC_DESC_INC(cons, ALC_TX_RING_CNT)) { if (sc->alc_cdata.alc_tx_cnt <= 0) break; prog++; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->alc_cdata.alc_tx_cnt--; txd = &sc->alc_cdata.alc_txdesc[cons]; if (txd->tx_m != NULL) { /* Reclaim transmitted mbufs. */ bus_dmamap_sync(sc->alc_cdata.alc_tx_tag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->alc_cdata.alc_tx_tag, txd->tx_dmamap); m_freem(txd->tx_m); txd->tx_m = NULL; } } if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag, sc->alc_cdata.alc_cmb_map, BUS_DMASYNC_PREREAD); sc->alc_cdata.alc_tx_cons = cons; /* * Unarm watchdog timer only when there is no pending * frames in Tx queue. 
*/ if (sc->alc_cdata.alc_tx_cnt == 0) sc->alc_watchdog_timer = 0; } static int alc_newbuf(struct alc_softc *sc, struct alc_rxdesc *rxd) { struct mbuf *m; bus_dma_segment_t segs[1]; bus_dmamap_t map; int nsegs; m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = RX_BUF_SIZE_MAX; #ifndef __NO_STRICT_ALIGNMENT m_adj(m, sizeof(uint64_t)); #endif if (bus_dmamap_load_mbuf_sg(sc->alc_cdata.alc_rx_tag, sc->alc_cdata.alc_rx_sparemap, m, segs, &nsegs, 0) != 0) { m_freem(m); return (ENOBUFS); } KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); if (rxd->rx_m != NULL) { bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap); } map = rxd->rx_dmamap; rxd->rx_dmamap = sc->alc_cdata.alc_rx_sparemap; sc->alc_cdata.alc_rx_sparemap = map; bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_PREREAD); rxd->rx_m = m; rxd->rx_desc->addr = htole64(segs[0].ds_addr); return (0); } static int alc_rxintr(struct alc_softc *sc, int count) { struct ifnet *ifp; struct rx_rdesc *rrd; uint32_t nsegs, status; int rr_cons, prog; bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag, sc->alc_cdata.alc_rr_ring_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag, sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_POSTWRITE); rr_cons = sc->alc_cdata.alc_rr_cons; ifp = sc->alc_ifp; for (prog = 0; (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;) { if (count-- <= 0) break; rrd = &sc->alc_rdata.alc_rr_ring[rr_cons]; status = le32toh(rrd->status); if ((status & RRD_VALID) == 0) break; nsegs = RRD_RD_CNT(le32toh(rrd->rdinfo)); if (nsegs == 0) { /* This should not happen! */ device_printf(sc->alc_dev, "unexpected segment count -- resetting\n"); return (EIO); } alc_rxeof(sc, rrd); /* Clear Rx return status. */ rrd->status = 0; ALC_DESC_INC(rr_cons, ALC_RR_RING_CNT); sc->alc_cdata.alc_rx_cons += nsegs; sc->alc_cdata.alc_rx_cons %= ALC_RR_RING_CNT; prog += nsegs; } if (prog > 0) { /* Update the consumer index. */ sc->alc_cdata.alc_rr_cons = rr_cons; /* Sync Rx return descriptors. */ bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag, sc->alc_cdata.alc_rr_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* * Sync updated Rx descriptors such that controller see * modified buffer addresses. */ bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag, sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_PREWRITE); /* * Let controller know availability of new Rx buffers. * Since alc(4) use RXQ_CFG_RD_BURST_DEFAULT descriptors * it may be possible to update ALC_MBOX_RD0_PROD_IDX * only when Rx buffer pre-fetching is required. In * addition we already set ALC_RX_RD_FREE_THRESH to * RX_RD_FREE_THRESH_LO_DEFAULT descriptors. However * it still seems that pre-fetching needs more * experimentation. */ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) CSR_WRITE_2(sc, ALC_MBOX_RD0_PROD_IDX, (uint16_t)sc->alc_cdata.alc_rx_cons); else CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, sc->alc_cdata.alc_rx_cons); } return (count > 0 ? 0 : EAGAIN); } #ifndef __NO_STRICT_ALIGNMENT static struct mbuf * alc_fixup_rx(struct ifnet *ifp, struct mbuf *m) { struct mbuf *n; int i; uint16_t *src, *dst; src = mtod(m, uint16_t *); dst = src - 3; if (m->m_next == NULL) { for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) *dst++ = *src++; m->m_data -= 6; return (m); } /* * Append a new mbuf to received mbuf chain and copy ethernet * header from the mbuf chain. 
This can save lots of CPU * cycles for jumbo frame. */ MGETHDR(n, M_NOWAIT, MT_DATA); if (n == NULL) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); m_freem(m); return (NULL); } bcopy(m->m_data, n->m_data, ETHER_HDR_LEN); m->m_data += ETHER_HDR_LEN; m->m_len -= ETHER_HDR_LEN; n->m_len = ETHER_HDR_LEN; M_MOVE_PKTHDR(n, m); n->m_next = m; return (n); } #endif /* Receive a frame. */ static void alc_rxeof(struct alc_softc *sc, struct rx_rdesc *rrd) { struct alc_rxdesc *rxd; struct ifnet *ifp; struct mbuf *mp, *m; uint32_t rdinfo, status, vtag; int count, nsegs, rx_cons; ifp = sc->alc_ifp; status = le32toh(rrd->status); rdinfo = le32toh(rrd->rdinfo); rx_cons = RRD_RD_IDX(rdinfo); nsegs = RRD_RD_CNT(rdinfo); sc->alc_cdata.alc_rxlen = RRD_BYTES(status); if ((status & (RRD_ERR_SUM | RRD_ERR_LENGTH)) != 0) { /* * We want to pass the following frames to upper * layer regardless of error status of Rx return * ring. * * o IP/TCP/UDP checksum is bad. * o frame length and protocol specific length * does not match. * * Force network stack compute checksum for * errored frames. */ status |= RRD_TCP_UDPCSUM_NOK | RRD_IPCSUM_NOK; if ((status & (RRD_ERR_CRC | RRD_ERR_ALIGN | RRD_ERR_TRUNC | RRD_ERR_RUNT)) != 0) return; } for (count = 0; count < nsegs; count++, ALC_DESC_INC(rx_cons, ALC_RX_RING_CNT)) { rxd = &sc->alc_cdata.alc_rxdesc[rx_cons]; mp = rxd->rx_m; /* Add a new receive buffer to the ring. */ if (alc_newbuf(sc, rxd) != 0) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); /* Reuse Rx buffers. */ if (sc->alc_cdata.alc_rxhead != NULL) m_freem(sc->alc_cdata.alc_rxhead); break; } /* * Assume we've received a full sized frame. * Actual size is fixed when we encounter the end of * multi-segmented frame. */ mp->m_len = sc->alc_buf_size; /* Chain received mbufs. */ if (sc->alc_cdata.alc_rxhead == NULL) { sc->alc_cdata.alc_rxhead = mp; sc->alc_cdata.alc_rxtail = mp; } else { mp->m_flags &= ~M_PKTHDR; sc->alc_cdata.alc_rxprev_tail = sc->alc_cdata.alc_rxtail; sc->alc_cdata.alc_rxtail->m_next = mp; sc->alc_cdata.alc_rxtail = mp; } if (count == nsegs - 1) { /* Last desc. for this frame. */ m = sc->alc_cdata.alc_rxhead; m->m_flags |= M_PKTHDR; /* * It seems that L1C/L2C controller has no way * to tell hardware to strip CRC bytes. */ m->m_pkthdr.len = sc->alc_cdata.alc_rxlen - ETHER_CRC_LEN; if (nsegs > 1) { /* Set last mbuf size. */ mp->m_len = sc->alc_cdata.alc_rxlen - (nsegs - 1) * sc->alc_buf_size; /* Remove the CRC bytes in chained mbufs. */ if (mp->m_len <= ETHER_CRC_LEN) { sc->alc_cdata.alc_rxtail = sc->alc_cdata.alc_rxprev_tail; sc->alc_cdata.alc_rxtail->m_len -= (ETHER_CRC_LEN - mp->m_len); sc->alc_cdata.alc_rxtail->m_next = NULL; m_freem(mp); } else { mp->m_len -= ETHER_CRC_LEN; } } else m->m_len = m->m_pkthdr.len; m->m_pkthdr.rcvif = ifp; /* * Due to hardware bugs, Rx checksum offloading * was intentionally disabled. */ if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && (status & RRD_VLAN_TAG) != 0) { vtag = RRD_VLAN(le32toh(rrd->vtag)); m->m_pkthdr.ether_vtag = ntohs(vtag); m->m_flags |= M_VLANTAG; } #ifndef __NO_STRICT_ALIGNMENT m = alc_fixup_rx(ifp, m); if (m != NULL) #endif { /* Pass it on. */ ALC_UNLOCK(sc); (*ifp->if_input)(ifp, m); ALC_LOCK(sc); } } } /* Reset mbuf chains. */ ALC_RXCHAIN_RESET(sc); } static void alc_tick(void *arg) { struct alc_softc *sc; struct mii_data *mii; sc = (struct alc_softc *)arg; ALC_LOCK_ASSERT(sc); mii = device_get_softc(sc->alc_miibus); mii_tick(mii); alc_stats_update(sc); /* * alc(4) does not rely on Tx completion interrupts to reclaim * transferred buffers. 
Instead Tx completion interrupts are * used to hint for scheduling Tx task. So it's necessary to * release transmitted buffers by kicking Tx completion * handler. This limits the maximum reclamation delay to a hz. */ alc_txeof(sc); alc_watchdog(sc); callout_reset(&sc->alc_tick_ch, hz, alc_tick, sc); } static void alc_osc_reset(struct alc_softc *sc) { uint32_t reg; reg = CSR_READ_4(sc, ALC_MISC3); reg &= ~MISC3_25M_BY_SW; reg |= MISC3_25M_NOTO_INTNL; CSR_WRITE_4(sc, ALC_MISC3, reg); reg = CSR_READ_4(sc, ALC_MISC); if (AR816X_REV(sc->alc_rev) >= AR816X_REV_B0) { /* * Restore over-current protection default value. * This value could be reset by MAC reset. */ reg &= ~MISC_PSW_OCP_MASK; reg |= (MISC_PSW_OCP_DEFAULT << MISC_PSW_OCP_SHIFT); reg &= ~MISC_INTNLOSC_OPEN; CSR_WRITE_4(sc, ALC_MISC, reg); CSR_WRITE_4(sc, ALC_MISC, reg | MISC_INTNLOSC_OPEN); reg = CSR_READ_4(sc, ALC_MISC2); reg &= ~MISC2_CALB_START; CSR_WRITE_4(sc, ALC_MISC2, reg); CSR_WRITE_4(sc, ALC_MISC2, reg | MISC2_CALB_START); } else { reg &= ~MISC_INTNLOSC_OPEN; /* Disable isolate for revision A devices. */ if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1) reg &= ~MISC_ISO_ENB; CSR_WRITE_4(sc, ALC_MISC, reg | MISC_INTNLOSC_OPEN); CSR_WRITE_4(sc, ALC_MISC, reg); } DELAY(20); } static void alc_reset(struct alc_softc *sc) { uint32_t pmcfg, reg; int i; pmcfg = 0; if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { /* Reset workaround. */ CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, 1); if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 && (sc->alc_rev & 0x01) != 0) { /* Disable L0s/L1s before reset. */ pmcfg = CSR_READ_4(sc, ALC_PM_CFG); if ((pmcfg & (PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB)) != 0) { pmcfg &= ~(PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB); CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg); } } } reg = CSR_READ_4(sc, ALC_MASTER_CFG); reg |= MASTER_OOB_DIS_OFF | MASTER_RESET; CSR_WRITE_4(sc, ALC_MASTER_CFG, reg); if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { for (i = ALC_RESET_TIMEOUT; i > 0; i--) { DELAY(10); if (CSR_READ_4(sc, ALC_MBOX_RD0_PROD_IDX) == 0) break; } if (i == 0) device_printf(sc->alc_dev, "MAC reset timeout!\n"); } for (i = ALC_RESET_TIMEOUT; i > 0; i--) { DELAY(10); if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_RESET) == 0) break; } if (i == 0) device_printf(sc->alc_dev, "master reset timeout!\n"); for (i = ALC_RESET_TIMEOUT; i > 0; i--) { reg = CSR_READ_4(sc, ALC_IDLE_STATUS); if ((reg & (IDLE_STATUS_RXMAC | IDLE_STATUS_TXMAC | IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0) break; DELAY(10); } if (i == 0) device_printf(sc->alc_dev, "reset timeout(0x%08x)!\n", reg); if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 && (sc->alc_rev & 0x01) != 0) { reg = CSR_READ_4(sc, ALC_MASTER_CFG); reg |= MASTER_CLK_SEL_DIS; CSR_WRITE_4(sc, ALC_MASTER_CFG, reg); /* Restore L0s/L1s config. 
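 * pmcfg was captured (and its ASPM enable bits masked off) before the
 * MAC reset above; the conditional write below puts that saved PM_CFG
 * value back for the affected early AR816x revisions.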
*/ if ((pmcfg & (PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB)) != 0) CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg); } alc_osc_reset(sc); reg = CSR_READ_4(sc, ALC_MISC3); reg &= ~MISC3_25M_BY_SW; reg |= MISC3_25M_NOTO_INTNL; CSR_WRITE_4(sc, ALC_MISC3, reg); reg = CSR_READ_4(sc, ALC_MISC); reg &= ~MISC_INTNLOSC_OPEN; if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1) reg &= ~MISC_ISO_ENB; CSR_WRITE_4(sc, ALC_MISC, reg); DELAY(20); } if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0 || sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B || sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2) CSR_WRITE_4(sc, ALC_SERDES_LOCK, CSR_READ_4(sc, ALC_SERDES_LOCK) | SERDES_MAC_CLK_SLOWDOWN | SERDES_PHY_CLK_SLOWDOWN); } static void alc_init(void *xsc) { struct alc_softc *sc; sc = (struct alc_softc *)xsc; ALC_LOCK(sc); alc_init_locked(sc); ALC_UNLOCK(sc); } static void alc_init_locked(struct alc_softc *sc) { struct ifnet *ifp; struct mii_data *mii; uint8_t eaddr[ETHER_ADDR_LEN]; bus_addr_t paddr; uint32_t reg, rxf_hi, rxf_lo; ALC_LOCK_ASSERT(sc); ifp = sc->alc_ifp; mii = device_get_softc(sc->alc_miibus); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) return; /* * Cancel any pending I/O. */ alc_stop(sc); /* * Reset the chip to a known state. */ alc_reset(sc); /* Initialize Rx descriptors. */ if (alc_init_rx_ring(sc) != 0) { device_printf(sc->alc_dev, "no memory for Rx buffers.\n"); alc_stop(sc); return; } alc_init_rr_ring(sc); alc_init_tx_ring(sc); alc_init_cmb(sc); alc_init_smb(sc); /* Enable all clocks. */ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { CSR_WRITE_4(sc, ALC_CLK_GATING_CFG, CLK_GATING_DMAW_ENB | CLK_GATING_DMAR_ENB | CLK_GATING_TXQ_ENB | CLK_GATING_RXQ_ENB | CLK_GATING_TXMAC_ENB | CLK_GATING_RXMAC_ENB); if (AR816X_REV(sc->alc_rev) >= AR816X_REV_B0) CSR_WRITE_4(sc, ALC_IDLE_DECISN_TIMER, IDLE_DECISN_TIMER_DEFAULT_1MS); } else CSR_WRITE_4(sc, ALC_CLK_GATING_CFG, 0); /* Reprogram the station address. */ bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN); CSR_WRITE_4(sc, ALC_PAR0, eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]); CSR_WRITE_4(sc, ALC_PAR1, eaddr[0] << 8 | eaddr[1]); /* * Clear WOL status and disable all WOL feature as WOL * would interfere Rx operation under normal environments. */ CSR_READ_4(sc, ALC_WOL_CFG); CSR_WRITE_4(sc, ALC_WOL_CFG, 0); /* Set Tx descriptor base addresses. */ paddr = sc->alc_rdata.alc_tx_ring_paddr; CSR_WRITE_4(sc, ALC_TX_BASE_ADDR_HI, ALC_ADDR_HI(paddr)); CSR_WRITE_4(sc, ALC_TDL_HEAD_ADDR_LO, ALC_ADDR_LO(paddr)); /* We don't use high priority ring. */ CSR_WRITE_4(sc, ALC_TDH_HEAD_ADDR_LO, 0); /* Set Tx descriptor counter. */ CSR_WRITE_4(sc, ALC_TD_RING_CNT, (ALC_TX_RING_CNT << TD_RING_CNT_SHIFT) & TD_RING_CNT_MASK); /* Set Rx descriptor base addresses. */ paddr = sc->alc_rdata.alc_rx_ring_paddr; CSR_WRITE_4(sc, ALC_RX_BASE_ADDR_HI, ALC_ADDR_HI(paddr)); CSR_WRITE_4(sc, ALC_RD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr)); if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { /* We use one Rx ring. */ CSR_WRITE_4(sc, ALC_RD1_HEAD_ADDR_LO, 0); CSR_WRITE_4(sc, ALC_RD2_HEAD_ADDR_LO, 0); CSR_WRITE_4(sc, ALC_RD3_HEAD_ADDR_LO, 0); } /* Set Rx descriptor counter. */ CSR_WRITE_4(sc, ALC_RD_RING_CNT, (ALC_RX_RING_CNT << RD_RING_CNT_SHIFT) & RD_RING_CNT_MASK); /* * Let hardware split jumbo frames into alc_max_buf_sized chunks. * if it do not fit the buffer size. Rx return descriptor holds * a counter that indicates how many fragments were made by the * hardware. The buffer size should be multiple of 8 bytes. 
* Since hardware has limit on the size of buffer size, always * use the maximum value. * For strict-alignment architectures make sure to reduce buffer * size by 8 bytes to make room for alignment fixup. */ #ifndef __NO_STRICT_ALIGNMENT sc->alc_buf_size = RX_BUF_SIZE_MAX - sizeof(uint64_t); #else sc->alc_buf_size = RX_BUF_SIZE_MAX; #endif CSR_WRITE_4(sc, ALC_RX_BUF_SIZE, sc->alc_buf_size); paddr = sc->alc_rdata.alc_rr_ring_paddr; /* Set Rx return descriptor base addresses. */ CSR_WRITE_4(sc, ALC_RRD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr)); if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { /* We use one Rx return ring. */ CSR_WRITE_4(sc, ALC_RRD1_HEAD_ADDR_LO, 0); CSR_WRITE_4(sc, ALC_RRD2_HEAD_ADDR_LO, 0); CSR_WRITE_4(sc, ALC_RRD3_HEAD_ADDR_LO, 0); } /* Set Rx return descriptor counter. */ CSR_WRITE_4(sc, ALC_RRD_RING_CNT, (ALC_RR_RING_CNT << RRD_RING_CNT_SHIFT) & RRD_RING_CNT_MASK); paddr = sc->alc_rdata.alc_cmb_paddr; CSR_WRITE_4(sc, ALC_CMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr)); paddr = sc->alc_rdata.alc_smb_paddr; CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_HI, ALC_ADDR_HI(paddr)); CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr)); if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B) { /* Reconfigure SRAM - Vendor magic. */ CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_LEN, 0x000002A0); CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_LEN, 0x00000100); CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_ADDR, 0x029F0000); CSR_WRITE_4(sc, ALC_SRAM_RD0_ADDR, 0x02BF02A0); CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_ADDR, 0x03BF02C0); CSR_WRITE_4(sc, ALC_SRAM_TD_ADDR, 0x03DF03C0); CSR_WRITE_4(sc, ALC_TXF_WATER_MARK, 0x00000000); CSR_WRITE_4(sc, ALC_RD_DMA_CFG, 0x00000000); } /* Tell hardware that we're ready to load DMA blocks. */ CSR_WRITE_4(sc, ALC_DMA_BLOCK, DMA_BLOCK_LOAD); /* Configure interrupt moderation timer. */ reg = ALC_USECS(sc->alc_int_rx_mod) << IM_TIMER_RX_SHIFT; if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) reg |= ALC_USECS(sc->alc_int_tx_mod) << IM_TIMER_TX_SHIFT; CSR_WRITE_4(sc, ALC_IM_TIMER, reg); /* * We don't want to automatic interrupt clear as task queue * for the interrupt should know interrupt status. */ reg = CSR_READ_4(sc, ALC_MASTER_CFG); reg &= ~(MASTER_IM_RX_TIMER_ENB | MASTER_IM_TX_TIMER_ENB); reg |= MASTER_SA_TIMER_ENB; if (ALC_USECS(sc->alc_int_rx_mod) != 0) reg |= MASTER_IM_RX_TIMER_ENB; if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0 && ALC_USECS(sc->alc_int_tx_mod) != 0) reg |= MASTER_IM_TX_TIMER_ENB; CSR_WRITE_4(sc, ALC_MASTER_CFG, reg); /* * Disable interrupt re-trigger timer. We don't want automatic * re-triggering of un-ACKed interrupts. */ CSR_WRITE_4(sc, ALC_INTR_RETRIG_TIMER, ALC_USECS(0)); /* Configure CMB. */ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, ALC_TX_RING_CNT / 3); CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(sc->alc_int_tx_mod)); } else { if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) { CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, 4); CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(5000)); } else CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(0)); } /* * Hardware can be configured to issue SMB interrupt based * on programmed interval. Since there is a callout that is * invoked for every hz in driver we use that instead of * relying on periodic SMB interrupt. */ CSR_WRITE_4(sc, ALC_SMB_STAT_TIMER, ALC_USECS(0)); /* Clear MAC statistics. */ alc_stats_clear(sc); /* * Always use maximum frame size that controller can support. * Otherwise received frames that has larger frame length * than alc(4) MTU would be silently dropped in hardware. 
This * would make path-MTU discovery hard as sender wouldn't get * any responses from receiver. alc(4) supports * multi-fragmented frames on Rx path so it has no issue on * assembling fragmented frames. Using maximum frame size also * removes the need to reinitialize hardware when interface * MTU configuration was changed. * * Be conservative in what you do, be liberal in what you * accept from others - RFC 793. */ CSR_WRITE_4(sc, ALC_FRAME_SIZE, sc->alc_ident->max_framelen); if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { /* Disable header split(?) */ CSR_WRITE_4(sc, ALC_HDS_CFG, 0); /* Configure IPG/IFG parameters. */ CSR_WRITE_4(sc, ALC_IPG_IFG_CFG, ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK) | ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) | ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) | ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK)); /* Set parameters for half-duplex media. */ CSR_WRITE_4(sc, ALC_HDPX_CFG, ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) & HDPX_CFG_LCOL_MASK) | ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) & HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN | ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) & HDPX_CFG_ABEBT_MASK) | ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) & HDPX_CFG_JAMIPG_MASK)); } /* * Set TSO/checksum offload threshold. For frames that is * larger than this threshold, hardware wouldn't do * TSO/checksum offloading. */ reg = (sc->alc_ident->max_framelen >> TSO_OFFLOAD_THRESH_UNIT_SHIFT) & TSO_OFFLOAD_THRESH_MASK; if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) reg |= TSO_OFFLOAD_ERRLGPKT_DROP_ENB; CSR_WRITE_4(sc, ALC_TSO_OFFLOAD_THRESH, reg); /* Configure TxQ. */ reg = (alc_dma_burst[sc->alc_dma_rd_burst] << TXQ_CFG_TX_FIFO_BURST_SHIFT) & TXQ_CFG_TX_FIFO_BURST_MASK; if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B || sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) reg >>= 1; reg |= (TXQ_CFG_TD_BURST_DEFAULT << TXQ_CFG_TD_BURST_SHIFT) & TXQ_CFG_TD_BURST_MASK; reg |= TXQ_CFG_IP_OPTION_ENB | TXQ_CFG_8023_ENB; CSR_WRITE_4(sc, ALC_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE); if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { reg = (TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q1_BURST_SHIFT | TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q2_BURST_SHIFT | TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q3_BURST_SHIFT | HQTD_CFG_BURST_ENB); CSR_WRITE_4(sc, ALC_HQTD_CFG, reg); reg = WRR_PRI_RESTRICT_NONE; reg |= (WRR_PRI_DEFAULT << WRR_PRI0_SHIFT | WRR_PRI_DEFAULT << WRR_PRI1_SHIFT | WRR_PRI_DEFAULT << WRR_PRI2_SHIFT | WRR_PRI_DEFAULT << WRR_PRI3_SHIFT); CSR_WRITE_4(sc, ALC_WRR, reg); } else { /* Configure Rx free descriptor pre-fetching. */ CSR_WRITE_4(sc, ALC_RX_RD_FREE_THRESH, ((RX_RD_FREE_THRESH_HI_DEFAULT << RX_RD_FREE_THRESH_HI_SHIFT) & RX_RD_FREE_THRESH_HI_MASK) | ((RX_RD_FREE_THRESH_LO_DEFAULT << RX_RD_FREE_THRESH_LO_SHIFT) & RX_RD_FREE_THRESH_LO_MASK)); } /* * Configure flow control parameters. 
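 * Both thresholds are derived from the SRAM Rx FIFO length read back
 * from the chip; for the AR8131/AR8132 path below this is simply
 *   rxf_hi = (fifo_len * 8) / 10    (XON)
 *   rxf_lo = (fifo_len * 3) / 10    (XOFF)
 * while the AR816x path first subtracts RX_FIFO_PAUSE_816X_RSVD (or
 * RX_BUF_SIZE_MAX for small FIFOs) from the usable length.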
* XON : 80% of Rx FIFO * XOFF : 30% of Rx FIFO */ if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN); reg &= SRAM_RX_FIFO_LEN_MASK; reg *= 8; if (reg > 8 * 1024) reg -= RX_FIFO_PAUSE_816X_RSVD; else reg -= RX_BUF_SIZE_MAX; reg /= 8; CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH, ((reg << RX_FIFO_PAUSE_THRESH_LO_SHIFT) & RX_FIFO_PAUSE_THRESH_LO_MASK) | (((RX_FIFO_PAUSE_816X_RSVD / 8) << RX_FIFO_PAUSE_THRESH_HI_SHIFT) & RX_FIFO_PAUSE_THRESH_HI_MASK)); } else if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8131 || sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8132) { reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN); rxf_hi = (reg * 8) / 10; rxf_lo = (reg * 3) / 10; CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH, ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) & RX_FIFO_PAUSE_THRESH_LO_MASK) | ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) & RX_FIFO_PAUSE_THRESH_HI_MASK)); } if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { /* Disable RSS until I understand L1C/L2C's RSS logic. */ CSR_WRITE_4(sc, ALC_RSS_IDT_TABLE0, 0); CSR_WRITE_4(sc, ALC_RSS_CPU, 0); } /* Configure RxQ. */ reg = (RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) & RXQ_CFG_RD_BURST_MASK; reg |= RXQ_CFG_RSS_MODE_DIS; if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { reg |= (RXQ_CFG_816X_IDT_TBL_SIZE_DEFAULT << RXQ_CFG_816X_IDT_TBL_SIZE_SHIFT) & RXQ_CFG_816X_IDT_TBL_SIZE_MASK; if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0) reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_100M; } else { if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0 && sc->alc_ident->deviceid != DEVICEID_ATHEROS_AR8151_V2) reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_100M; } CSR_WRITE_4(sc, ALC_RXQ_CFG, reg); /* Configure DMA parameters. */ reg = DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI; reg |= sc->alc_rcb; if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) reg |= DMA_CFG_CMB_ENB; if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) reg |= DMA_CFG_SMB_ENB; else reg |= DMA_CFG_SMB_DIS; reg |= (sc->alc_dma_rd_burst & DMA_CFG_RD_BURST_MASK) << DMA_CFG_RD_BURST_SHIFT; reg |= (sc->alc_dma_wr_burst & DMA_CFG_WR_BURST_MASK) << DMA_CFG_WR_BURST_SHIFT; reg |= (DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) & DMA_CFG_RD_DELAY_CNT_MASK; reg |= (DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) & DMA_CFG_WR_DELAY_CNT_MASK; if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { switch (AR816X_REV(sc->alc_rev)) { case AR816X_REV_A0: case AR816X_REV_A1: reg |= DMA_CFG_RD_CHNL_SEL_2; break; case AR816X_REV_B0: /* FALLTHROUGH */ default: reg |= DMA_CFG_RD_CHNL_SEL_4; break; } } CSR_WRITE_4(sc, ALC_DMA_CFG, reg); /* * Configure Tx/Rx MACs. * - Auto-padding for short frames. * - Enable CRC generation. * Actual reconfiguration of MAC for resolved speed/duplex * is followed after detection of link establishment. * AR813x/AR815x always does checksum computation regardless * of MAC_CFG_RXCSUM_ENB bit. Also the controller is known to * have bug in protocol field in Rx return structure so * these controllers can't handle fragmented frames. Disable * Rx checksum offloading until there is a newer controller * that has sane implementation. 
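 * (Receive checksums are therefore left to the network stack; see the
 * related note in alc_rxeof() about forcing software checksums for
 * errored frames.)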
*/ reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX | ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) & MAC_CFG_PREAMBLE_MASK); if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0 || sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 || sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 || sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW; if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0) reg |= MAC_CFG_SPEED_10_100; else reg |= MAC_CFG_SPEED_1000; CSR_WRITE_4(sc, ALC_MAC_CFG, reg); /* Set up the receive filter. */ alc_rxfilter(sc); alc_rxvlan(sc); /* Acknowledge all pending interrupts and clear it. */ CSR_WRITE_4(sc, ALC_INTR_MASK, ALC_INTRS); CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF); CSR_WRITE_4(sc, ALC_INTR_STATUS, 0); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->alc_flags &= ~ALC_FLAG_LINK; /* Switch to the current media. */ alc_mediachange_locked(sc); callout_reset(&sc->alc_tick_ch, hz, alc_tick, sc); } static void alc_stop(struct alc_softc *sc) { struct ifnet *ifp; struct alc_txdesc *txd; struct alc_rxdesc *rxd; uint32_t reg; int i; ALC_LOCK_ASSERT(sc); /* * Mark the interface down and cancel the watchdog timer. */ ifp = sc->alc_ifp; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); sc->alc_flags &= ~ALC_FLAG_LINK; callout_stop(&sc->alc_tick_ch); sc->alc_watchdog_timer = 0; alc_stats_update(sc); /* Disable interrupts. */ CSR_WRITE_4(sc, ALC_INTR_MASK, 0); CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF); /* Disable DMA. */ reg = CSR_READ_4(sc, ALC_DMA_CFG); reg &= ~(DMA_CFG_CMB_ENB | DMA_CFG_SMB_ENB); reg |= DMA_CFG_SMB_DIS; CSR_WRITE_4(sc, ALC_DMA_CFG, reg); DELAY(1000); /* Stop Rx/Tx MACs. */ alc_stop_mac(sc); /* Disable interrupts which might be touched in taskq handler. */ CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF); /* Disable L0s/L1s */ alc_aspm(sc, 0, IFM_UNKNOWN); /* Reclaim Rx buffers that have been processed. */ if (sc->alc_cdata.alc_rxhead != NULL) m_freem(sc->alc_cdata.alc_rxhead); ALC_RXCHAIN_RESET(sc); /* * Free Tx/Rx mbufs still in the queues. */ for (i = 0; i < ALC_RX_RING_CNT; i++) { rxd = &sc->alc_cdata.alc_rxdesc[i]; if (rxd->rx_m != NULL) { bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap); m_freem(rxd->rx_m); rxd->rx_m = NULL; } } for (i = 0; i < ALC_TX_RING_CNT; i++) { txd = &sc->alc_cdata.alc_txdesc[i]; if (txd->tx_m != NULL) { bus_dmamap_sync(sc->alc_cdata.alc_tx_tag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->alc_cdata.alc_tx_tag, txd->tx_dmamap); m_freem(txd->tx_m); txd->tx_m = NULL; } } } static void alc_stop_mac(struct alc_softc *sc) { uint32_t reg; int i; alc_stop_queue(sc); /* Disable Rx/Tx MAC. */ reg = CSR_READ_4(sc, ALC_MAC_CFG); if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) { reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB); CSR_WRITE_4(sc, ALC_MAC_CFG, reg); } for (i = ALC_TIMEOUT; i > 0; i--) { reg = CSR_READ_4(sc, ALC_IDLE_STATUS); if ((reg & (IDLE_STATUS_RXMAC | IDLE_STATUS_TXMAC)) == 0) break; DELAY(10); } if (i == 0) device_printf(sc->alc_dev, "could not disable Rx/Tx MAC(0x%08x)!\n", reg); } static void alc_start_queue(struct alc_softc *sc) { uint32_t qcfg[] = { 0, RXQ_CFG_QUEUE0_ENB, RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB, RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB | RXQ_CFG_QUEUE2_ENB, RXQ_CFG_ENB }; uint32_t cfg; ALC_LOCK_ASSERT(sc); /* Enable RxQ. 
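 * Only Rx queue 0 is used by the driver (qcfg[] above lists the
 * per-queue enable combinations, of which only qcfg[1] is taken
 * below); the TxQ is enabled right after via TXQ_CFG_ENB.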
*/ cfg = CSR_READ_4(sc, ALC_RXQ_CFG); if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { cfg &= ~RXQ_CFG_ENB; cfg |= qcfg[1]; } else cfg |= RXQ_CFG_QUEUE0_ENB; CSR_WRITE_4(sc, ALC_RXQ_CFG, cfg); /* Enable TxQ. */ cfg = CSR_READ_4(sc, ALC_TXQ_CFG); cfg |= TXQ_CFG_ENB; CSR_WRITE_4(sc, ALC_TXQ_CFG, cfg); } static void alc_stop_queue(struct alc_softc *sc) { uint32_t reg; int i; /* Disable RxQ. */ reg = CSR_READ_4(sc, ALC_RXQ_CFG); if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { if ((reg & RXQ_CFG_ENB) != 0) { reg &= ~RXQ_CFG_ENB; CSR_WRITE_4(sc, ALC_RXQ_CFG, reg); } } else { if ((reg & RXQ_CFG_QUEUE0_ENB) != 0) { reg &= ~RXQ_CFG_QUEUE0_ENB; CSR_WRITE_4(sc, ALC_RXQ_CFG, reg); } } /* Disable TxQ. */ reg = CSR_READ_4(sc, ALC_TXQ_CFG); if ((reg & TXQ_CFG_ENB) != 0) { reg &= ~TXQ_CFG_ENB; CSR_WRITE_4(sc, ALC_TXQ_CFG, reg); } DELAY(40); for (i = ALC_TIMEOUT; i > 0; i--) { reg = CSR_READ_4(sc, ALC_IDLE_STATUS); if ((reg & (IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0) break; DELAY(10); } if (i == 0) device_printf(sc->alc_dev, "could not disable RxQ/TxQ (0x%08x)!\n", reg); } static void alc_init_tx_ring(struct alc_softc *sc) { struct alc_ring_data *rd; struct alc_txdesc *txd; int i; ALC_LOCK_ASSERT(sc); sc->alc_cdata.alc_tx_prod = 0; sc->alc_cdata.alc_tx_cons = 0; sc->alc_cdata.alc_tx_cnt = 0; rd = &sc->alc_rdata; bzero(rd->alc_tx_ring, ALC_TX_RING_SZ); for (i = 0; i < ALC_TX_RING_CNT; i++) { txd = &sc->alc_cdata.alc_txdesc[i]; txd->tx_m = NULL; } bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag, sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_PREWRITE); } static int alc_init_rx_ring(struct alc_softc *sc) { struct alc_ring_data *rd; struct alc_rxdesc *rxd; int i; ALC_LOCK_ASSERT(sc); sc->alc_cdata.alc_rx_cons = ALC_RX_RING_CNT - 1; sc->alc_morework = 0; rd = &sc->alc_rdata; bzero(rd->alc_rx_ring, ALC_RX_RING_SZ); for (i = 0; i < ALC_RX_RING_CNT; i++) { rxd = &sc->alc_cdata.alc_rxdesc[i]; rxd->rx_m = NULL; rxd->rx_desc = &rd->alc_rx_ring[i]; if (alc_newbuf(sc, rxd) != 0) return (ENOBUFS); } /* * Since controller does not update Rx descriptors, driver * does have to read Rx descriptors back so BUS_DMASYNC_PREWRITE * is enough to ensure coherence. */ bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag, sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_PREWRITE); /* Let controller know availability of new Rx buffers. 
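 * The producer index written below is alc_rx_cons, which was set to
 * ALC_RX_RING_CNT - 1 at the top of this function, handing every
 * freshly filled descriptor over to the hardware.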
*/ CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, sc->alc_cdata.alc_rx_cons); return (0); } static void alc_init_rr_ring(struct alc_softc *sc) { struct alc_ring_data *rd; ALC_LOCK_ASSERT(sc); sc->alc_cdata.alc_rr_cons = 0; ALC_RXCHAIN_RESET(sc); rd = &sc->alc_rdata; bzero(rd->alc_rr_ring, ALC_RR_RING_SZ); bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag, sc->alc_cdata.alc_rr_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } static void alc_init_cmb(struct alc_softc *sc) { struct alc_ring_data *rd; ALC_LOCK_ASSERT(sc); rd = &sc->alc_rdata; bzero(rd->alc_cmb, ALC_CMB_SZ); bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag, sc->alc_cdata.alc_cmb_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } static void alc_init_smb(struct alc_softc *sc) { struct alc_ring_data *rd; ALC_LOCK_ASSERT(sc); rd = &sc->alc_rdata; bzero(rd->alc_smb, ALC_SMB_SZ); bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, sc->alc_cdata.alc_smb_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } static void alc_rxvlan(struct alc_softc *sc) { struct ifnet *ifp; uint32_t reg; ALC_LOCK_ASSERT(sc); ifp = sc->alc_ifp; reg = CSR_READ_4(sc, ALC_MAC_CFG); if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) reg |= MAC_CFG_VLAN_TAG_STRIP; else reg &= ~MAC_CFG_VLAN_TAG_STRIP; CSR_WRITE_4(sc, ALC_MAC_CFG, reg); } static u_int alc_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { uint32_t *mchash = arg; uint32_t crc; crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN); mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f); return (1); } static void alc_rxfilter(struct alc_softc *sc) { struct ifnet *ifp; uint32_t mchash[2]; uint32_t rxcfg; ALC_LOCK_ASSERT(sc); ifp = sc->alc_ifp; bzero(mchash, sizeof(mchash)); rxcfg = CSR_READ_4(sc, ALC_MAC_CFG); rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC); if ((ifp->if_flags & IFF_BROADCAST) != 0) rxcfg |= MAC_CFG_BCAST; if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { if ((ifp->if_flags & IFF_PROMISC) != 0) rxcfg |= MAC_CFG_PROMISC; if ((ifp->if_flags & IFF_ALLMULTI) != 0) rxcfg |= MAC_CFG_ALLMULTI; mchash[0] = 0xFFFFFFFF; mchash[1] = 0xFFFFFFFF; goto chipit; } if_foreach_llmaddr(ifp, alc_hash_maddr, mchash); chipit: CSR_WRITE_4(sc, ALC_MAR0, mchash[0]); CSR_WRITE_4(sc, ALC_MAR1, mchash[1]); CSR_WRITE_4(sc, ALC_MAC_CFG, rxcfg); } static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) { int error, value; if (arg1 == NULL) return (EINVAL); value = *(int *)arg1; error = sysctl_handle_int(oidp, &value, 0, req); if (error || req->newptr == NULL) return (error); if (value < low || value > high) return (EINVAL); *(int *)arg1 = value; return (0); } static int sysctl_hw_alc_proc_limit(SYSCTL_HANDLER_ARGS) { return (sysctl_int_range(oidp, arg1, arg2, req, ALC_PROC_MIN, ALC_PROC_MAX)); } static int sysctl_hw_alc_int_mod(SYSCTL_HANDLER_ARGS) { return (sysctl_int_range(oidp, arg1, arg2, req, ALC_IM_TIMER_MIN, ALC_IM_TIMER_MAX)); } #ifdef DEBUGNET static void alc_debugnet_init(struct ifnet *ifp, int *nrxr, int *ncl, int *clsize) { struct alc_softc *sc; sc = if_getsoftc(ifp); KASSERT(sc->alc_buf_size <= MCLBYTES, ("incorrect cluster size")); *nrxr = ALC_RX_RING_CNT; *ncl = DEBUGNET_MAX_IN_FLIGHT; *clsize = MCLBYTES; } static void alc_debugnet_event(struct ifnet *ifp __unused, enum debugnet_ev event __unused) { } static int alc_debugnet_transmit(struct ifnet *ifp, struct mbuf *m) { struct alc_softc *sc; int error; sc = if_getsoftc(ifp); if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return (EBUSY); error = alc_encap(sc, &m); if (error == 0) alc_start_tx(sc); 
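 /* If encapsulation succeeded, alc_start_tx() has already kicked the Tx mailbox. */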
return (error); } static int alc_debugnet_poll(struct ifnet *ifp, int count) { struct alc_softc *sc; sc = if_getsoftc(ifp); if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return (EBUSY); alc_txeof(sc); return (alc_rxintr(sc, count)); } #endif /* DEBUGNET */ diff --git a/sys/dev/ale/if_ale.c b/sys/dev/ale/if_ale.c index 27aa9321357a..4a8afd887de6 100644 --- a/sys/dev/ale/if_ale.c +++ b/sys/dev/ale/if_ale.c @@ -1,3088 +1,3088 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2008, Pyun YongHyeon * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* Driver for Atheros AR8121/AR8113/AR8114 PCIe Ethernet. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* "device miibus" required. See GENERIC if you get errors here. */ #include "miibus_if.h" /* For more information about Tx checksum offload issues see ale_encap(). */ #define ALE_CSUM_FEATURES (CSUM_TCP | CSUM_UDP) MODULE_DEPEND(ale, pci, 1, 1, 1); MODULE_DEPEND(ale, ether, 1, 1, 1); MODULE_DEPEND(ale, miibus, 1, 1, 1); /* Tunables. */ static int msi_disable = 0; static int msix_disable = 0; TUNABLE_INT("hw.ale.msi_disable", &msi_disable); TUNABLE_INT("hw.ale.msix_disable", &msix_disable); /* * Devices supported by this driver. 
*/ static const struct ale_dev { uint16_t ale_vendorid; uint16_t ale_deviceid; const char *ale_name; } ale_devs[] = { { VENDORID_ATHEROS, DEVICEID_ATHEROS_AR81XX, "Atheros AR8121/AR8113/AR8114 PCIe Ethernet" }, }; static int ale_attach(device_t); static int ale_check_boundary(struct ale_softc *); static int ale_detach(device_t); static int ale_dma_alloc(struct ale_softc *); static void ale_dma_free(struct ale_softc *); static void ale_dmamap_cb(void *, bus_dma_segment_t *, int, int); static int ale_encap(struct ale_softc *, struct mbuf **); static void ale_get_macaddr(struct ale_softc *); static void ale_init(void *); static void ale_init_locked(struct ale_softc *); static void ale_init_rx_pages(struct ale_softc *); static void ale_init_tx_ring(struct ale_softc *); static void ale_int_task(void *, int); static int ale_intr(void *); static int ale_ioctl(struct ifnet *, u_long, caddr_t); static void ale_mac_config(struct ale_softc *); static int ale_miibus_readreg(device_t, int, int); static void ale_miibus_statchg(device_t); static int ale_miibus_writereg(device_t, int, int, int); static int ale_mediachange(struct ifnet *); static void ale_mediastatus(struct ifnet *, struct ifmediareq *); static void ale_phy_reset(struct ale_softc *); static int ale_probe(device_t); static void ale_reset(struct ale_softc *); static int ale_resume(device_t); static void ale_rx_update_page(struct ale_softc *, struct ale_rx_page **, uint32_t, uint32_t *); static void ale_rxcsum(struct ale_softc *, struct mbuf *, uint32_t); static int ale_rxeof(struct ale_softc *sc, int); static void ale_rxfilter(struct ale_softc *); static void ale_rxvlan(struct ale_softc *); static void ale_setlinkspeed(struct ale_softc *); static void ale_setwol(struct ale_softc *); static int ale_shutdown(device_t); static void ale_start(struct ifnet *); static void ale_start_locked(struct ifnet *); static void ale_stats_clear(struct ale_softc *); static void ale_stats_update(struct ale_softc *); static void ale_stop(struct ale_softc *); static void ale_stop_mac(struct ale_softc *); static int ale_suspend(device_t); static void ale_sysctl_node(struct ale_softc *); static void ale_tick(void *); static void ale_txeof(struct ale_softc *); static void ale_watchdog(struct ale_softc *); static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int); static int sysctl_hw_ale_proc_limit(SYSCTL_HANDLER_ARGS); static int sysctl_hw_ale_int_mod(SYSCTL_HANDLER_ARGS); static device_method_t ale_methods[] = { /* Device interface. */ DEVMETHOD(device_probe, ale_probe), DEVMETHOD(device_attach, ale_attach), DEVMETHOD(device_detach, ale_detach), DEVMETHOD(device_shutdown, ale_shutdown), DEVMETHOD(device_suspend, ale_suspend), DEVMETHOD(device_resume, ale_resume), /* MII interface. 
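 * These hooks let the shared miibus code manage the PHY through ale's
 * MDIO accessors and push link-state changes into ale_miibus_statchg().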
*/ DEVMETHOD(miibus_readreg, ale_miibus_readreg), DEVMETHOD(miibus_writereg, ale_miibus_writereg), DEVMETHOD(miibus_statchg, ale_miibus_statchg), DEVMETHOD_END }; static driver_t ale_driver = { "ale", ale_methods, sizeof(struct ale_softc) }; static devclass_t ale_devclass; DRIVER_MODULE(ale, pci, ale_driver, ale_devclass, NULL, NULL); MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, ale, ale_devs, nitems(ale_devs)); DRIVER_MODULE(miibus, ale, miibus_driver, miibus_devclass, NULL, NULL); static struct resource_spec ale_res_spec_mem[] = { { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE }, { -1, 0, 0 } }; static struct resource_spec ale_irq_spec_legacy[] = { { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { -1, 0, 0 } }; static struct resource_spec ale_irq_spec_msi[] = { { SYS_RES_IRQ, 1, RF_ACTIVE }, { -1, 0, 0 } }; static struct resource_spec ale_irq_spec_msix[] = { { SYS_RES_IRQ, 1, RF_ACTIVE }, { -1, 0, 0 } }; static int ale_miibus_readreg(device_t dev, int phy, int reg) { struct ale_softc *sc; uint32_t v; int i; sc = device_get_softc(dev); CSR_WRITE_4(sc, ALE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ | MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg)); for (i = ALE_PHY_TIMEOUT; i > 0; i--) { DELAY(5); v = CSR_READ_4(sc, ALE_MDIO); if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0) break; } if (i == 0) { device_printf(sc->ale_dev, "phy read timeout : %d\n", reg); return (0); } return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT); } static int ale_miibus_writereg(device_t dev, int phy, int reg, int val) { struct ale_softc *sc; uint32_t v; int i; sc = device_get_softc(dev); CSR_WRITE_4(sc, ALE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE | (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT | MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg)); for (i = ALE_PHY_TIMEOUT; i > 0; i--) { DELAY(5); v = CSR_READ_4(sc, ALE_MDIO); if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0) break; } if (i == 0) device_printf(sc->ale_dev, "phy write timeout : %d\n", reg); return (0); } static void ale_miibus_statchg(device_t dev) { struct ale_softc *sc; struct mii_data *mii; struct ifnet *ifp; uint32_t reg; sc = device_get_softc(dev); mii = device_get_softc(sc->ale_miibus); ifp = sc->ale_ifp; if (mii == NULL || ifp == NULL || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; sc->ale_flags &= ~ALE_FLAG_LINK; if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: sc->ale_flags |= ALE_FLAG_LINK; break; case IFM_1000_T: if ((sc->ale_flags & ALE_FLAG_FASTETHER) == 0) sc->ale_flags |= ALE_FLAG_LINK; break; default: break; } } /* Stop Rx/Tx MACs. */ ale_stop_mac(sc); /* Program MACs with resolved speed/duplex/flow-control. */ if ((sc->ale_flags & ALE_FLAG_LINK) != 0) { ale_mac_config(sc); /* Reenable Tx/Rx MACs. 
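 * The MACs were stopped above while the link was resolved and
 * ale_mac_config() has reprogrammed speed/duplex/flow control, so
 * setting MAC_CFG_TX_ENB | MAC_CFG_RX_ENB below resumes traffic.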
*/ reg = CSR_READ_4(sc, ALE_MAC_CFG); reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB; CSR_WRITE_4(sc, ALE_MAC_CFG, reg); } } static void ale_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) { struct ale_softc *sc; struct mii_data *mii; sc = ifp->if_softc; ALE_LOCK(sc); if ((ifp->if_flags & IFF_UP) == 0) { ALE_UNLOCK(sc); return; } mii = device_get_softc(sc->ale_miibus); mii_pollstat(mii); ifmr->ifm_status = mii->mii_media_status; ifmr->ifm_active = mii->mii_media_active; ALE_UNLOCK(sc); } static int ale_mediachange(struct ifnet *ifp) { struct ale_softc *sc; struct mii_data *mii; struct mii_softc *miisc; int error; sc = ifp->if_softc; ALE_LOCK(sc); mii = device_get_softc(sc->ale_miibus); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) PHY_RESET(miisc); error = mii_mediachg(mii); ALE_UNLOCK(sc); return (error); } static int ale_probe(device_t dev) { const struct ale_dev *sp; int i; uint16_t vendor, devid; vendor = pci_get_vendor(dev); devid = pci_get_device(dev); sp = ale_devs; for (i = 0; i < nitems(ale_devs); i++) { if (vendor == sp->ale_vendorid && devid == sp->ale_deviceid) { device_set_desc(dev, sp->ale_name); return (BUS_PROBE_DEFAULT); } sp++; } return (ENXIO); } static void ale_get_macaddr(struct ale_softc *sc) { uint32_t ea[2], reg; int i, vpdc; reg = CSR_READ_4(sc, ALE_SPI_CTRL); if ((reg & SPI_VPD_ENB) != 0) { reg &= ~SPI_VPD_ENB; CSR_WRITE_4(sc, ALE_SPI_CTRL, reg); } if (pci_find_cap(sc->ale_dev, PCIY_VPD, &vpdc) == 0) { /* * PCI VPD capability found, let TWSI reload EEPROM. * This will set ethernet address of controller. */ CSR_WRITE_4(sc, ALE_TWSI_CTRL, CSR_READ_4(sc, ALE_TWSI_CTRL) | TWSI_CTRL_SW_LD_START); for (i = 100; i > 0; i--) { DELAY(1000); reg = CSR_READ_4(sc, ALE_TWSI_CTRL); if ((reg & TWSI_CTRL_SW_LD_START) == 0) break; } if (i == 0) device_printf(sc->ale_dev, "reloading EEPROM timeout!\n"); } else { if (bootverbose) device_printf(sc->ale_dev, "PCI VPD capability not found!\n"); } ea[0] = CSR_READ_4(sc, ALE_PAR0); ea[1] = CSR_READ_4(sc, ALE_PAR1); sc->ale_eaddr[0] = (ea[1] >> 8) & 0xFF; sc->ale_eaddr[1] = (ea[1] >> 0) & 0xFF; sc->ale_eaddr[2] = (ea[0] >> 24) & 0xFF; sc->ale_eaddr[3] = (ea[0] >> 16) & 0xFF; sc->ale_eaddr[4] = (ea[0] >> 8) & 0xFF; sc->ale_eaddr[5] = (ea[0] >> 0) & 0xFF; } static void ale_phy_reset(struct ale_softc *sc) { /* Reset magic from Linux. */ CSR_WRITE_2(sc, ALE_GPHY_CTRL, GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE | GPHY_CTRL_SEL_ANA_RESET | GPHY_CTRL_PHY_PLL_ON); DELAY(1000); CSR_WRITE_2(sc, ALE_GPHY_CTRL, GPHY_CTRL_EXT_RESET | GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE | GPHY_CTRL_SEL_ANA_RESET | GPHY_CTRL_PHY_PLL_ON); DELAY(1000); #define ATPHY_DBG_ADDR 0x1D #define ATPHY_DBG_DATA 0x1E /* Enable hibernation mode. */ ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr, ATPHY_DBG_ADDR, 0x0B); ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr, ATPHY_DBG_DATA, 0xBC00); /* Set Class A/B for all modes. */ ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr, ATPHY_DBG_ADDR, 0x00); ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr, ATPHY_DBG_DATA, 0x02EF); /* Enable 10BT power saving. */ ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr, ATPHY_DBG_ADDR, 0x12); ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr, ATPHY_DBG_DATA, 0x4C04); /* Adjust 1000T power. */ ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr, ATPHY_DBG_ADDR, 0x04); ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr, ATPHY_DBG_ADDR, 0x8BBB); /* 10BT center tap voltage. 
*/ ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr, ATPHY_DBG_ADDR, 0x05); ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr, ATPHY_DBG_ADDR, 0x2C46); #undef ATPHY_DBG_ADDR #undef ATPHY_DBG_DATA DELAY(1000); } static int ale_attach(device_t dev) { struct ale_softc *sc; struct ifnet *ifp; uint16_t burst; int error, i, msic, msixc, pmc; uint32_t rxf_len, txf_len; error = 0; sc = device_get_softc(dev); sc->ale_dev = dev; mtx_init(&sc->ale_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->ale_tick_ch, &sc->ale_mtx, 0); - TASK_INIT(&sc->ale_int_task, 0, ale_int_task, sc); + NET_TASK_INIT(&sc->ale_int_task, 0, ale_int_task, sc); /* Map the device. */ pci_enable_busmaster(dev); sc->ale_res_spec = ale_res_spec_mem; sc->ale_irq_spec = ale_irq_spec_legacy; error = bus_alloc_resources(dev, sc->ale_res_spec, sc->ale_res); if (error != 0) { device_printf(dev, "cannot allocate memory resources.\n"); goto fail; } /* Set PHY address. */ sc->ale_phyaddr = ALE_PHY_ADDR; /* Reset PHY. */ ale_phy_reset(sc); /* Reset the ethernet controller. */ ale_reset(sc); /* Get PCI and chip id/revision. */ sc->ale_rev = pci_get_revid(dev); if (sc->ale_rev >= 0xF0) { /* L2E Rev. B. AR8114 */ sc->ale_flags |= ALE_FLAG_FASTETHER; } else { if ((CSR_READ_4(sc, ALE_PHY_STATUS) & PHY_STATUS_100M) != 0) { /* L1E AR8121 */ sc->ale_flags |= ALE_FLAG_JUMBO; } else { /* L2E Rev. A. AR8113 */ sc->ale_flags |= ALE_FLAG_FASTETHER; } } /* * All known controllers seems to require 4 bytes alignment * of Tx buffers to make Tx checksum offload with custom * checksum generation method work. */ sc->ale_flags |= ALE_FLAG_TXCSUM_BUG; /* * All known controllers seems to have issues on Rx checksum * offload for fragmented IP datagrams. */ sc->ale_flags |= ALE_FLAG_RXCSUM_BUG; /* * Don't use Tx CMB. It is known to cause RRS update failure * under certain circumstances. Typical phenomenon of the * issue would be unexpected sequence number encountered in * Rx handler. */ sc->ale_flags |= ALE_FLAG_TXCMB_BUG; sc->ale_chip_rev = CSR_READ_4(sc, ALE_MASTER_CFG) >> MASTER_CHIP_REV_SHIFT; if (bootverbose) { device_printf(dev, "PCI device revision : 0x%04x\n", sc->ale_rev); device_printf(dev, "Chip id/revision : 0x%04x\n", sc->ale_chip_rev); } txf_len = CSR_READ_4(sc, ALE_SRAM_TX_FIFO_LEN); rxf_len = CSR_READ_4(sc, ALE_SRAM_RX_FIFO_LEN); /* * Uninitialized hardware returns an invalid chip id/revision * as well as 0xFFFFFFFF for Tx/Rx fifo length. */ if (sc->ale_chip_rev == 0xFFFF || txf_len == 0xFFFFFFFF || rxf_len == 0xFFFFFFF) { device_printf(dev,"chip revision : 0x%04x, %u Tx FIFO " "%u Rx FIFO -- not initialized?\n", sc->ale_chip_rev, txf_len, rxf_len); error = ENXIO; goto fail; } device_printf(dev, "%u Tx FIFO, %u Rx FIFO\n", txf_len, rxf_len); /* Allocate IRQ resources. */ msixc = pci_msix_count(dev); msic = pci_msi_count(dev); if (bootverbose) { device_printf(dev, "MSIX count : %d\n", msixc); device_printf(dev, "MSI count : %d\n", msic); } /* Prefer MSIX over MSI. 
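 * Allocation order: MSI-X (ALE_MSIX_MESSAGES) first, then MSI, then
 * the shared legacy INTx spec that was installed as the default; a
 * partial vector allocation is handed back with pci_release_msi()
 * before falling through to the next method.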
*/ if (msix_disable == 0 || msi_disable == 0) { if (msix_disable == 0 && msixc == ALE_MSIX_MESSAGES && pci_alloc_msix(dev, &msixc) == 0) { if (msixc == ALE_MSIX_MESSAGES) { device_printf(dev, "Using %d MSIX messages.\n", msixc); sc->ale_flags |= ALE_FLAG_MSIX; sc->ale_irq_spec = ale_irq_spec_msix; } else pci_release_msi(dev); } if (msi_disable == 0 && (sc->ale_flags & ALE_FLAG_MSIX) == 0 && msic == ALE_MSI_MESSAGES && pci_alloc_msi(dev, &msic) == 0) { if (msic == ALE_MSI_MESSAGES) { device_printf(dev, "Using %d MSI messages.\n", msic); sc->ale_flags |= ALE_FLAG_MSI; sc->ale_irq_spec = ale_irq_spec_msi; } else pci_release_msi(dev); } } error = bus_alloc_resources(dev, sc->ale_irq_spec, sc->ale_irq); if (error != 0) { device_printf(dev, "cannot allocate IRQ resources.\n"); goto fail; } /* Get DMA parameters from PCIe device control register. */ if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) { sc->ale_flags |= ALE_FLAG_PCIE; burst = pci_read_config(dev, i + 0x08, 2); /* Max read request size. */ sc->ale_dma_rd_burst = ((burst >> 12) & 0x07) << DMA_CFG_RD_BURST_SHIFT; /* Max payload size. */ sc->ale_dma_wr_burst = ((burst >> 5) & 0x07) << DMA_CFG_WR_BURST_SHIFT; if (bootverbose) { device_printf(dev, "Read request size : %d bytes.\n", 128 << ((burst >> 12) & 0x07)); device_printf(dev, "TLP payload size : %d bytes.\n", 128 << ((burst >> 5) & 0x07)); } } else { sc->ale_dma_rd_burst = DMA_CFG_RD_BURST_128; sc->ale_dma_wr_burst = DMA_CFG_WR_BURST_128; } /* Create device sysctl node. */ ale_sysctl_node(sc); if ((error = ale_dma_alloc(sc)) != 0) goto fail; /* Load station address. */ ale_get_macaddr(sc); ifp = sc->ale_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "cannot allocate ifnet structure.\n"); error = ENXIO; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = ale_ioctl; ifp->if_start = ale_start; ifp->if_init = ale_init; ifp->if_snd.ifq_drv_maxlen = ALE_TX_RING_CNT - 1; IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); IFQ_SET_READY(&ifp->if_snd); ifp->if_capabilities = IFCAP_RXCSUM | IFCAP_TXCSUM | IFCAP_TSO4; ifp->if_hwassist = ALE_CSUM_FEATURES | CSUM_TSO; if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) { sc->ale_flags |= ALE_FLAG_PMCAP; ifp->if_capabilities |= IFCAP_WOL_MAGIC | IFCAP_WOL_MCAST; } ifp->if_capenable = ifp->if_capabilities; /* Set up MII bus. */ error = mii_attach(dev, &sc->ale_miibus, ifp, ale_mediachange, ale_mediastatus, BMSR_DEFCAPMASK, sc->ale_phyaddr, MII_OFFSET_ANY, MIIF_DOPAUSE); if (error != 0) { device_printf(dev, "attaching PHYs failed\n"); goto fail; } ether_ifattach(ifp, sc->ale_eaddr); /* VLAN capability setup. */ ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO; ifp->if_capenable = ifp->if_capabilities; /* * Even though controllers supported by ale(3) have Rx checksum * offload bug the workaround for fragmented frames seemed to * work so far. However it seems Rx checksum offload does not * work under certain conditions. So disable Rx checksum offload * until I find more clue about it but allow users to override it. */ ifp->if_capenable &= ~IFCAP_RXCSUM; /* Tell the upper layer(s) we support long frames. */ ifp->if_hdrlen = sizeof(struct ether_vlan_header); /* Create local taskq. 
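The interrupt filter only masks the controller and enqueues ale_int_task; this fast taskqueue is where the actual Rx/Tx processing runs.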
*/ sc->ale_tq = taskqueue_create_fast("ale_taskq", M_WAITOK, taskqueue_thread_enqueue, &sc->ale_tq); if (sc->ale_tq == NULL) { device_printf(dev, "could not create taskqueue.\n"); ether_ifdetach(ifp); error = ENXIO; goto fail; } taskqueue_start_threads(&sc->ale_tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->ale_dev)); if ((sc->ale_flags & ALE_FLAG_MSIX) != 0) msic = ALE_MSIX_MESSAGES; else if ((sc->ale_flags & ALE_FLAG_MSI) != 0) msic = ALE_MSI_MESSAGES; else msic = 1; for (i = 0; i < msic; i++) { error = bus_setup_intr(dev, sc->ale_irq[i], INTR_TYPE_NET | INTR_MPSAFE, ale_intr, NULL, sc, &sc->ale_intrhand[i]); if (error != 0) break; } if (error != 0) { device_printf(dev, "could not set up interrupt handler.\n"); taskqueue_free(sc->ale_tq); sc->ale_tq = NULL; ether_ifdetach(ifp); goto fail; } fail: if (error != 0) ale_detach(dev); return (error); } static int ale_detach(device_t dev) { struct ale_softc *sc; struct ifnet *ifp; int i, msic; sc = device_get_softc(dev); ifp = sc->ale_ifp; if (device_is_attached(dev)) { ether_ifdetach(ifp); ALE_LOCK(sc); ale_stop(sc); ALE_UNLOCK(sc); callout_drain(&sc->ale_tick_ch); taskqueue_drain(sc->ale_tq, &sc->ale_int_task); } if (sc->ale_tq != NULL) { taskqueue_drain(sc->ale_tq, &sc->ale_int_task); taskqueue_free(sc->ale_tq); sc->ale_tq = NULL; } if (sc->ale_miibus != NULL) { device_delete_child(dev, sc->ale_miibus); sc->ale_miibus = NULL; } bus_generic_detach(dev); ale_dma_free(sc); if (ifp != NULL) { if_free(ifp); sc->ale_ifp = NULL; } if ((sc->ale_flags & ALE_FLAG_MSIX) != 0) msic = ALE_MSIX_MESSAGES; else if ((sc->ale_flags & ALE_FLAG_MSI) != 0) msic = ALE_MSI_MESSAGES; else msic = 1; for (i = 0; i < msic; i++) { if (sc->ale_intrhand[i] != NULL) { bus_teardown_intr(dev, sc->ale_irq[i], sc->ale_intrhand[i]); sc->ale_intrhand[i] = NULL; } } bus_release_resources(dev, sc->ale_irq_spec, sc->ale_irq); if ((sc->ale_flags & (ALE_FLAG_MSI | ALE_FLAG_MSIX)) != 0) pci_release_msi(dev); bus_release_resources(dev, sc->ale_res_spec, sc->ale_res); mtx_destroy(&sc->ale_mtx); return (0); } #define ALE_SYSCTL_STAT_ADD32(c, h, n, p, d) \ SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) #if __FreeBSD_version >= 900030 #define ALE_SYSCTL_STAT_ADD64(c, h, n, p, d) \ SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d) #elif __FreeBSD_version > 800000 #define ALE_SYSCTL_STAT_ADD64(c, h, n, p, d) \ SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d) #else #define ALE_SYSCTL_STAT_ADD64(c, h, n, p, d) \ SYSCTL_ADD_ULONG(c, h, OID_AUTO, n, CTLFLAG_RD, p, d) #endif static void ale_sysctl_node(struct ale_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *child, *parent; struct sysctl_oid *tree; struct ale_hw_stats *stats; int error; stats = &sc->ale_stats; ctx = device_get_sysctl_ctx(sc->ale_dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->ale_dev)); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_rx_mod", CTLTYPE_INT | CTLFLAG_RW, &sc->ale_int_rx_mod, 0, sysctl_hw_ale_int_mod, "I", "ale Rx interrupt moderation"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_tx_mod", CTLTYPE_INT | CTLFLAG_RW, &sc->ale_int_tx_mod, 0, sysctl_hw_ale_int_mod, "I", "ale Tx interrupt moderation"); /* Pull in device tunables. 
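int_rx_mod and int_tx_mod come from device hints; values outside the ALE_IM_TIMER_MIN/ALE_IM_TIMER_MAX range are reported and reset to the defaults below.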
*/ sc->ale_int_rx_mod = ALE_IM_RX_TIMER_DEFAULT; error = resource_int_value(device_get_name(sc->ale_dev), device_get_unit(sc->ale_dev), "int_rx_mod", &sc->ale_int_rx_mod); if (error == 0) { if (sc->ale_int_rx_mod < ALE_IM_TIMER_MIN || sc->ale_int_rx_mod > ALE_IM_TIMER_MAX) { device_printf(sc->ale_dev, "int_rx_mod value out of " "range; using default: %d\n", ALE_IM_RX_TIMER_DEFAULT); sc->ale_int_rx_mod = ALE_IM_RX_TIMER_DEFAULT; } } sc->ale_int_tx_mod = ALE_IM_TX_TIMER_DEFAULT; error = resource_int_value(device_get_name(sc->ale_dev), device_get_unit(sc->ale_dev), "int_tx_mod", &sc->ale_int_tx_mod); if (error == 0) { if (sc->ale_int_tx_mod < ALE_IM_TIMER_MIN || sc->ale_int_tx_mod > ALE_IM_TIMER_MAX) { device_printf(sc->ale_dev, "int_tx_mod value out of " "range; using default: %d\n", ALE_IM_TX_TIMER_DEFAULT); sc->ale_int_tx_mod = ALE_IM_TX_TIMER_DEFAULT; } } SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW, &sc->ale_process_limit, 0, sysctl_hw_ale_proc_limit, "I", "max number of Rx events to process"); /* Pull in device tunables. */ sc->ale_process_limit = ALE_PROC_DEFAULT; error = resource_int_value(device_get_name(sc->ale_dev), device_get_unit(sc->ale_dev), "process_limit", &sc->ale_process_limit); if (error == 0) { if (sc->ale_process_limit < ALE_PROC_MIN || sc->ale_process_limit > ALE_PROC_MAX) { device_printf(sc->ale_dev, "process_limit value out of range; " "using default: %d\n", ALE_PROC_DEFAULT); sc->ale_process_limit = ALE_PROC_DEFAULT; } } /* Misc statistics. */ ALE_SYSCTL_STAT_ADD32(ctx, child, "reset_brk_seq", &stats->reset_brk_seq, "Controller resets due to broken Rx sequence number"); tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, NULL, "ALE statistics"); parent = SYSCTL_CHILDREN(tree); /* Rx statistics. 
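These nodes export the software copies of the hardware Rx MIB counters which ale_stats_update() accumulates from the once-per-second ale_tick() callout.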
*/ tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD, NULL, "Rx MAC statistics"); child = SYSCTL_CHILDREN(tree); ALE_SYSCTL_STAT_ADD32(ctx, child, "good_frames", &stats->rx_frames, "Good frames"); ALE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames", &stats->rx_bcast_frames, "Good broadcast frames"); ALE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames", &stats->rx_mcast_frames, "Good multicast frames"); ALE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames", &stats->rx_pause_frames, "Pause control frames"); ALE_SYSCTL_STAT_ADD32(ctx, child, "control_frames", &stats->rx_control_frames, "Control frames"); ALE_SYSCTL_STAT_ADD32(ctx, child, "crc_errs", &stats->rx_crcerrs, "CRC errors"); ALE_SYSCTL_STAT_ADD32(ctx, child, "len_errs", &stats->rx_lenerrs, "Frames with length mismatched"); ALE_SYSCTL_STAT_ADD64(ctx, child, "good_octets", &stats->rx_bytes, "Good octets"); ALE_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets", &stats->rx_bcast_bytes, "Good broadcast octets"); ALE_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets", &stats->rx_mcast_bytes, "Good multicast octets"); ALE_SYSCTL_STAT_ADD32(ctx, child, "runts", &stats->rx_runts, "Too short frames"); ALE_SYSCTL_STAT_ADD32(ctx, child, "fragments", &stats->rx_fragments, "Fragmented frames"); ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_64", &stats->rx_pkts_64, "64 bytes frames"); ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127", &stats->rx_pkts_65_127, "65 to 127 bytes frames"); ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255", &stats->rx_pkts_128_255, "128 to 255 bytes frames"); ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511", &stats->rx_pkts_256_511, "256 to 511 bytes frames"); ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023", &stats->rx_pkts_512_1023, "512 to 1023 bytes frames"); ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518", &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames"); ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max", &stats->rx_pkts_1519_max, "1519 to max frames"); ALE_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs", &stats->rx_pkts_truncated, "Truncated frames due to MTU size"); ALE_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows", &stats->rx_fifo_oflows, "FIFO overflows"); ALE_SYSCTL_STAT_ADD32(ctx, child, "rrs_errs", &stats->rx_rrs_errs, "Return status write-back errors"); ALE_SYSCTL_STAT_ADD32(ctx, child, "align_errs", &stats->rx_alignerrs, "Alignment errors"); ALE_SYSCTL_STAT_ADD32(ctx, child, "filtered", &stats->rx_pkts_filtered, "Frames dropped due to address filtering"); /* Tx statistics. 
*/ tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD, NULL, "Tx MAC statistics"); child = SYSCTL_CHILDREN(tree); ALE_SYSCTL_STAT_ADD32(ctx, child, "good_frames", &stats->tx_frames, "Good frames"); ALE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames", &stats->tx_bcast_frames, "Good broadcast frames"); ALE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames", &stats->tx_mcast_frames, "Good multicast frames"); ALE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames", &stats->tx_pause_frames, "Pause control frames"); ALE_SYSCTL_STAT_ADD32(ctx, child, "control_frames", &stats->tx_control_frames, "Control frames"); ALE_SYSCTL_STAT_ADD32(ctx, child, "excess_defers", &stats->tx_excess_defer, "Frames with excessive deferrals"); ALE_SYSCTL_STAT_ADD32(ctx, child, "defers", &stats->tx_deferred, "Frames with deferrals"); ALE_SYSCTL_STAT_ADD64(ctx, child, "good_octets", &stats->tx_bytes, "Good octets"); ALE_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets", &stats->tx_bcast_bytes, "Good broadcast octets"); ALE_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets", &stats->tx_mcast_bytes, "Good multicast octets"); ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_64", &stats->tx_pkts_64, "64 bytes frames"); ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127", &stats->tx_pkts_65_127, "65 to 127 bytes frames"); ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255", &stats->tx_pkts_128_255, "128 to 255 bytes frames"); ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511", &stats->tx_pkts_256_511, "256 to 511 bytes frames"); ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023", &stats->tx_pkts_512_1023, "512 to 1023 bytes frames"); ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518", &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames"); ALE_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max", &stats->tx_pkts_1519_max, "1519 to max frames"); ALE_SYSCTL_STAT_ADD32(ctx, child, "single_colls", &stats->tx_single_colls, "Single collisions"); ALE_SYSCTL_STAT_ADD32(ctx, child, "multi_colls", &stats->tx_multi_colls, "Multiple collisions"); ALE_SYSCTL_STAT_ADD32(ctx, child, "late_colls", &stats->tx_late_colls, "Late collisions"); ALE_SYSCTL_STAT_ADD32(ctx, child, "excess_colls", &stats->tx_excess_colls, "Excessive collisions"); ALE_SYSCTL_STAT_ADD32(ctx, child, "underruns", &stats->tx_underrun, "FIFO underruns"); ALE_SYSCTL_STAT_ADD32(ctx, child, "desc_underruns", &stats->tx_desc_underrun, "Descriptor write-back errors"); ALE_SYSCTL_STAT_ADD32(ctx, child, "len_errs", &stats->tx_lenerrs, "Frames with length mismatched"); ALE_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs", &stats->tx_pkts_truncated, "Truncated frames due to MTU size"); } #undef ALE_SYSCTL_STAT_ADD32 #undef ALE_SYSCTL_STAT_ADD64 struct ale_dmamap_arg { bus_addr_t ale_busaddr; }; static void ale_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct ale_dmamap_arg *ctx; if (error != 0) return; KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); ctx = (struct ale_dmamap_arg *)arg; ctx->ale_busaddr = segs[0].ds_addr; } /* * Tx descriptors/RXF0/CMB DMA blocks share the ALE_DESC_ADDR_HI register * which specifies the high address region of the DMA blocks. Therefore these * blocks must share the same high 32 address bits, i.e. crossing a 4GB * boundary is not allowed.
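* ale_check_boundary() below verifies this by comparing ALE_ADDR_HI() of the start and end of every shared block; on failure ale_dma_alloc() tears the allocations down and retries with lowaddr limited to BUS_SPACE_MAXADDR_32BIT.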
*/ static int ale_check_boundary(struct ale_softc *sc) { bus_addr_t rx_cmb_end[ALE_RX_PAGES], tx_cmb_end; bus_addr_t rx_page_end[ALE_RX_PAGES], tx_ring_end; rx_page_end[0] = sc->ale_cdata.ale_rx_page[0].page_paddr + sc->ale_pagesize; rx_page_end[1] = sc->ale_cdata.ale_rx_page[1].page_paddr + sc->ale_pagesize; tx_ring_end = sc->ale_cdata.ale_tx_ring_paddr + ALE_TX_RING_SZ; tx_cmb_end = sc->ale_cdata.ale_tx_cmb_paddr + ALE_TX_CMB_SZ; rx_cmb_end[0] = sc->ale_cdata.ale_rx_page[0].cmb_paddr + ALE_RX_CMB_SZ; rx_cmb_end[1] = sc->ale_cdata.ale_rx_page[1].cmb_paddr + ALE_RX_CMB_SZ; if ((ALE_ADDR_HI(tx_ring_end) != ALE_ADDR_HI(sc->ale_cdata.ale_tx_ring_paddr)) || (ALE_ADDR_HI(rx_page_end[0]) != ALE_ADDR_HI(sc->ale_cdata.ale_rx_page[0].page_paddr)) || (ALE_ADDR_HI(rx_page_end[1]) != ALE_ADDR_HI(sc->ale_cdata.ale_rx_page[1].page_paddr)) || (ALE_ADDR_HI(tx_cmb_end) != ALE_ADDR_HI(sc->ale_cdata.ale_tx_cmb_paddr)) || (ALE_ADDR_HI(rx_cmb_end[0]) != ALE_ADDR_HI(sc->ale_cdata.ale_rx_page[0].cmb_paddr)) || (ALE_ADDR_HI(rx_cmb_end[1]) != ALE_ADDR_HI(sc->ale_cdata.ale_rx_page[1].cmb_paddr))) return (EFBIG); if ((ALE_ADDR_HI(tx_ring_end) != ALE_ADDR_HI(rx_page_end[0])) || (ALE_ADDR_HI(tx_ring_end) != ALE_ADDR_HI(rx_page_end[1])) || (ALE_ADDR_HI(tx_ring_end) != ALE_ADDR_HI(rx_cmb_end[0])) || (ALE_ADDR_HI(tx_ring_end) != ALE_ADDR_HI(rx_cmb_end[1])) || (ALE_ADDR_HI(tx_ring_end) != ALE_ADDR_HI(tx_cmb_end))) return (EFBIG); return (0); } static int ale_dma_alloc(struct ale_softc *sc) { struct ale_txdesc *txd; bus_addr_t lowaddr; struct ale_dmamap_arg ctx; int error, guard_size, i; if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0) guard_size = ALE_JUMBO_FRAMELEN; else guard_size = ALE_MAX_FRAMELEN; sc->ale_pagesize = roundup(guard_size + ALE_RX_PAGE_SZ, ALE_RX_PAGE_ALIGN); lowaddr = BUS_SPACE_MAXADDR; again: /* Create parent DMA tag. */ error = bus_dma_tag_create( bus_get_dma_tag(sc->ale_dev), /* parent */ 1, 0, /* alignment, boundary */ lowaddr, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 0, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->ale_cdata.ale_parent_tag); if (error != 0) { device_printf(sc->ale_dev, "could not create parent DMA tag.\n"); goto fail; } /* Create DMA tag for Tx descriptor ring. */ error = bus_dma_tag_create( sc->ale_cdata.ale_parent_tag, /* parent */ ALE_TX_RING_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ ALE_TX_RING_SZ, /* maxsize */ 1, /* nsegments */ ALE_TX_RING_SZ, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->ale_cdata.ale_tx_ring_tag); if (error != 0) { device_printf(sc->ale_dev, "could not create Tx ring DMA tag.\n"); goto fail; } /* Create DMA tag for Rx pages. */ for (i = 0; i < ALE_RX_PAGES; i++) { error = bus_dma_tag_create( sc->ale_cdata.ale_parent_tag, /* parent */ ALE_RX_PAGE_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sc->ale_pagesize, /* maxsize */ 1, /* nsegments */ sc->ale_pagesize, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->ale_cdata.ale_rx_page[i].page_tag); if (error != 0) { device_printf(sc->ale_dev, "could not create Rx page %d DMA tag.\n", i); goto fail; } } /* Create DMA tag for Tx coalescing message block. 
*/ error = bus_dma_tag_create( sc->ale_cdata.ale_parent_tag, /* parent */ ALE_CMB_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ ALE_TX_CMB_SZ, /* maxsize */ 1, /* nsegments */ ALE_TX_CMB_SZ, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->ale_cdata.ale_tx_cmb_tag); if (error != 0) { device_printf(sc->ale_dev, "could not create Tx CMB DMA tag.\n"); goto fail; } /* Create DMA tag for Rx coalescing message block. */ for (i = 0; i < ALE_RX_PAGES; i++) { error = bus_dma_tag_create( sc->ale_cdata.ale_parent_tag, /* parent */ ALE_CMB_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ ALE_RX_CMB_SZ, /* maxsize */ 1, /* nsegments */ ALE_RX_CMB_SZ, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->ale_cdata.ale_rx_page[i].cmb_tag); if (error != 0) { device_printf(sc->ale_dev, "could not create Rx page %d CMB DMA tag.\n", i); goto fail; } } /* Allocate DMA'able memory and load the DMA map for Tx ring. */ error = bus_dmamem_alloc(sc->ale_cdata.ale_tx_ring_tag, (void **)&sc->ale_cdata.ale_tx_ring, BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->ale_cdata.ale_tx_ring_map); if (error != 0) { device_printf(sc->ale_dev, "could not allocate DMA'able memory for Tx ring.\n"); goto fail; } ctx.ale_busaddr = 0; error = bus_dmamap_load(sc->ale_cdata.ale_tx_ring_tag, sc->ale_cdata.ale_tx_ring_map, sc->ale_cdata.ale_tx_ring, ALE_TX_RING_SZ, ale_dmamap_cb, &ctx, 0); if (error != 0 || ctx.ale_busaddr == 0) { device_printf(sc->ale_dev, "could not load DMA'able memory for Tx ring.\n"); goto fail; } sc->ale_cdata.ale_tx_ring_paddr = ctx.ale_busaddr; /* Rx pages. */ for (i = 0; i < ALE_RX_PAGES; i++) { error = bus_dmamem_alloc(sc->ale_cdata.ale_rx_page[i].page_tag, (void **)&sc->ale_cdata.ale_rx_page[i].page_addr, BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->ale_cdata.ale_rx_page[i].page_map); if (error != 0) { device_printf(sc->ale_dev, "could not allocate DMA'able memory for " "Rx page %d.\n", i); goto fail; } ctx.ale_busaddr = 0; error = bus_dmamap_load(sc->ale_cdata.ale_rx_page[i].page_tag, sc->ale_cdata.ale_rx_page[i].page_map, sc->ale_cdata.ale_rx_page[i].page_addr, sc->ale_pagesize, ale_dmamap_cb, &ctx, 0); if (error != 0 || ctx.ale_busaddr == 0) { device_printf(sc->ale_dev, "could not load DMA'able memory for " "Rx page %d.\n", i); goto fail; } sc->ale_cdata.ale_rx_page[i].page_paddr = ctx.ale_busaddr; } /* Tx CMB. */ error = bus_dmamem_alloc(sc->ale_cdata.ale_tx_cmb_tag, (void **)&sc->ale_cdata.ale_tx_cmb, BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->ale_cdata.ale_tx_cmb_map); if (error != 0) { device_printf(sc->ale_dev, "could not allocate DMA'able memory for Tx CMB.\n"); goto fail; } ctx.ale_busaddr = 0; error = bus_dmamap_load(sc->ale_cdata.ale_tx_cmb_tag, sc->ale_cdata.ale_tx_cmb_map, sc->ale_cdata.ale_tx_cmb, ALE_TX_CMB_SZ, ale_dmamap_cb, &ctx, 0); if (error != 0 || ctx.ale_busaddr == 0) { device_printf(sc->ale_dev, "could not load DMA'able memory for Tx CMB.\n"); goto fail; } sc->ale_cdata.ale_tx_cmb_paddr = ctx.ale_busaddr; /* Rx CMB. 
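Each Rx page gets its own small CMB into which the hardware posts the current producer offset; ale_rxeof() reads that location instead of polling a producer register.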
*/ for (i = 0; i < ALE_RX_PAGES; i++) { error = bus_dmamem_alloc(sc->ale_cdata.ale_rx_page[i].cmb_tag, (void **)&sc->ale_cdata.ale_rx_page[i].cmb_addr, BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sc->ale_cdata.ale_rx_page[i].cmb_map); if (error != 0) { device_printf(sc->ale_dev, "could not allocate " "DMA'able memory for Rx page %d CMB.\n", i); goto fail; } ctx.ale_busaddr = 0; error = bus_dmamap_load(sc->ale_cdata.ale_rx_page[i].cmb_tag, sc->ale_cdata.ale_rx_page[i].cmb_map, sc->ale_cdata.ale_rx_page[i].cmb_addr, ALE_RX_CMB_SZ, ale_dmamap_cb, &ctx, 0); if (error != 0 || ctx.ale_busaddr == 0) { device_printf(sc->ale_dev, "could not load DMA'able " "memory for Rx page %d CMB.\n", i); goto fail; } sc->ale_cdata.ale_rx_page[i].cmb_paddr = ctx.ale_busaddr; } /* * Tx descriptors/RXF0/CMB DMA blocks share the same * high address region of 64bit DMA address space. */ if (lowaddr != BUS_SPACE_MAXADDR_32BIT && (error = ale_check_boundary(sc)) != 0) { device_printf(sc->ale_dev, "4GB boundary crossed, " "switching to 32bit DMA addressing mode.\n"); ale_dma_free(sc); /* * Limit max allowable DMA address space to 32bit * and try again. */ lowaddr = BUS_SPACE_MAXADDR_32BIT; goto again; } /* * Create Tx buffer parent tag. * AR81xx allows 64bit DMA addressing of Tx buffers so it * needs separate parent DMA tag as parent DMA address space * could be restricted to be within 32bit address space by * 4GB boundary crossing. */ error = bus_dma_tag_create( bus_get_dma_tag(sc->ale_dev), /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 0, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->ale_cdata.ale_buffer_tag); if (error != 0) { device_printf(sc->ale_dev, "could not create parent buffer DMA tag.\n"); goto fail; } /* Create DMA tag for Tx buffers. */ error = bus_dma_tag_create( sc->ale_cdata.ale_buffer_tag, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ ALE_TSO_MAXSIZE, /* maxsize */ ALE_MAXTXSEGS, /* nsegments */ ALE_TSO_MAXSEGSIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->ale_cdata.ale_tx_tag); if (error != 0) { device_printf(sc->ale_dev, "could not create Tx DMA tag.\n"); goto fail; } /* Create DMA maps for Tx buffers. */ for (i = 0; i < ALE_TX_RING_CNT; i++) { txd = &sc->ale_cdata.ale_txdesc[i]; txd->tx_m = NULL; txd->tx_dmamap = NULL; error = bus_dmamap_create(sc->ale_cdata.ale_tx_tag, 0, &txd->tx_dmamap); if (error != 0) { device_printf(sc->ale_dev, "could not create Tx dmamap.\n"); goto fail; } } fail: return (error); } static void ale_dma_free(struct ale_softc *sc) { struct ale_txdesc *txd; int i; /* Tx buffers. */ if (sc->ale_cdata.ale_tx_tag != NULL) { for (i = 0; i < ALE_TX_RING_CNT; i++) { txd = &sc->ale_cdata.ale_txdesc[i]; if (txd->tx_dmamap != NULL) { bus_dmamap_destroy(sc->ale_cdata.ale_tx_tag, txd->tx_dmamap); txd->tx_dmamap = NULL; } } bus_dma_tag_destroy(sc->ale_cdata.ale_tx_tag); sc->ale_cdata.ale_tx_tag = NULL; } /* Tx descriptor ring. 
*/ if (sc->ale_cdata.ale_tx_ring_tag != NULL) { if (sc->ale_cdata.ale_tx_ring_paddr != 0) bus_dmamap_unload(sc->ale_cdata.ale_tx_ring_tag, sc->ale_cdata.ale_tx_ring_map); if (sc->ale_cdata.ale_tx_ring != NULL) bus_dmamem_free(sc->ale_cdata.ale_tx_ring_tag, sc->ale_cdata.ale_tx_ring, sc->ale_cdata.ale_tx_ring_map); sc->ale_cdata.ale_tx_ring_paddr = 0; sc->ale_cdata.ale_tx_ring = NULL; bus_dma_tag_destroy(sc->ale_cdata.ale_tx_ring_tag); sc->ale_cdata.ale_tx_ring_tag = NULL; } /* Rx page block. */ for (i = 0; i < ALE_RX_PAGES; i++) { if (sc->ale_cdata.ale_rx_page[i].page_tag != NULL) { if (sc->ale_cdata.ale_rx_page[i].page_paddr != 0) bus_dmamap_unload( sc->ale_cdata.ale_rx_page[i].page_tag, sc->ale_cdata.ale_rx_page[i].page_map); if (sc->ale_cdata.ale_rx_page[i].page_addr != NULL) bus_dmamem_free( sc->ale_cdata.ale_rx_page[i].page_tag, sc->ale_cdata.ale_rx_page[i].page_addr, sc->ale_cdata.ale_rx_page[i].page_map); sc->ale_cdata.ale_rx_page[i].page_paddr = 0; sc->ale_cdata.ale_rx_page[i].page_addr = NULL; bus_dma_tag_destroy( sc->ale_cdata.ale_rx_page[i].page_tag); sc->ale_cdata.ale_rx_page[i].page_tag = NULL; } } /* Rx CMB. */ for (i = 0; i < ALE_RX_PAGES; i++) { if (sc->ale_cdata.ale_rx_page[i].cmb_tag != NULL) { if (sc->ale_cdata.ale_rx_page[i].cmb_paddr != 0) bus_dmamap_unload( sc->ale_cdata.ale_rx_page[i].cmb_tag, sc->ale_cdata.ale_rx_page[i].cmb_map); if (sc->ale_cdata.ale_rx_page[i].cmb_addr != NULL) bus_dmamem_free( sc->ale_cdata.ale_rx_page[i].cmb_tag, sc->ale_cdata.ale_rx_page[i].cmb_addr, sc->ale_cdata.ale_rx_page[i].cmb_map); sc->ale_cdata.ale_rx_page[i].cmb_paddr = 0; sc->ale_cdata.ale_rx_page[i].cmb_addr = NULL; bus_dma_tag_destroy( sc->ale_cdata.ale_rx_page[i].cmb_tag); sc->ale_cdata.ale_rx_page[i].cmb_tag = NULL; } } /* Tx CMB. */ if (sc->ale_cdata.ale_tx_cmb_tag != NULL) { if (sc->ale_cdata.ale_tx_cmb_paddr != 0) bus_dmamap_unload(sc->ale_cdata.ale_tx_cmb_tag, sc->ale_cdata.ale_tx_cmb_map); if (sc->ale_cdata.ale_tx_cmb != NULL) bus_dmamem_free(sc->ale_cdata.ale_tx_cmb_tag, sc->ale_cdata.ale_tx_cmb, sc->ale_cdata.ale_tx_cmb_map); sc->ale_cdata.ale_tx_cmb_paddr = 0; sc->ale_cdata.ale_tx_cmb = NULL; bus_dma_tag_destroy(sc->ale_cdata.ale_tx_cmb_tag); sc->ale_cdata.ale_tx_cmb_tag = NULL; } if (sc->ale_cdata.ale_buffer_tag != NULL) { bus_dma_tag_destroy(sc->ale_cdata.ale_buffer_tag); sc->ale_cdata.ale_buffer_tag = NULL; } if (sc->ale_cdata.ale_parent_tag != NULL) { bus_dma_tag_destroy(sc->ale_cdata.ale_parent_tag); sc->ale_cdata.ale_parent_tag = NULL; } } static int ale_shutdown(device_t dev) { return (ale_suspend(dev)); } /* * Note, this driver resets the link speed to 10/100Mbps by * restarting auto-negotiation in suspend/shutdown phase but we * don't know whether that auto-negotiation would succeed or not * as driver has no control after powering off/suspend operation. * If the renegotiation fail WOL may not work. Running at 1Gbps * will draw more power than 375mA at 3.3V which is specified in * PCI specification and that would result in complete * shutdowning power to ethernet controller. * * TODO * Save current negotiated media speed/duplex/flow-control to * softc and restore the same link again after resuming. PHY * handling such as power down/resetting to 100Mbps may be better * handled in suspend method in phy driver. 
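* ale_setlinkspeed() below restarts autonegotiation advertising 10/100 only and, if no usable link shows up within roughly MII_ANEGTICKS_GIGE seconds, falls back to forcing a 100Mbps full-duplex MAC configuration.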
*/ static void ale_setlinkspeed(struct ale_softc *sc) { struct mii_data *mii; int aneg, i; mii = device_get_softc(sc->ale_miibus); mii_pollstat(mii); aneg = 0; if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch IFM_SUBTYPE(mii->mii_media_active) { case IFM_10_T: case IFM_100_TX: return; case IFM_1000_T: aneg++; break; default: break; } } ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr, MII_100T2CR, 0); ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr, MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA); ale_miibus_writereg(sc->ale_dev, sc->ale_phyaddr, MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG); DELAY(1000); if (aneg != 0) { /* * Poll link state until ale(4) get a 10/100Mbps link. */ for (i = 0; i < MII_ANEGTICKS_GIGE; i++) { mii_pollstat(mii); if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE( mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: ale_mac_config(sc); return; default: break; } } ALE_UNLOCK(sc); pause("alelnk", hz); ALE_LOCK(sc); } if (i == MII_ANEGTICKS_GIGE) device_printf(sc->ale_dev, "establishing a link failed, WOL may not work!"); } /* * No link, force MAC to have 100Mbps, full-duplex link. * This is the last resort and may/may not work. */ mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; ale_mac_config(sc); } static void ale_setwol(struct ale_softc *sc) { struct ifnet *ifp; uint32_t reg, pmcs; uint16_t pmstat; int pmc; ALE_LOCK_ASSERT(sc); if (pci_find_cap(sc->ale_dev, PCIY_PMG, &pmc) != 0) { /* Disable WOL. */ CSR_WRITE_4(sc, ALE_WOL_CFG, 0); reg = CSR_READ_4(sc, ALE_PCIE_PHYMISC); reg |= PCIE_PHYMISC_FORCE_RCV_DET; CSR_WRITE_4(sc, ALE_PCIE_PHYMISC, reg); /* Force PHY power down. */ CSR_WRITE_2(sc, ALE_GPHY_CTRL, GPHY_CTRL_EXT_RESET | GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE | GPHY_CTRL_PHY_PLL_ON | GPHY_CTRL_SEL_ANA_RESET | GPHY_CTRL_PHY_IDDQ | GPHY_CTRL_PCLK_SEL_DIS | GPHY_CTRL_PWDOWN_HW); return; } ifp = sc->ale_ifp; if ((ifp->if_capenable & IFCAP_WOL) != 0) { if ((sc->ale_flags & ALE_FLAG_FASTETHER) == 0) ale_setlinkspeed(sc); } pmcs = 0; if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB; CSR_WRITE_4(sc, ALE_WOL_CFG, pmcs); reg = CSR_READ_4(sc, ALE_MAC_CFG); reg &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC | MAC_CFG_ALLMULTI | MAC_CFG_BCAST); if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) reg |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST; if ((ifp->if_capenable & IFCAP_WOL) != 0) reg |= MAC_CFG_RX_ENB; CSR_WRITE_4(sc, ALE_MAC_CFG, reg); if ((ifp->if_capenable & IFCAP_WOL) == 0) { /* WOL disabled, PHY power down. */ reg = CSR_READ_4(sc, ALE_PCIE_PHYMISC); reg |= PCIE_PHYMISC_FORCE_RCV_DET; CSR_WRITE_4(sc, ALE_PCIE_PHYMISC, reg); CSR_WRITE_2(sc, ALE_GPHY_CTRL, GPHY_CTRL_EXT_RESET | GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE | GPHY_CTRL_SEL_ANA_RESET | GPHY_CTRL_PHY_IDDQ | GPHY_CTRL_PCLK_SEL_DIS | GPHY_CTRL_PWDOWN_HW); } /* Request PME. 
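Set PCIM_PSTAT_PME/PCIM_PSTAT_PMEENABLE in the power status register only when some WOL capability is enabled; otherwise leave PME generation off.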
*/ pmstat = pci_read_config(sc->ale_dev, pmc + PCIR_POWER_STATUS, 2); pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); if ((ifp->if_capenable & IFCAP_WOL) != 0) pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; pci_write_config(sc->ale_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); } static int ale_suspend(device_t dev) { struct ale_softc *sc; sc = device_get_softc(dev); ALE_LOCK(sc); ale_stop(sc); ale_setwol(sc); ALE_UNLOCK(sc); return (0); } static int ale_resume(device_t dev) { struct ale_softc *sc; struct ifnet *ifp; int pmc; uint16_t pmstat; sc = device_get_softc(dev); ALE_LOCK(sc); if (pci_find_cap(sc->ale_dev, PCIY_PMG, &pmc) == 0) { /* Disable PME and clear PME status. */ pmstat = pci_read_config(sc->ale_dev, pmc + PCIR_POWER_STATUS, 2); if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) { pmstat &= ~PCIM_PSTAT_PMEENABLE; pci_write_config(sc->ale_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); } } /* Reset PHY. */ ale_phy_reset(sc); ifp = sc->ale_ifp; if ((ifp->if_flags & IFF_UP) != 0) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; ale_init_locked(sc); } ALE_UNLOCK(sc); return (0); } static int ale_encap(struct ale_softc *sc, struct mbuf **m_head) { struct ale_txdesc *txd, *txd_last; struct tx_desc *desc; struct mbuf *m; struct ip *ip; struct tcphdr *tcp; bus_dma_segment_t txsegs[ALE_MAXTXSEGS]; bus_dmamap_t map; uint32_t cflags, hdrlen, ip_off, poff, vtag; int error, i, nsegs, prod, si; ALE_LOCK_ASSERT(sc); M_ASSERTPKTHDR((*m_head)); m = *m_head; ip = NULL; tcp = NULL; cflags = vtag = 0; ip_off = poff = 0; if ((m->m_pkthdr.csum_flags & (ALE_CSUM_FEATURES | CSUM_TSO)) != 0) { /* * AR81xx requires offset of TCP/UDP payload in its Tx * descriptor to perform hardware Tx checksum offload. * Additionally, TSO requires IP/TCP header size and * modification of IP/TCP header in order to make TSO * engine work. This kind of operation takes many CPU * cycles on FreeBSD so fast host CPU is required to * get smooth TSO performance. */ struct ether_header *eh; if (M_WRITABLE(m) == 0) { /* Get a writable copy. */ m = m_dup(*m_head, M_NOWAIT); /* Release original mbufs. */ m_freem(*m_head); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } *m_head = m; } /* * Buggy-controller requires 4 byte aligned Tx buffer * to make custom checksum offload work. */ if ((sc->ale_flags & ALE_FLAG_TXCSUM_BUG) != 0 && (m->m_pkthdr.csum_flags & ALE_CSUM_FEATURES) != 0 && (mtod(m, intptr_t) & 3) != 0) { m = m_defrag(*m_head, M_NOWAIT); if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } *m_head = m; } ip_off = sizeof(struct ether_header); m = m_pullup(m, ip_off); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } eh = mtod(m, struct ether_header *); /* * Check if hardware VLAN insertion is off. * Additional check for LLC/SNAP frame? */ if (eh->ether_type == htons(ETHERTYPE_VLAN)) { ip_off = sizeof(struct ether_vlan_header); m = m_pullup(m, ip_off); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } } m = m_pullup(m, ip_off + sizeof(struct ip)); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } ip = (struct ip *)(mtod(m, char *) + ip_off); poff = ip_off + (ip->ip_hl << 2); if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { /* * XXX * AR81xx requires the first descriptor should * not include any TCP playload for TSO case. * (i.e. ethernet header + IP + TCP header only) * m_pullup(9) above will ensure this too. * However it's not correct if the first mbuf * of the chain does not use cluster. 
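* The m_pullup() calls below at least keep the Ethernet/IP/TCP headers contiguous, and the TCP pseudo checksum is recomputed right after, without the length field, which is what the TSO engine appears to expect.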
*/ m = m_pullup(m, poff + sizeof(struct tcphdr)); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } ip = (struct ip *)(mtod(m, char *) + ip_off); tcp = (struct tcphdr *)(mtod(m, char *) + poff); m = m_pullup(m, poff + (tcp->th_off << 2)); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } /* * AR81xx requires IP/TCP header size and offset as * well as TCP pseudo checksum which complicates * TSO configuration. I guess this comes from the * adherence to Microsoft NDIS Large Send * specification which requires insertion of * pseudo checksum by upper stack. The pseudo * checksum that NDIS refers to doesn't include * TCP payload length so ale(4) should recompute * the pseudo checksum here. Hopefully this wouldn't * be much burden on modern CPUs. * Reset IP checksum and recompute TCP pseudo * checksum as NDIS specification said. */ ip->ip_sum = 0; tcp->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htons(IPPROTO_TCP)); } *m_head = m; } si = prod = sc->ale_cdata.ale_tx_prod; txd = &sc->ale_cdata.ale_txdesc[prod]; txd_last = txd; map = txd->tx_dmamap; error = bus_dmamap_load_mbuf_sg(sc->ale_cdata.ale_tx_tag, map, *m_head, txsegs, &nsegs, 0); if (error == EFBIG) { m = m_collapse(*m_head, M_NOWAIT, ALE_MAXTXSEGS); if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOMEM); } *m_head = m; error = bus_dmamap_load_mbuf_sg(sc->ale_cdata.ale_tx_tag, map, *m_head, txsegs, &nsegs, 0); if (error != 0) { m_freem(*m_head); *m_head = NULL; return (error); } } else if (error != 0) return (error); if (nsegs == 0) { m_freem(*m_head); *m_head = NULL; return (EIO); } /* Check descriptor overrun. */ if (sc->ale_cdata.ale_tx_cnt + nsegs >= ALE_TX_RING_CNT - 3) { bus_dmamap_unload(sc->ale_cdata.ale_tx_tag, map); return (ENOBUFS); } bus_dmamap_sync(sc->ale_cdata.ale_tx_tag, map, BUS_DMASYNC_PREWRITE); m = *m_head; if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { /* Request TSO and set MSS. */ cflags |= ALE_TD_TSO; cflags |= ((uint32_t)m->m_pkthdr.tso_segsz << ALE_TD_MSS_SHIFT); /* Set IP/TCP header size. */ cflags |= ip->ip_hl << ALE_TD_IPHDR_LEN_SHIFT; cflags |= tcp->th_off << ALE_TD_TCPHDR_LEN_SHIFT; } else if ((m->m_pkthdr.csum_flags & ALE_CSUM_FEATURES) != 0) { /* * AR81xx supports Tx custom checksum offload feature * that offloads single 16bit checksum computation. * So you can choose one among IP, TCP and UDP. * Normally driver sets checksum start/insertion * position from the information of TCP/UDP frame as * TCP/UDP checksum takes more time than that of IP. * However it seems that custom checksum offload * requires 4 bytes aligned Tx buffers due to hardware * bug. * AR81xx also supports explicit Tx checksum computation * if it is told that the size of IP header and TCP * header(for UDP, the header size does not matter * because it's fixed length). However with this scheme * TSO does not work so you have to choose one either * TSO or explicit Tx checksum offload. I chosen TSO * plus custom checksum offload with work-around which * will cover most common usage for this consumer * ethernet controller. The work-around takes a lot of * CPU cycles if Tx buffer is not aligned on 4 bytes * boundary, though. */ cflags |= ALE_TD_CXSUM; /* Set checksum start offset. */ cflags |= (poff << ALE_TD_CSUM_PLOADOFFSET_SHIFT); /* Set checksum insertion position of TCP/UDP. */ cflags |= ((poff + m->m_pkthdr.csum_data) << ALE_TD_CSUM_XSUMOFFSET_SHIFT); } /* Configure VLAN hardware tag insertion. 
*/ if ((m->m_flags & M_VLANTAG) != 0) { vtag = ALE_TX_VLAN_TAG(m->m_pkthdr.ether_vtag); vtag = ((vtag << ALE_TD_VLAN_SHIFT) & ALE_TD_VLAN_MASK); cflags |= ALE_TD_INSERT_VLAN_TAG; } i = 0; if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { /* * Make sure the first fragment contains * only ethernet and IP/TCP header with options. */ hdrlen = poff + (tcp->th_off << 2); desc = &sc->ale_cdata.ale_tx_ring[prod]; desc->addr = htole64(txsegs[i].ds_addr); desc->len = htole32(ALE_TX_BYTES(hdrlen) | vtag); desc->flags = htole32(cflags); sc->ale_cdata.ale_tx_cnt++; ALE_DESC_INC(prod, ALE_TX_RING_CNT); if (m->m_len - hdrlen > 0) { /* Handle remaining payload of the first fragment. */ desc = &sc->ale_cdata.ale_tx_ring[prod]; desc->addr = htole64(txsegs[i].ds_addr + hdrlen); desc->len = htole32(ALE_TX_BYTES(m->m_len - hdrlen) | vtag); desc->flags = htole32(cflags); sc->ale_cdata.ale_tx_cnt++; ALE_DESC_INC(prod, ALE_TX_RING_CNT); } i = 1; } for (; i < nsegs; i++) { desc = &sc->ale_cdata.ale_tx_ring[prod]; desc->addr = htole64(txsegs[i].ds_addr); desc->len = htole32(ALE_TX_BYTES(txsegs[i].ds_len) | vtag); desc->flags = htole32(cflags); sc->ale_cdata.ale_tx_cnt++; ALE_DESC_INC(prod, ALE_TX_RING_CNT); } /* Update producer index. */ sc->ale_cdata.ale_tx_prod = prod; /* Set TSO header on the first descriptor. */ if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { desc = &sc->ale_cdata.ale_tx_ring[si]; desc->flags |= htole32(ALE_TD_TSO_HDR); } /* Finally set EOP on the last descriptor. */ prod = (prod + ALE_TX_RING_CNT - 1) % ALE_TX_RING_CNT; desc = &sc->ale_cdata.ale_tx_ring[prod]; desc->flags |= htole32(ALE_TD_EOP); /* Swap dmamap of the first and the last. */ txd = &sc->ale_cdata.ale_txdesc[prod]; map = txd_last->tx_dmamap; txd_last->tx_dmamap = txd->tx_dmamap; txd->tx_dmamap = map; txd->tx_m = m; /* Sync descriptors. */ bus_dmamap_sync(sc->ale_cdata.ale_tx_ring_tag, sc->ale_cdata.ale_tx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } static void ale_start(struct ifnet *ifp) { struct ale_softc *sc; sc = ifp->if_softc; ALE_LOCK(sc); ale_start_locked(ifp); ALE_UNLOCK(sc); } static void ale_start_locked(struct ifnet *ifp) { struct ale_softc *sc; struct mbuf *m_head; int enq; sc = ifp->if_softc; ALE_LOCK_ASSERT(sc); /* Reclaim transmitted frames. */ if (sc->ale_cdata.ale_tx_cnt >= ALE_TX_DESC_HIWAT) ale_txeof(sc); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING || (sc->ale_flags & ALE_FLAG_LINK) == 0) return; for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* * Pack the data into the transmit ring. If we * don't have room, set the OACTIVE flag and wait * for the NIC to drain the ring. */ if (ale_encap(sc, &m_head)) { if (m_head == NULL) break; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } enq++; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ ETHER_BPF_MTAP(ifp, m_head); } if (enq > 0) { /* Kick. */ CSR_WRITE_4(sc, ALE_MBOX_TPD_PROD_IDX, sc->ale_cdata.ale_tx_prod); /* Set a timeout in case the chip goes out to lunch. 
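ale_watchdog() decrements this counter from the once-per-second ale_tick() callout and reinitializes the controller if it reaches zero while descriptors are still outstanding; ale_txeof() disarms it once the Tx ring drains.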
*/ sc->ale_watchdog_timer = ALE_TX_TIMEOUT; } } static void ale_watchdog(struct ale_softc *sc) { struct ifnet *ifp; ALE_LOCK_ASSERT(sc); if (sc->ale_watchdog_timer == 0 || --sc->ale_watchdog_timer) return; ifp = sc->ale_ifp; if ((sc->ale_flags & ALE_FLAG_LINK) == 0) { if_printf(sc->ale_ifp, "watchdog timeout (lost link)\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; ale_init_locked(sc); return; } if_printf(sc->ale_ifp, "watchdog timeout -- resetting\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; ale_init_locked(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) ale_start_locked(ifp); } static int ale_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct ale_softc *sc; struct ifreq *ifr; struct mii_data *mii; int error, mask; sc = ifp->if_softc; ifr = (struct ifreq *)data; error = 0; switch (cmd) { case SIOCSIFMTU: if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ALE_JUMBO_MTU || ((sc->ale_flags & ALE_FLAG_JUMBO) == 0 && ifr->ifr_mtu > ETHERMTU)) error = EINVAL; else if (ifp->if_mtu != ifr->ifr_mtu) { ALE_LOCK(sc); ifp->if_mtu = ifr->ifr_mtu; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; ale_init_locked(sc); } ALE_UNLOCK(sc); } break; case SIOCSIFFLAGS: ALE_LOCK(sc); if ((ifp->if_flags & IFF_UP) != 0) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { if (((ifp->if_flags ^ sc->ale_if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) ale_rxfilter(sc); } else { ale_init_locked(sc); } } else { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) ale_stop(sc); } sc->ale_if_flags = ifp->if_flags; ALE_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: ALE_LOCK(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) ale_rxfilter(sc); ALE_UNLOCK(sc); break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: mii = device_get_softc(sc->ale_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); break; case SIOCSIFCAP: ALE_LOCK(sc); mask = ifr->ifr_reqcap ^ ifp->if_capenable; if ((mask & IFCAP_TXCSUM) != 0 && (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { ifp->if_capenable ^= IFCAP_TXCSUM; if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) ifp->if_hwassist |= ALE_CSUM_FEATURES; else ifp->if_hwassist &= ~ALE_CSUM_FEATURES; } if ((mask & IFCAP_RXCSUM) != 0 && (ifp->if_capabilities & IFCAP_RXCSUM) != 0) ifp->if_capenable ^= IFCAP_RXCSUM; if ((mask & IFCAP_TSO4) != 0 && (ifp->if_capabilities & IFCAP_TSO4) != 0) { ifp->if_capenable ^= IFCAP_TSO4; if ((ifp->if_capenable & IFCAP_TSO4) != 0) ifp->if_hwassist |= CSUM_TSO; else ifp->if_hwassist &= ~CSUM_TSO; } if ((mask & IFCAP_WOL_MCAST) != 0 && (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0) ifp->if_capenable ^= IFCAP_WOL_MCAST; if ((mask & IFCAP_WOL_MAGIC) != 0 && (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0) ifp->if_capenable ^= IFCAP_WOL_MAGIC; if ((mask & IFCAP_VLAN_HWCSUM) != 0 && (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0) ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; if ((mask & IFCAP_VLAN_HWTSO) != 0 && (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0) ifp->if_capenable ^= IFCAP_VLAN_HWTSO; if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) { ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) ifp->if_capenable &= ~IFCAP_VLAN_HWTSO; ale_rxvlan(sc); } ALE_UNLOCK(sc); VLAN_CAPABILITIES(ifp); break; default: error = ether_ioctl(ifp, cmd, data); break; } return (error); } static void ale_mac_config(struct ale_softc *sc) { struct mii_data *mii; uint32_t reg; 
ALE_LOCK_ASSERT(sc); mii = device_get_softc(sc->ale_miibus); reg = CSR_READ_4(sc, ALE_MAC_CFG); reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC | MAC_CFG_SPEED_MASK); /* Reprogram MAC with resolved speed/duplex. */ switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: reg |= MAC_CFG_SPEED_10_100; break; case IFM_1000_T: reg |= MAC_CFG_SPEED_1000; break; } if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { reg |= MAC_CFG_FULL_DUPLEX; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) reg |= MAC_CFG_TX_FC; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) reg |= MAC_CFG_RX_FC; } CSR_WRITE_4(sc, ALE_MAC_CFG, reg); } static void ale_stats_clear(struct ale_softc *sc) { struct smb sb; uint32_t *reg; int i; for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; reg++) { CSR_READ_4(sc, ALE_RX_MIB_BASE + i); i += sizeof(uint32_t); } /* Read Tx statistics. */ for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; reg++) { CSR_READ_4(sc, ALE_TX_MIB_BASE + i); i += sizeof(uint32_t); } } static void ale_stats_update(struct ale_softc *sc) { struct ale_hw_stats *stat; struct smb sb, *smb; struct ifnet *ifp; uint32_t *reg; int i; ALE_LOCK_ASSERT(sc); ifp = sc->ale_ifp; stat = &sc->ale_stats; smb = &sb; /* Read Rx statistics. */ for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; reg++) { *reg = CSR_READ_4(sc, ALE_RX_MIB_BASE + i); i += sizeof(uint32_t); } /* Read Tx statistics. */ for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; reg++) { *reg = CSR_READ_4(sc, ALE_TX_MIB_BASE + i); i += sizeof(uint32_t); } /* Rx stats. */ stat->rx_frames += smb->rx_frames; stat->rx_bcast_frames += smb->rx_bcast_frames; stat->rx_mcast_frames += smb->rx_mcast_frames; stat->rx_pause_frames += smb->rx_pause_frames; stat->rx_control_frames += smb->rx_control_frames; stat->rx_crcerrs += smb->rx_crcerrs; stat->rx_lenerrs += smb->rx_lenerrs; stat->rx_bytes += smb->rx_bytes; stat->rx_runts += smb->rx_runts; stat->rx_fragments += smb->rx_fragments; stat->rx_pkts_64 += smb->rx_pkts_64; stat->rx_pkts_65_127 += smb->rx_pkts_65_127; stat->rx_pkts_128_255 += smb->rx_pkts_128_255; stat->rx_pkts_256_511 += smb->rx_pkts_256_511; stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023; stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518; stat->rx_pkts_1519_max += smb->rx_pkts_1519_max; stat->rx_pkts_truncated += smb->rx_pkts_truncated; stat->rx_fifo_oflows += smb->rx_fifo_oflows; stat->rx_rrs_errs += smb->rx_rrs_errs; stat->rx_alignerrs += smb->rx_alignerrs; stat->rx_bcast_bytes += smb->rx_bcast_bytes; stat->rx_mcast_bytes += smb->rx_mcast_bytes; stat->rx_pkts_filtered += smb->rx_pkts_filtered; /* Tx stats. 
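Fold the Tx MIB snapshot just read into the accumulated software counters; the MIB registers are apparently clear-on-read, which is also what ale_stats_clear() relies on.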
*/ stat->tx_frames += smb->tx_frames; stat->tx_bcast_frames += smb->tx_bcast_frames; stat->tx_mcast_frames += smb->tx_mcast_frames; stat->tx_pause_frames += smb->tx_pause_frames; stat->tx_excess_defer += smb->tx_excess_defer; stat->tx_control_frames += smb->tx_control_frames; stat->tx_deferred += smb->tx_deferred; stat->tx_bytes += smb->tx_bytes; stat->tx_pkts_64 += smb->tx_pkts_64; stat->tx_pkts_65_127 += smb->tx_pkts_65_127; stat->tx_pkts_128_255 += smb->tx_pkts_128_255; stat->tx_pkts_256_511 += smb->tx_pkts_256_511; stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023; stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518; stat->tx_pkts_1519_max += smb->tx_pkts_1519_max; stat->tx_single_colls += smb->tx_single_colls; stat->tx_multi_colls += smb->tx_multi_colls; stat->tx_late_colls += smb->tx_late_colls; stat->tx_excess_colls += smb->tx_excess_colls; stat->tx_underrun += smb->tx_underrun; stat->tx_desc_underrun += smb->tx_desc_underrun; stat->tx_lenerrs += smb->tx_lenerrs; stat->tx_pkts_truncated += smb->tx_pkts_truncated; stat->tx_bcast_bytes += smb->tx_bcast_bytes; stat->tx_mcast_bytes += smb->tx_mcast_bytes; /* Update counters in ifnet. */ if_inc_counter(ifp, IFCOUNTER_OPACKETS, smb->tx_frames); if_inc_counter(ifp, IFCOUNTER_COLLISIONS, smb->tx_single_colls + smb->tx_multi_colls * 2 + smb->tx_late_colls + smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT); if_inc_counter(ifp, IFCOUNTER_OERRORS, smb->tx_late_colls + smb->tx_excess_colls + smb->tx_underrun + smb->tx_pkts_truncated); if_inc_counter(ifp, IFCOUNTER_IPACKETS, smb->rx_frames); if_inc_counter(ifp, IFCOUNTER_IERRORS, smb->rx_crcerrs + smb->rx_lenerrs + smb->rx_runts + smb->rx_pkts_truncated + smb->rx_fifo_oflows + smb->rx_rrs_errs + smb->rx_alignerrs); } static int ale_intr(void *arg) { struct ale_softc *sc; uint32_t status; sc = (struct ale_softc *)arg; status = CSR_READ_4(sc, ALE_INTR_STATUS); if ((status & ALE_INTRS) == 0) return (FILTER_STRAY); /* Disable interrupts. */ CSR_WRITE_4(sc, ALE_INTR_STATUS, INTR_DIS_INT); taskqueue_enqueue(sc->ale_tq, &sc->ale_int_task); return (FILTER_HANDLED); } static void ale_int_task(void *arg, int pending) { struct ale_softc *sc; struct ifnet *ifp; uint32_t status; int more; sc = (struct ale_softc *)arg; status = CSR_READ_4(sc, ALE_INTR_STATUS); ALE_LOCK(sc); if (sc->ale_morework != 0) status |= INTR_RX_PKT; if ((status & ALE_INTRS) == 0) goto done; /* Acknowledge interrupts but still disable interrupts. */ CSR_WRITE_4(sc, ALE_INTR_STATUS, status | INTR_DIS_INT); ifp = sc->ale_ifp; more = 0; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { more = ale_rxeof(sc, sc->ale_process_limit); if (more == EAGAIN) sc->ale_morework = 1; else if (more == EIO) { sc->ale_stats.reset_brk_seq++; ifp->if_drv_flags &= ~IFF_DRV_RUNNING; ale_init_locked(sc); ALE_UNLOCK(sc); return; } if ((status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) != 0) { if ((status & INTR_DMA_RD_TO_RST) != 0) device_printf(sc->ale_dev, "DMA read error! -- resetting\n"); if ((status & INTR_DMA_WR_TO_RST) != 0) device_printf(sc->ale_dev, "DMA write error! -- resetting\n"); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; ale_init_locked(sc); ALE_UNLOCK(sc); return; } if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) ale_start_locked(ifp); } if (more == EAGAIN || (CSR_READ_4(sc, ALE_INTR_STATUS) & ALE_INTRS) != 0) { ALE_UNLOCK(sc); taskqueue_enqueue(sc->ale_tq, &sc->ale_int_task); return; } done: ALE_UNLOCK(sc); /* Re-enable interrupts. 
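Writing 0x7FFFFFFF presumably leaves the top INTR_DIS_INT bit clear, re-arming the interrupt, while acknowledging whatever status bits were latched while the task ran.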
*/ CSR_WRITE_4(sc, ALE_INTR_STATUS, 0x7FFFFFFF); } static void ale_txeof(struct ale_softc *sc) { struct ifnet *ifp; struct ale_txdesc *txd; uint32_t cons, prod; int prog; ALE_LOCK_ASSERT(sc); ifp = sc->ale_ifp; if (sc->ale_cdata.ale_tx_cnt == 0) return; bus_dmamap_sync(sc->ale_cdata.ale_tx_ring_tag, sc->ale_cdata.ale_tx_ring_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if ((sc->ale_flags & ALE_FLAG_TXCMB_BUG) == 0) { bus_dmamap_sync(sc->ale_cdata.ale_tx_cmb_tag, sc->ale_cdata.ale_tx_cmb_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); prod = *sc->ale_cdata.ale_tx_cmb & TPD_CNT_MASK; } else prod = CSR_READ_2(sc, ALE_TPD_CONS_IDX); cons = sc->ale_cdata.ale_tx_cons; /* * Go through our Tx list and free mbufs for those * frames which have been transmitted. */ for (prog = 0; cons != prod; prog++, ALE_DESC_INC(cons, ALE_TX_RING_CNT)) { if (sc->ale_cdata.ale_tx_cnt <= 0) break; prog++; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->ale_cdata.ale_tx_cnt--; txd = &sc->ale_cdata.ale_txdesc[cons]; if (txd->tx_m != NULL) { /* Reclaim transmitted mbufs. */ bus_dmamap_sync(sc->ale_cdata.ale_tx_tag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->ale_cdata.ale_tx_tag, txd->tx_dmamap); m_freem(txd->tx_m); txd->tx_m = NULL; } } if (prog > 0) { sc->ale_cdata.ale_tx_cons = cons; /* * Unarm watchdog timer only when there is no pending * Tx descriptors in queue. */ if (sc->ale_cdata.ale_tx_cnt == 0) sc->ale_watchdog_timer = 0; } } static void ale_rx_update_page(struct ale_softc *sc, struct ale_rx_page **page, uint32_t length, uint32_t *prod) { struct ale_rx_page *rx_page; rx_page = *page; /* Update consumer position. */ rx_page->cons += roundup(length + sizeof(struct rx_rs), ALE_RX_PAGE_ALIGN); if (rx_page->cons >= ALE_RX_PAGE_SZ) { /* * End of Rx page reached, let hardware reuse * this page. */ rx_page->cons = 0; *rx_page->cmb_addr = 0; bus_dmamap_sync(rx_page->cmb_tag, rx_page->cmb_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); CSR_WRITE_1(sc, ALE_RXF0_PAGE0 + sc->ale_cdata.ale_rx_curp, RXF_VALID); /* Switch to alternate Rx page. */ sc->ale_cdata.ale_rx_curp ^= 1; rx_page = *page = &sc->ale_cdata.ale_rx_page[sc->ale_cdata.ale_rx_curp]; /* Page flipped, sync CMB and Rx page. */ bus_dmamap_sync(rx_page->page_tag, rx_page->page_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_sync(rx_page->cmb_tag, rx_page->cmb_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); /* Sync completed, cache updated producer index. */ *prod = *rx_page->cmb_addr; } } /* * It seems that AR81xx controller can compute partial checksum. * The partial checksum value can be used to accelerate checksum * computation for fragmented TCP/UDP packets. Upper network stack * already takes advantage of the partial checksum value in IP * reassembly stage. But I'm not sure the correctness of the * partial hardware checksum assistance due to lack of data sheet. * In addition, the Rx feature of controller that requires copying * for every frames effectively nullifies one of most nice offload * capability of controller. 
*/ static void ale_rxcsum(struct ale_softc *sc, struct mbuf *m, uint32_t status) { struct ifnet *ifp; struct ip *ip; char *p; ifp = sc->ale_ifp; m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if ((status & ALE_RD_IPCSUM_NOK) == 0) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; if ((sc->ale_flags & ALE_FLAG_RXCSUM_BUG) == 0) { if (((status & ALE_RD_IPV4_FRAG) == 0) && ((status & (ALE_RD_TCP | ALE_RD_UDP)) != 0) && ((status & ALE_RD_TCP_UDPCSUM_NOK) == 0)) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } } else { if ((status & (ALE_RD_TCP | ALE_RD_UDP)) != 0 && (status & ALE_RD_TCP_UDPCSUM_NOK) == 0) { p = mtod(m, char *); p += ETHER_HDR_LEN; if ((status & ALE_RD_802_3) != 0) p += LLC_SNAPFRAMELEN; if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0 && (status & ALE_RD_VLAN) != 0) p += ETHER_VLAN_ENCAP_LEN; ip = (struct ip *)p; if (ip->ip_off != 0 && (status & ALE_RD_IPV4_DF) == 0) return; m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } } /* * Don't mark bad checksum for TCP/UDP frames * as fragmented frames may always have set * bad checksummed bit of frame status. */ } /* Process received frames. */ static int ale_rxeof(struct ale_softc *sc, int count) { struct ale_rx_page *rx_page; struct rx_rs *rs; struct ifnet *ifp; struct mbuf *m; uint32_t length, prod, seqno, status, vtags; int prog; ifp = sc->ale_ifp; rx_page = &sc->ale_cdata.ale_rx_page[sc->ale_cdata.ale_rx_curp]; bus_dmamap_sync(rx_page->cmb_tag, rx_page->cmb_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_sync(rx_page->page_tag, rx_page->page_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); /* * Don't directly access producer index as hardware may * update it while Rx handler is in progress. It would * be even better if there is a way to let hardware * know how far driver processed its received frames. * Alternatively, hardware could provide a way to disable * CMB updates until driver acknowledges the end of CMB * access. */ prod = *rx_page->cmb_addr; for (prog = 0; prog < count; prog++) { if (rx_page->cons >= prod) break; rs = (struct rx_rs *)(rx_page->page_addr + rx_page->cons); seqno = ALE_RX_SEQNO(le32toh(rs->seqno)); if (sc->ale_cdata.ale_rx_seqno != seqno) { /* * Normally I believe this should not happen unless * severe driver bug or corrupted memory. However * it seems to happen under certain conditions which * is triggered by abrupt Rx events such as initiation * of bulk transfer of remote host. It's not easy to * reproduce this and I doubt it could be related * with FIFO overflow of hardware or activity of Tx * CMB updates. I also remember similar behaviour * seen on RealTek 8139 which uses resembling Rx * scheme. */ if (bootverbose) device_printf(sc->ale_dev, "garbled seq: %u, expected: %u -- " "resetting!\n", seqno, sc->ale_cdata.ale_rx_seqno); return (EIO); } /* Frame received. */ sc->ale_cdata.ale_rx_seqno++; length = ALE_RX_BYTES(le32toh(rs->length)); status = le32toh(rs->flags); if ((status & ALE_RD_ERROR) != 0) { /* * We want to pass the following frames to upper * layer regardless of error status of Rx return * status. * * o IP/TCP/UDP checksum is bad. * o frame length and protocol specific length * does not match. */ if ((status & (ALE_RD_CRC | ALE_RD_CODE | ALE_RD_DRIBBLE | ALE_RD_RUNT | ALE_RD_OFLOW | ALE_RD_TRUNC)) != 0) { ale_rx_update_page(sc, &rx_page, length, &prod); continue; } } /* * m_devget(9) is major bottle-neck of ale(4)(It comes * from hardware limitation). 
For jumbo frames we could * get a slightly better performance if driver use * m_getjcl(9) with proper buffer size argument. However * that would make code more complicated and I don't * think users would expect good Rx performance numbers * on these low-end consumer ethernet controller. */ m = m_devget((char *)(rs + 1), length - ETHER_CRC_LEN, ETHER_ALIGN, ifp, NULL); if (m == NULL) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); ale_rx_update_page(sc, &rx_page, length, &prod); continue; } if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 && (status & ALE_RD_IPV4) != 0) ale_rxcsum(sc, m, status); if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && (status & ALE_RD_VLAN) != 0) { vtags = ALE_RX_VLAN(le32toh(rs->vtags)); m->m_pkthdr.ether_vtag = ALE_RX_VLAN_TAG(vtags); m->m_flags |= M_VLANTAG; } /* Pass it to upper layer. */ ALE_UNLOCK(sc); (*ifp->if_input)(ifp, m); ALE_LOCK(sc); ale_rx_update_page(sc, &rx_page, length, &prod); } return (count > 0 ? 0 : EAGAIN); } static void ale_tick(void *arg) { struct ale_softc *sc; struct mii_data *mii; sc = (struct ale_softc *)arg; ALE_LOCK_ASSERT(sc); mii = device_get_softc(sc->ale_miibus); mii_tick(mii); ale_stats_update(sc); /* * Reclaim Tx buffers that have been transferred. It's not * needed here but it would release allocated mbuf chains * faster and limit the maximum delay to a hz. */ ale_txeof(sc); ale_watchdog(sc); callout_reset(&sc->ale_tick_ch, hz, ale_tick, sc); } static void ale_reset(struct ale_softc *sc) { uint32_t reg; int i; /* Initialize PCIe module. From Linux. */ CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000); CSR_WRITE_4(sc, ALE_MASTER_CFG, MASTER_RESET); for (i = ALE_RESET_TIMEOUT; i > 0; i--) { DELAY(10); if ((CSR_READ_4(sc, ALE_MASTER_CFG) & MASTER_RESET) == 0) break; } if (i == 0) device_printf(sc->ale_dev, "master reset timeout!\n"); for (i = ALE_RESET_TIMEOUT; i > 0; i--) { if ((reg = CSR_READ_4(sc, ALE_IDLE_STATUS)) == 0) break; DELAY(10); } if (i == 0) device_printf(sc->ale_dev, "reset timeout(0x%08x)!\n", reg); } static void ale_init(void *xsc) { struct ale_softc *sc; sc = (struct ale_softc *)xsc; ALE_LOCK(sc); ale_init_locked(sc); ALE_UNLOCK(sc); } static void ale_init_locked(struct ale_softc *sc) { struct ifnet *ifp; struct mii_data *mii; uint8_t eaddr[ETHER_ADDR_LEN]; bus_addr_t paddr; uint32_t reg, rxf_hi, rxf_lo; ALE_LOCK_ASSERT(sc); ifp = sc->ale_ifp; mii = device_get_softc(sc->ale_miibus); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) return; /* * Cancel any pending I/O. */ ale_stop(sc); /* * Reset the chip to a known state. */ ale_reset(sc); /* Initialize Tx descriptors, DMA memory blocks. */ ale_init_rx_pages(sc); ale_init_tx_ring(sc); /* Reprogram the station address. */ bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN); CSR_WRITE_4(sc, ALE_PAR0, eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]); CSR_WRITE_4(sc, ALE_PAR1, eaddr[0] << 8 | eaddr[1]); /* * Clear WOL status and disable all WOL feature as WOL * would interfere Rx operation under normal environments. */ CSR_READ_4(sc, ALE_WOL_CFG); CSR_WRITE_4(sc, ALE_WOL_CFG, 0); /* * Set Tx descriptor/RXF0/CMB base addresses. They share * the same high address part of DMAable region. */ paddr = sc->ale_cdata.ale_tx_ring_paddr; CSR_WRITE_4(sc, ALE_TPD_ADDR_HI, ALE_ADDR_HI(paddr)); CSR_WRITE_4(sc, ALE_TPD_ADDR_LO, ALE_ADDR_LO(paddr)); CSR_WRITE_4(sc, ALE_TPD_CNT, (ALE_TX_RING_CNT << TPD_CNT_SHIFT) & TPD_CNT_MASK); /* Set Rx page base address, note we use single queue. 
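Only RXF0 and its two pages are programmed since a single queue is used; just the low 32 address bits are written per page here, the shared high part having been set once via ALE_TPD_ADDR_HI above.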
*/ paddr = sc->ale_cdata.ale_rx_page[0].page_paddr; CSR_WRITE_4(sc, ALE_RXF0_PAGE0_ADDR_LO, ALE_ADDR_LO(paddr)); paddr = sc->ale_cdata.ale_rx_page[1].page_paddr; CSR_WRITE_4(sc, ALE_RXF0_PAGE1_ADDR_LO, ALE_ADDR_LO(paddr)); /* Set Tx/Rx CMB addresses. */ paddr = sc->ale_cdata.ale_tx_cmb_paddr; CSR_WRITE_4(sc, ALE_TX_CMB_ADDR_LO, ALE_ADDR_LO(paddr)); paddr = sc->ale_cdata.ale_rx_page[0].cmb_paddr; CSR_WRITE_4(sc, ALE_RXF0_CMB0_ADDR_LO, ALE_ADDR_LO(paddr)); paddr = sc->ale_cdata.ale_rx_page[1].cmb_paddr; CSR_WRITE_4(sc, ALE_RXF0_CMB1_ADDR_LO, ALE_ADDR_LO(paddr)); /* Mark RXF0 is valid. */ CSR_WRITE_1(sc, ALE_RXF0_PAGE0, RXF_VALID); CSR_WRITE_1(sc, ALE_RXF0_PAGE1, RXF_VALID); /* * No need to initialize RFX1/RXF2/RXF3. We don't use * multi-queue yet. */ /* Set Rx page size, excluding guard frame size. */ CSR_WRITE_4(sc, ALE_RXF_PAGE_SIZE, ALE_RX_PAGE_SZ); /* Tell hardware that we're ready to load DMA blocks. */ CSR_WRITE_4(sc, ALE_DMA_BLOCK, DMA_BLOCK_LOAD); /* Set Rx/Tx interrupt trigger threshold. */ CSR_WRITE_4(sc, ALE_INT_TRIG_THRESH, (1 << INT_TRIG_RX_THRESH_SHIFT) | (4 << INT_TRIG_TX_THRESH_SHIFT)); /* * XXX * Set interrupt trigger timer, its purpose and relation * with interrupt moderation mechanism is not clear yet. */ CSR_WRITE_4(sc, ALE_INT_TRIG_TIMER, ((ALE_USECS(10) << INT_TRIG_RX_TIMER_SHIFT) | (ALE_USECS(1000) << INT_TRIG_TX_TIMER_SHIFT))); /* Configure interrupt moderation timer. */ reg = ALE_USECS(sc->ale_int_rx_mod) << IM_TIMER_RX_SHIFT; reg |= ALE_USECS(sc->ale_int_tx_mod) << IM_TIMER_TX_SHIFT; CSR_WRITE_4(sc, ALE_IM_TIMER, reg); reg = CSR_READ_4(sc, ALE_MASTER_CFG); reg &= ~(MASTER_CHIP_REV_MASK | MASTER_CHIP_ID_MASK); reg &= ~(MASTER_IM_RX_TIMER_ENB | MASTER_IM_TX_TIMER_ENB); if (ALE_USECS(sc->ale_int_rx_mod) != 0) reg |= MASTER_IM_RX_TIMER_ENB; if (ALE_USECS(sc->ale_int_tx_mod) != 0) reg |= MASTER_IM_TX_TIMER_ENB; CSR_WRITE_4(sc, ALE_MASTER_CFG, reg); CSR_WRITE_2(sc, ALE_INTR_CLR_TIMER, ALE_USECS(1000)); /* Set Maximum frame size of controller. */ if (ifp->if_mtu < ETHERMTU) sc->ale_max_frame_size = ETHERMTU; else sc->ale_max_frame_size = ifp->if_mtu; sc->ale_max_frame_size += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN; CSR_WRITE_4(sc, ALE_FRAME_SIZE, sc->ale_max_frame_size); /* Configure IPG/IFG parameters. */ CSR_WRITE_4(sc, ALE_IPG_IFG_CFG, ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK) | ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) | ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) | ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK)); /* Set parameters for half-duplex media. */ CSR_WRITE_4(sc, ALE_HDPX_CFG, ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) & HDPX_CFG_LCOL_MASK) | ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) & HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN | ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) & HDPX_CFG_ABEBT_MASK) | ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) & HDPX_CFG_JAMIPG_MASK)); /* Configure Tx jumbo frame parameters. */ if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0) { if (ifp->if_mtu < ETHERMTU) reg = sc->ale_max_frame_size; else if (ifp->if_mtu < 6 * 1024) reg = (sc->ale_max_frame_size * 2) / 3; else reg = sc->ale_max_frame_size / 2; CSR_WRITE_4(sc, ALE_TX_JUMBO_THRESH, roundup(reg, TX_JUMBO_THRESH_UNIT) >> TX_JUMBO_THRESH_UNIT_SHIFT); } /* Configure TxQ. 
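/*
 * Illustrative aside (not part of the diff): the Tx jumbo threshold written
 * above scales with the MTU (the full frame size below the standard MTU,
 * two thirds of it up to ~6KB, half of it beyond that) and is then rounded
 * up and written in register units.  Standalone sketch of that computation;
 * the 8-byte unit below is an assumption for illustration only, not taken
 * from if_alereg.h.
 */
#include <stdint.h>

#define SK_THRESH_UNIT       8u     /* hypothetical register granularity */
#define SK_THRESH_UNIT_SHIFT 3

static uint32_t
sketch_tx_jumbo_thresh(uint32_t mtu, uint32_t max_frame_size)
{
    uint32_t bytes;

    if (mtu < 1500)
        bytes = max_frame_size;
    else if (mtu < 6 * 1024)
        bytes = (max_frame_size * 2) / 3;
    else
        bytes = max_frame_size / 2;
    /* Round up to the unit, then convert bytes -> units for the register. */
    bytes = (bytes + SK_THRESH_UNIT - 1) & ~(SK_THRESH_UNIT - 1);
    return (bytes >> SK_THRESH_UNIT_SHIFT);
}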
*/ reg = (128 << (sc->ale_dma_rd_burst >> DMA_CFG_RD_BURST_SHIFT)) << TXQ_CFG_TX_FIFO_BURST_SHIFT; reg |= (TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) & TXQ_CFG_TPD_BURST_MASK; CSR_WRITE_4(sc, ALE_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE | TXQ_CFG_ENB); /* Configure Rx jumbo frame & flow control parameters. */ if ((sc->ale_flags & ALE_FLAG_JUMBO) != 0) { reg = roundup(sc->ale_max_frame_size, RX_JUMBO_THRESH_UNIT); CSR_WRITE_4(sc, ALE_RX_JUMBO_THRESH, (((reg >> RX_JUMBO_THRESH_UNIT_SHIFT) << RX_JUMBO_THRESH_MASK_SHIFT) & RX_JUMBO_THRESH_MASK) | ((RX_JUMBO_LKAH_DEFAULT << RX_JUMBO_LKAH_SHIFT) & RX_JUMBO_LKAH_MASK)); reg = CSR_READ_4(sc, ALE_SRAM_RX_FIFO_LEN); rxf_hi = (reg * 7) / 10; rxf_lo = (reg * 3)/ 10; CSR_WRITE_4(sc, ALE_RX_FIFO_PAUSE_THRESH, ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) & RX_FIFO_PAUSE_THRESH_LO_MASK) | ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) & RX_FIFO_PAUSE_THRESH_HI_MASK)); } /* Disable RSS. */ CSR_WRITE_4(sc, ALE_RSS_IDT_TABLE0, 0); CSR_WRITE_4(sc, ALE_RSS_CPU, 0); /* Configure RxQ. */ CSR_WRITE_4(sc, ALE_RXQ_CFG, RXQ_CFG_ALIGN_32 | RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB); /* Configure DMA parameters. */ reg = 0; if ((sc->ale_flags & ALE_FLAG_TXCMB_BUG) == 0) reg |= DMA_CFG_TXCMB_ENB; CSR_WRITE_4(sc, ALE_DMA_CFG, DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI | DMA_CFG_RCB_64 | sc->ale_dma_rd_burst | reg | sc->ale_dma_wr_burst | DMA_CFG_RXCMB_ENB | ((DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) & DMA_CFG_RD_DELAY_CNT_MASK) | ((DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) & DMA_CFG_WR_DELAY_CNT_MASK)); /* * Hardware can be configured to issue SMB interrupt based * on programmed interval. Since there is a callout that is * invoked for every hz in driver we use that instead of * relying on periodic SMB interrupt. */ CSR_WRITE_4(sc, ALE_SMB_STAT_TIMER, ALE_USECS(0)); /* Clear MAC statistics. */ ale_stats_clear(sc); /* * Configure Tx/Rx MACs. * - Auto-padding for short frames. * - Enable CRC generation. * Actual reconfiguration of MAC for resolved speed/duplex * is followed after detection of link establishment. * AR81xx always does checksum computation regardless of * MAC_CFG_RXCSUM_ENB bit. In fact, setting the bit will * cause Rx handling issue for fragmented IP datagrams due * to silicon bug. */ reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX | ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) & MAC_CFG_PREAMBLE_MASK); if ((sc->ale_flags & ALE_FLAG_FASTETHER) != 0) reg |= MAC_CFG_SPEED_10_100; else reg |= MAC_CFG_SPEED_1000; CSR_WRITE_4(sc, ALE_MAC_CFG, reg); /* Set up the receive filter. */ ale_rxfilter(sc); ale_rxvlan(sc); /* Acknowledge all pending interrupts and clear it. */ CSR_WRITE_4(sc, ALE_INTR_MASK, ALE_INTRS); CSR_WRITE_4(sc, ALE_INTR_STATUS, 0xFFFFFFFF); CSR_WRITE_4(sc, ALE_INTR_STATUS, 0); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->ale_flags &= ~ALE_FLAG_LINK; /* Switch to the current media. */ mii_mediachg(mii); callout_reset(&sc->ale_tick_ch, hz, ale_tick, sc); } static void ale_stop(struct ale_softc *sc) { struct ifnet *ifp; struct ale_txdesc *txd; uint32_t reg; int i; ALE_LOCK_ASSERT(sc); /* * Mark the interface down and cancel the watchdog timer. */ ifp = sc->ale_ifp; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); sc->ale_flags &= ~ALE_FLAG_LINK; callout_stop(&sc->ale_tick_ch); sc->ale_watchdog_timer = 0; ale_stats_update(sc); /* Disable interrupts. 
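/*
 * Illustrative aside (not part of the diff): the flow-control setup above
 * derives the Rx FIFO pause watermarks from the FIFO length read back from
 * the chip: request pause frames when the FIFO is roughly 70% full and
 * release again below roughly 30%.  Standalone sketch of the watermark math.
 */
#include <stdint.h>

struct pause_thresh {
    uint32_t hi;    /* assert flow control above this fill level */
    uint32_t lo;    /* deassert below this fill level */
};

static struct pause_thresh
sketch_pause_thresholds(uint32_t rx_fifo_len)
{
    struct pause_thresh t;

    t.hi = (rx_fifo_len * 7) / 10;
    t.lo = (rx_fifo_len * 3) / 10;
    return (t);
}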
*/ CSR_WRITE_4(sc, ALE_INTR_MASK, 0); CSR_WRITE_4(sc, ALE_INTR_STATUS, 0xFFFFFFFF); /* Disable queue processing and DMA. */ reg = CSR_READ_4(sc, ALE_TXQ_CFG); reg &= ~TXQ_CFG_ENB; CSR_WRITE_4(sc, ALE_TXQ_CFG, reg); reg = CSR_READ_4(sc, ALE_RXQ_CFG); reg &= ~RXQ_CFG_ENB; CSR_WRITE_4(sc, ALE_RXQ_CFG, reg); reg = CSR_READ_4(sc, ALE_DMA_CFG); reg &= ~(DMA_CFG_TXCMB_ENB | DMA_CFG_RXCMB_ENB); CSR_WRITE_4(sc, ALE_DMA_CFG, reg); DELAY(1000); /* Stop Rx/Tx MACs. */ ale_stop_mac(sc); /* Disable interrupts which might be touched in taskq handler. */ CSR_WRITE_4(sc, ALE_INTR_STATUS, 0xFFFFFFFF); /* * Free TX mbufs still in the queues. */ for (i = 0; i < ALE_TX_RING_CNT; i++) { txd = &sc->ale_cdata.ale_txdesc[i]; if (txd->tx_m != NULL) { bus_dmamap_sync(sc->ale_cdata.ale_tx_tag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->ale_cdata.ale_tx_tag, txd->tx_dmamap); m_freem(txd->tx_m); txd->tx_m = NULL; } } } static void ale_stop_mac(struct ale_softc *sc) { uint32_t reg; int i; ALE_LOCK_ASSERT(sc); reg = CSR_READ_4(sc, ALE_MAC_CFG); if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) { reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB); CSR_WRITE_4(sc, ALE_MAC_CFG, reg); } for (i = ALE_TIMEOUT; i > 0; i--) { reg = CSR_READ_4(sc, ALE_IDLE_STATUS); if (reg == 0) break; DELAY(10); } if (i == 0) device_printf(sc->ale_dev, "could not disable Tx/Rx MAC(0x%08x)!\n", reg); } static void ale_init_tx_ring(struct ale_softc *sc) { struct ale_txdesc *txd; int i; ALE_LOCK_ASSERT(sc); sc->ale_cdata.ale_tx_prod = 0; sc->ale_cdata.ale_tx_cons = 0; sc->ale_cdata.ale_tx_cnt = 0; bzero(sc->ale_cdata.ale_tx_ring, ALE_TX_RING_SZ); bzero(sc->ale_cdata.ale_tx_cmb, ALE_TX_CMB_SZ); for (i = 0; i < ALE_TX_RING_CNT; i++) { txd = &sc->ale_cdata.ale_txdesc[i]; txd->tx_m = NULL; } *sc->ale_cdata.ale_tx_cmb = 0; bus_dmamap_sync(sc->ale_cdata.ale_tx_cmb_tag, sc->ale_cdata.ale_tx_cmb_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->ale_cdata.ale_tx_ring_tag, sc->ale_cdata.ale_tx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } static void ale_init_rx_pages(struct ale_softc *sc) { struct ale_rx_page *rx_page; int i; ALE_LOCK_ASSERT(sc); sc->ale_morework = 0; sc->ale_cdata.ale_rx_seqno = 0; sc->ale_cdata.ale_rx_curp = 0; for (i = 0; i < ALE_RX_PAGES; i++) { rx_page = &sc->ale_cdata.ale_rx_page[i]; bzero(rx_page->page_addr, sc->ale_pagesize); bzero(rx_page->cmb_addr, ALE_RX_CMB_SZ); rx_page->cons = 0; *rx_page->cmb_addr = 0; bus_dmamap_sync(rx_page->page_tag, rx_page->page_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); bus_dmamap_sync(rx_page->cmb_tag, rx_page->cmb_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } } static void ale_rxvlan(struct ale_softc *sc) { struct ifnet *ifp; uint32_t reg; ALE_LOCK_ASSERT(sc); ifp = sc->ale_ifp; reg = CSR_READ_4(sc, ALE_MAC_CFG); reg &= ~MAC_CFG_VLAN_TAG_STRIP; if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) reg |= MAC_CFG_VLAN_TAG_STRIP; CSR_WRITE_4(sc, ALE_MAC_CFG, reg); } static u_int ale_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { uint32_t crc, *mchash = arg; crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN); mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f); return (1); } static void ale_rxfilter(struct ale_softc *sc) { struct ifnet *ifp; uint32_t mchash[2]; uint32_t rxcfg; ALE_LOCK_ASSERT(sc); ifp = sc->ale_ifp; rxcfg = CSR_READ_4(sc, ALE_MAC_CFG); rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC); if ((ifp->if_flags & IFF_BROADCAST) != 0) rxcfg |= MAC_CFG_BCAST; if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { if 
((ifp->if_flags & IFF_PROMISC) != 0) rxcfg |= MAC_CFG_PROMISC; if ((ifp->if_flags & IFF_ALLMULTI) != 0) rxcfg |= MAC_CFG_ALLMULTI; CSR_WRITE_4(sc, ALE_MAR0, 0xFFFFFFFF); CSR_WRITE_4(sc, ALE_MAR1, 0xFFFFFFFF); CSR_WRITE_4(sc, ALE_MAC_CFG, rxcfg); return; } /* Program new filter. */ bzero(mchash, sizeof(mchash)); if_foreach_llmaddr(ifp, ale_hash_maddr, &mchash); CSR_WRITE_4(sc, ALE_MAR0, mchash[0]); CSR_WRITE_4(sc, ALE_MAR1, mchash[1]); CSR_WRITE_4(sc, ALE_MAC_CFG, rxcfg); } static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) { int error, value; if (arg1 == NULL) return (EINVAL); value = *(int *)arg1; error = sysctl_handle_int(oidp, &value, 0, req); if (error || req->newptr == NULL) return (error); if (value < low || value > high) return (EINVAL); *(int *)arg1 = value; return (0); } static int sysctl_hw_ale_proc_limit(SYSCTL_HANDLER_ARGS) { return (sysctl_int_range(oidp, arg1, arg2, req, ALE_PROC_MIN, ALE_PROC_MAX)); } static int sysctl_hw_ale_int_mod(SYSCTL_HANDLER_ARGS) { return (sysctl_int_range(oidp, arg1, arg2, req, ALE_IM_TIMER_MIN, ALE_IM_TIMER_MAX)); } diff --git a/sys/dev/ath/if_ath.c b/sys/dev/ath/if_ath.c index fa3081cff296..0745d00ce3a8 100644 --- a/sys/dev/ath/if_ath.c +++ b/sys/dev/ath/if_ath.c @@ -1,7060 +1,7060 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); /* * Driver for the Atheros Wireless LAN controller. * * This software is derived from work of Atsushi Onoe; his contribution * is greatly appreciated. */ #include "opt_inet.h" #include "opt_ath.h" /* * This is needed for register operations which are performed * by the driver - eg, calls to ath_hal_gettsf32(). * * It's also required for any AH_DEBUG checks in here, eg the * module dependencies. 
*/ #include "opt_ah.h" #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for mp_ncpus */ #include #include #include #include #include #include #include #include #include #include #include #ifdef IEEE80211_SUPPORT_SUPERG #include #endif #ifdef IEEE80211_SUPPORT_TDMA #include #endif #include #ifdef INET #include #include #endif #include #include /* XXX for softled */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef ATH_TX99_DIAG #include #endif #ifdef ATH_DEBUG_ALQ #include #endif /* * Only enable this if you're working on PS-POLL support. */ #define ATH_SW_PSQ /* * ATH_BCBUF determines the number of vap's that can transmit * beacons and also (currently) the number of vap's that can * have unique mac addresses/bssid. When staggering beacons * 4 is probably a good max as otherwise the beacons become * very closely spaced and there is limited time for cab q traffic * to go out. You can burst beacons instead but that is not good * for stations in power save and at some point you really want * another radio (and channel). * * The limit on the number of mac addresses is tied to our use of * the U/L bit and tracking addresses in a byte; it would be * worthwhile to allow more for applications like proxy sta. */ CTASSERT(ATH_BCBUF <= 8); static struct ieee80211vap *ath_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void ath_vap_delete(struct ieee80211vap *); static int ath_init(struct ath_softc *); static void ath_stop(struct ath_softc *); static int ath_reset_vap(struct ieee80211vap *, u_long); static int ath_transmit(struct ieee80211com *, struct mbuf *); static int ath_media_change(struct ifnet *); static void ath_watchdog(void *); static void ath_parent(struct ieee80211com *); static void ath_fatal_proc(void *, int); static void ath_bmiss_vap(struct ieee80211vap *); static void ath_bmiss_proc(void *, int); static void ath_key_update_begin(struct ieee80211vap *); static void ath_key_update_end(struct ieee80211vap *); static void ath_update_mcast_hw(struct ath_softc *); static void ath_update_mcast(struct ieee80211com *); static void ath_update_promisc(struct ieee80211com *); static void ath_updateslot(struct ieee80211com *); static void ath_bstuck_proc(void *, int); static void ath_reset_proc(void *, int); static int ath_desc_alloc(struct ath_softc *); static void ath_desc_free(struct ath_softc *); static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *, const uint8_t [IEEE80211_ADDR_LEN]); static void ath_node_cleanup(struct ieee80211_node *); static void ath_node_free(struct ieee80211_node *); static void ath_node_getsignal(const struct ieee80211_node *, int8_t *, int8_t *); static void ath_txq_init(struct ath_softc *sc, struct ath_txq *, int); static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype); static int ath_tx_setup(struct ath_softc *, int, int); static void ath_tx_cleanupq(struct ath_softc *, struct ath_txq *); static void ath_tx_cleanup(struct ath_softc *); static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched); static void ath_tx_proc_q0(void *, int); static void ath_tx_proc_q0123(void *, int); static void ath_tx_proc(void 
*, int); static void ath_txq_sched_tasklet(void *, int); static int ath_chan_set(struct ath_softc *, struct ieee80211_channel *); static void ath_chan_change(struct ath_softc *, struct ieee80211_channel *); static void ath_scan_start(struct ieee80211com *); static void ath_scan_end(struct ieee80211com *); static void ath_set_channel(struct ieee80211com *); #ifdef ATH_ENABLE_11N static void ath_update_chw(struct ieee80211com *); #endif /* ATH_ENABLE_11N */ static int ath_set_quiet_ie(struct ieee80211_node *, uint8_t *); static void ath_calibrate(void *); static int ath_newstate(struct ieee80211vap *, enum ieee80211_state, int); static void ath_setup_stationkey(struct ieee80211_node *); static void ath_newassoc(struct ieee80211_node *, int); static int ath_setregdomain(struct ieee80211com *, struct ieee80211_regdomain *, int, struct ieee80211_channel []); static void ath_getradiocaps(struct ieee80211com *, int, int *, struct ieee80211_channel []); static int ath_getchannels(struct ath_softc *); static int ath_rate_setup(struct ath_softc *, u_int mode); static void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode); static void ath_announce(struct ath_softc *); static void ath_dfs_tasklet(void *, int); static void ath_node_powersave(struct ieee80211_node *, int); static int ath_node_set_tim(struct ieee80211_node *, int); static void ath_node_recv_pspoll(struct ieee80211_node *, struct mbuf *); #ifdef IEEE80211_SUPPORT_TDMA #include #endif SYSCTL_DECL(_hw_ath); /* XXX validate sysctl values */ static int ath_longcalinterval = 30; /* long cals every 30 secs */ SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval, 0, "long chip calibration interval (secs)"); static int ath_shortcalinterval = 100; /* short cals every 100 ms */ SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval, 0, "short chip calibration interval (msecs)"); static int ath_resetcalinterval = 20*60; /* reset cal state 20 mins */ SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval, 0, "reset chip calibration results (secs)"); static int ath_anicalinterval = 100; /* ANI calibration - 100 msec */ SYSCTL_INT(_hw_ath, OID_AUTO, anical, CTLFLAG_RW, &ath_anicalinterval, 0, "ANI calibration (msecs)"); int ath_rxbuf = ATH_RXBUF; /* # rx buffers to allocate */ SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &ath_rxbuf, 0, "rx buffers allocated"); int ath_txbuf = ATH_TXBUF; /* # tx buffers to allocate */ SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RWTUN, &ath_txbuf, 0, "tx buffers allocated"); int ath_txbuf_mgmt = ATH_MGMT_TXBUF; /* # mgmt tx buffers to allocate */ SYSCTL_INT(_hw_ath, OID_AUTO, txbuf_mgmt, CTLFLAG_RWTUN, &ath_txbuf_mgmt, 0, "tx (mgmt) buffers allocated"); int ath_bstuck_threshold = 4; /* max missed beacons */ SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold, 0, "max missed beacon xmits before chip reset"); MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers"); void ath_legacy_attach_comp_func(struct ath_softc *sc) { /* * Special case certain configurations. Note the * CAB queue is handled by these specially so don't * include them when checking the txq setup mask. */ switch (sc->sc_txqsetup &~ (1<sc_cabq->axq_qnum)) { case 0x01: TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc); break; case 0x0f: TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc); break; default: TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc); break; } } /* * Set the target power mode. 
* * If this is called during a point in time where * the hardware is being programmed elsewhere, it will * simply store it away and update it when all current * uses of the hardware are completed. * * If the chip is going into network sleep or power off, then * we will wait until all uses of the chip are done before * going into network sleep or power off. * * If the chip is being programmed full-awake, then immediately * program it full-awake so we can actually stay awake rather than * the chip potentially going to sleep underneath us. */ void _ath_power_setpower(struct ath_softc *sc, int power_state, int selfgen, const char *file, int line) { ATH_LOCK_ASSERT(sc); DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) state=%d, refcnt=%d, target=%d, cur=%d\n", __func__, file, line, power_state, sc->sc_powersave_refcnt, sc->sc_target_powerstate, sc->sc_cur_powerstate); sc->sc_target_powerstate = power_state; /* * Don't program the chip into network sleep if the chip * is being programmed elsewhere. * * However, if the chip is being programmed /awake/, force * the chip awake so we stay awake. */ if ((sc->sc_powersave_refcnt == 0 || power_state == HAL_PM_AWAKE) && power_state != sc->sc_cur_powerstate) { sc->sc_cur_powerstate = power_state; ath_hal_setpower(sc->sc_ah, power_state); /* * If the NIC is force-awake, then set the * self-gen frame state appropriately. * * If the nic is in network sleep or full-sleep, * we let the above call leave the self-gen * state as "sleep". */ if (selfgen && sc->sc_cur_powerstate == HAL_PM_AWAKE && sc->sc_target_selfgen_state != HAL_PM_AWAKE) { ath_hal_setselfgenpower(sc->sc_ah, sc->sc_target_selfgen_state); } } } /* * Set the current self-generated frames state. * * This is separate from the target power mode. The chip may be * awake but the desired state is "sleep", so frames sent to the * destination has PWRMGT=1 in the 802.11 header. The NIC also * needs to know to set PWRMGT=1 in self-generated frames. */ void _ath_power_set_selfgen(struct ath_softc *sc, int power_state, const char *file, int line) { ATH_LOCK_ASSERT(sc); DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) state=%d, refcnt=%d\n", __func__, file, line, power_state, sc->sc_target_selfgen_state); sc->sc_target_selfgen_state = power_state; /* * If the NIC is force-awake, then set the power state. * Network-state and full-sleep will already transition it to * mark self-gen frames as sleeping - and we can't * guarantee the NIC is awake to program the self-gen frame * setting anyway. */ if (sc->sc_cur_powerstate == HAL_PM_AWAKE) { ath_hal_setselfgenpower(sc->sc_ah, power_state); } } /* * Set the hardware power mode and take a reference. * * This doesn't update the target power mode in the driver; * it just updates the hardware power state. * * XXX it should only ever force the hardware awake; it should * never be called to set it asleep. */ void _ath_power_set_power_state(struct ath_softc *sc, int power_state, const char *file, int line) { ATH_LOCK_ASSERT(sc); DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) state=%d, refcnt=%d\n", __func__, file, line, power_state, sc->sc_powersave_refcnt); sc->sc_powersave_refcnt++; /* * Only do the power state change if we're not programming * it elsewhere. */ if (power_state != sc->sc_cur_powerstate) { ath_hal_setpower(sc->sc_ah, power_state); sc->sc_cur_powerstate = power_state; /* * Adjust the self-gen powerstate if appropriate. 
*/ if (sc->sc_cur_powerstate == HAL_PM_AWAKE && sc->sc_target_selfgen_state != HAL_PM_AWAKE) { ath_hal_setselfgenpower(sc->sc_ah, sc->sc_target_selfgen_state); } } } /* * Restore the power save mode to what it once was. * * This will decrement the reference counter and once it hits * zero, it'll restore the powersave state. */ void _ath_power_restore_power_state(struct ath_softc *sc, const char *file, int line) { ATH_LOCK_ASSERT(sc); DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) refcnt=%d, target state=%d\n", __func__, file, line, sc->sc_powersave_refcnt, sc->sc_target_powerstate); if (sc->sc_powersave_refcnt == 0) device_printf(sc->sc_dev, "%s: refcnt=0?\n", __func__); else sc->sc_powersave_refcnt--; if (sc->sc_powersave_refcnt == 0 && sc->sc_target_powerstate != sc->sc_cur_powerstate) { sc->sc_cur_powerstate = sc->sc_target_powerstate; ath_hal_setpower(sc->sc_ah, sc->sc_target_powerstate); } /* * Adjust the self-gen powerstate if appropriate. */ if (sc->sc_cur_powerstate == HAL_PM_AWAKE && sc->sc_target_selfgen_state != HAL_PM_AWAKE) { ath_hal_setselfgenpower(sc->sc_ah, sc->sc_target_selfgen_state); } } /* * Configure the initial HAL configuration values based on bus * specific parameters. * * Some PCI IDs and other information may need tweaking. * * XXX TODO: ath9k and the Atheros HAL only program comm2g_switch_enable * if BT antenna diversity isn't enabled. * * So, let's also figure out how to enable BT diversity for AR9485. */ static void ath_setup_hal_config(struct ath_softc *sc, HAL_OPS_CONFIG *ah_config) { /* XXX TODO: only for PCI devices? */ if (sc->sc_pci_devinfo & (ATH_PCI_CUS198 | ATH_PCI_CUS230)) { ah_config->ath_hal_ext_lna_ctl_gpio = 0x200; /* bit 9 */ ah_config->ath_hal_ext_atten_margin_cfg = AH_TRUE; ah_config->ath_hal_min_gainidx = AH_TRUE; ah_config->ath_hal_ant_ctrl_comm2g_switch_enable = 0x000bbb88; /* XXX low_rssi_thresh */ /* XXX fast_div_bias */ device_printf(sc->sc_dev, "configuring for %s\n", (sc->sc_pci_devinfo & ATH_PCI_CUS198) ? "CUS198" : "CUS230"); } if (sc->sc_pci_devinfo & ATH_PCI_CUS217) device_printf(sc->sc_dev, "CUS217 card detected\n"); if (sc->sc_pci_devinfo & ATH_PCI_CUS252) device_printf(sc->sc_dev, "CUS252 card detected\n"); if (sc->sc_pci_devinfo & ATH_PCI_AR9565_1ANT) device_printf(sc->sc_dev, "WB335 1-ANT card detected\n"); if (sc->sc_pci_devinfo & ATH_PCI_AR9565_2ANT) device_printf(sc->sc_dev, "WB335 2-ANT card detected\n"); if (sc->sc_pci_devinfo & ATH_PCI_BT_ANT_DIV) device_printf(sc->sc_dev, "Bluetooth Antenna Diversity card detected\n"); if (sc->sc_pci_devinfo & ATH_PCI_KILLER) device_printf(sc->sc_dev, "Killer Wireless card detected\n"); #if 0 /* * Some WB335 cards do not support antenna diversity. Since * we use a hardcoded value for AR9565 instead of using the * EEPROM/OTP data, remove the combining feature from * the HW capabilities bitmap. 
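/*
 * Illustrative aside (not part of the diff): the set/restore pair above
 * implements a "force awake while in use" reference count: every caller that
 * is about to touch the hardware bumps the refcount and forces AWAKE, and
 * when the last reference is restored the chip is dropped back to whatever
 * target state was requested in the meantime.  Standalone sketch of that
 * pattern; the states and the program_hw() hook are invented, and the
 * self-generated-frame handling is omitted.
 */
enum pwr { PWR_AWAKE, PWR_NETWORK_SLEEP, PWR_FULL_SLEEP };

struct pwr_track {
    enum pwr cur;       /* what the hardware is currently programmed to */
    enum pwr target;    /* what we want once nobody is using the chip */
    int refcnt;         /* outstanding "hardware in use" references */
    void (*program_hw)(enum pwr);
};

static void
sketch_power_acquire(struct pwr_track *p)
{
    p->refcnt++;
    if (p->cur != PWR_AWAKE) {
        p->cur = PWR_AWAKE;
        p->program_hw(PWR_AWAKE);
    }
}

static void
sketch_power_release(struct pwr_track *p)
{
    if (p->refcnt > 0)
        p->refcnt--;
    /* Only drop to the target state once the chip is idle. */
    if (p->refcnt == 0 && p->cur != p->target) {
        p->cur = p->target;
        p->program_hw(p->target);
    }
}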
*/ if (sc->sc_pci_devinfo & (ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_AR9565_2ANT)) { if (!(sc->sc_pci_devinfo & ATH9K_PCI_BT_ANT_DIV)) pCap->hw_caps &= ~ATH9K_HW_CAP_ANT_DIV_COMB; } if (sc->sc_pci_devinfo & ATH9K_PCI_BT_ANT_DIV) { pCap->hw_caps |= ATH9K_HW_CAP_BT_ANT_DIV; device_printf(sc->sc_dev, "Set BT/WLAN RX diversity capability\n"); } #endif if (sc->sc_pci_devinfo & ATH_PCI_D3_L1_WAR) { ah_config->ath_hal_pcie_waen = 0x0040473b; device_printf(sc->sc_dev, "Enable WAR for ASPM D3/L1\n"); } #if 0 if (sc->sc_pci_devinfo & ATH9K_PCI_NO_PLL_PWRSAVE) { ah->config.no_pll_pwrsave = true; device_printf(sc->sc_dev, "Disable PLL PowerSave\n"); } #endif } /* * Attempt to fetch the MAC address from the kernel environment. * * Returns 0, macaddr in macaddr if successful; -1 otherwise. */ static int ath_fetch_mac_kenv(struct ath_softc *sc, uint8_t *macaddr) { char devid_str[32]; int local_mac = 0; char *local_macstr; /* * Fetch from the kenv rather than using hints. * * Hints would be nice but the transition to dynamic * hints/kenv doesn't happen early enough for this * to work reliably (eg on anything embedded.) */ snprintf(devid_str, 32, "hint.%s.%d.macaddr", device_get_name(sc->sc_dev), device_get_unit(sc->sc_dev)); if ((local_macstr = kern_getenv(devid_str)) != NULL) { uint32_t tmpmac[ETHER_ADDR_LEN]; int count; int i; /* Have a MAC address; should use it */ device_printf(sc->sc_dev, "Overriding MAC address from environment: '%s'\n", local_macstr); /* Extract out the MAC address */ count = sscanf(local_macstr, "%x%*c%x%*c%x%*c%x%*c%x%*c%x", &tmpmac[0], &tmpmac[1], &tmpmac[2], &tmpmac[3], &tmpmac[4], &tmpmac[5]); if (count == 6) { /* Valid! */ local_mac = 1; for (i = 0; i < ETHER_ADDR_LEN; i++) macaddr[i] = tmpmac[i]; } /* Done! */ freeenv(local_macstr); local_macstr = NULL; } if (local_mac) return (0); return (-1); } #define HAL_MODE_HT20 (HAL_MODE_11NG_HT20 | HAL_MODE_11NA_HT20) #define HAL_MODE_HT40 \ (HAL_MODE_11NG_HT40PLUS | HAL_MODE_11NG_HT40MINUS | \ HAL_MODE_11NA_HT40PLUS | HAL_MODE_11NA_HT40MINUS) int ath_attach(u_int16_t devid, struct ath_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ath_hal *ah = NULL; HAL_STATUS status; int error = 0, i; u_int wmodes; int rx_chainmask, tx_chainmask; HAL_OPS_CONFIG ah_config; DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid); ic->ic_softc = sc; ic->ic_name = device_get_nameunit(sc->sc_dev); /* * Configure the initial configuration data. * * This is stuff that may be needed early during attach * rather than done via configuration calls later. */ bzero(&ah_config, sizeof(ah_config)); ath_setup_hal_config(sc, &ah_config); ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh, sc->sc_eepromdata, &ah_config, &status); if (ah == NULL) { device_printf(sc->sc_dev, "unable to attach hardware; HAL status %u\n", status); error = ENXIO; goto bad; } sc->sc_ah = ah; sc->sc_invalid = 0; /* ready to go, enable interrupt handling */ #ifdef ATH_DEBUG sc->sc_debug = ath_debug; #endif /* * Force the chip awake during setup, just to keep * the HAL/driver power tracking happy. * * There are some methods (eg ath_hal_setmac()) * that poke the hardware. */ ATH_LOCK(sc); ath_power_setpower(sc, HAL_PM_AWAKE, 1); ATH_UNLOCK(sc); /* * Setup the DMA/EDMA functions based on the current * hardware support. * * This is required before the descriptors are allocated. 
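/*
 * Illustrative aside (not part of the diff): ath_fetch_mac_kenv() above
 * parses an "xx:xx:xx:xx:xx:xx" override with sscanf() into wider
 * temporaries (%x wants an unsigned int, not a byte) and only accepts the
 * result if all six groups matched.  A standalone equivalent of just the
 * parsing step:
 */
#include <stdint.h>
#include <stdio.h>

/* Returns 0 and fills mac[6] on success, -1 if the string does not parse. */
static int
sketch_parse_macaddr(const char *str, uint8_t mac[6])
{
    unsigned int tmp[6];
    int i;

    /* %*c skips whatever single-character separator is used (':' or '-'). */
    if (sscanf(str, "%x%*c%x%*c%x%*c%x%*c%x%*c%x",
        &tmp[0], &tmp[1], &tmp[2], &tmp[3], &tmp[4], &tmp[5]) != 6)
        return (-1);
    for (i = 0; i < 6; i++)
        mac[i] = (uint8_t)tmp[i];
    return (0);
}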
*/ if (ath_hal_hasedma(sc->sc_ah)) { sc->sc_isedma = 1; ath_recv_setup_edma(sc); ath_xmit_setup_edma(sc); } else { ath_recv_setup_legacy(sc); ath_xmit_setup_legacy(sc); } if (ath_hal_hasmybeacon(sc->sc_ah)) { sc->sc_do_mybeacon = 1; } /* * Check if the MAC has multi-rate retry support. * We do this by trying to setup a fake extended * descriptor. MAC's that don't have support will * return false w/o doing anything. MAC's that do * support it will return true w/o doing anything. */ sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0); /* * Check if the device has hardware counters for PHY * errors. If so we need to enable the MIB interrupt * so we can act on stat triggers. */ if (ath_hal_hwphycounters(ah)) sc->sc_needmib = 1; /* * Get the hardware key cache size. */ sc->sc_keymax = ath_hal_keycachesize(ah); if (sc->sc_keymax > ATH_KEYMAX) { device_printf(sc->sc_dev, "Warning, using only %u of %u key cache slots\n", ATH_KEYMAX, sc->sc_keymax); sc->sc_keymax = ATH_KEYMAX; } /* * Reset the key cache since some parts do not * reset the contents on initial power up. */ for (i = 0; i < sc->sc_keymax; i++) ath_hal_keyreset(ah, i); /* * Collect the default channel list. */ error = ath_getchannels(sc); if (error != 0) goto bad; /* * Setup rate tables for all potential media types. */ ath_rate_setup(sc, IEEE80211_MODE_11A); ath_rate_setup(sc, IEEE80211_MODE_11B); ath_rate_setup(sc, IEEE80211_MODE_11G); ath_rate_setup(sc, IEEE80211_MODE_TURBO_A); ath_rate_setup(sc, IEEE80211_MODE_TURBO_G); ath_rate_setup(sc, IEEE80211_MODE_STURBO_A); ath_rate_setup(sc, IEEE80211_MODE_11NA); ath_rate_setup(sc, IEEE80211_MODE_11NG); ath_rate_setup(sc, IEEE80211_MODE_HALF); ath_rate_setup(sc, IEEE80211_MODE_QUARTER); /* NB: setup here so ath_rate_update is happy */ ath_setcurmode(sc, IEEE80211_MODE_11A); /* * Allocate TX descriptors and populate the lists. */ error = ath_desc_alloc(sc); if (error != 0) { device_printf(sc->sc_dev, "failed to allocate TX descriptors: %d\n", error); goto bad; } error = ath_txdma_setup(sc); if (error != 0) { device_printf(sc->sc_dev, "failed to allocate TX descriptors: %d\n", error); goto bad; } /* * Allocate RX descriptors and populate the lists. */ error = ath_rxdma_setup(sc); if (error != 0) { device_printf(sc->sc_dev, "failed to allocate RX descriptors: %d\n", error); goto bad; } callout_init_mtx(&sc->sc_cal_ch, &sc->sc_mtx, 0); callout_init_mtx(&sc->sc_wd_ch, &sc->sc_mtx, 0); ATH_TXBUF_LOCK_INIT(sc); sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT, taskqueue_thread_enqueue, &sc->sc_tq); taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->sc_dev)); - TASK_INIT(&sc->sc_rxtask, 0, sc->sc_rx.recv_tasklet, sc); + NET_TASK_INIT(&sc->sc_rxtask, 0, sc->sc_rx.recv_tasklet, sc); TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc); TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc); TASK_INIT(&sc->sc_resettask,0, ath_reset_proc, sc); TASK_INIT(&sc->sc_txqtask, 0, ath_txq_sched_tasklet, sc); TASK_INIT(&sc->sc_fataltask, 0, ath_fatal_proc, sc); /* * Allocate hardware transmit queues: one queue for * beacon frames and one data queue for each QoS * priority. Note that the hal handles resetting * these queues at the needed time. 
* * XXX PS-Poll */ sc->sc_bhalq = ath_beaconq_setup(sc); if (sc->sc_bhalq == (u_int) -1) { device_printf(sc->sc_dev, "unable to setup a beacon xmit queue!\n"); error = EIO; goto bad2; } sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0); if (sc->sc_cabq == NULL) { device_printf(sc->sc_dev, "unable to setup CAB xmit queue!\n"); error = EIO; goto bad2; } /* NB: insure BK queue is the lowest priority h/w queue */ if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) { device_printf(sc->sc_dev, "unable to setup xmit queue for %s traffic!\n", ieee80211_wme_acnames[WME_AC_BK]); error = EIO; goto bad2; } if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) || !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) || !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) { /* * Not enough hardware tx queues to properly do WME; * just punt and assign them all to the same h/w queue. * We could do a better job of this if, for example, * we allocate queues when we switch from station to * AP mode. */ if (sc->sc_ac2q[WME_AC_VI] != NULL) ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]); if (sc->sc_ac2q[WME_AC_BE] != NULL) ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]); sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK]; sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK]; sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK]; } /* * Attach the TX completion function. * * The non-EDMA chips may have some special case optimisations; * this method gives everyone a chance to attach cleanly. */ sc->sc_tx.xmit_attach_comp_func(sc); /* * Setup rate control. Some rate control modules * call back to change the anntena state so expose * the necessary entry points. * XXX maybe belongs in struct ath_ratectrl? */ sc->sc_setdefantenna = ath_setdefantenna; sc->sc_rc = ath_rate_attach(sc); if (sc->sc_rc == NULL) { error = EIO; goto bad2; } /* Attach DFS module */ if (! ath_dfs_attach(sc)) { device_printf(sc->sc_dev, "%s: unable to attach DFS\n", __func__); error = EIO; goto bad2; } /* Attach spectral module */ if (ath_spectral_attach(sc) < 0) { device_printf(sc->sc_dev, "%s: unable to attach spectral\n", __func__); error = EIO; goto bad2; } /* Attach bluetooth coexistence module */ if (ath_btcoex_attach(sc) < 0) { device_printf(sc->sc_dev, "%s: unable to attach bluetooth coexistence\n", __func__); error = EIO; goto bad2; } /* Attach LNA diversity module */ if (ath_lna_div_attach(sc) < 0) { device_printf(sc->sc_dev, "%s: unable to attach LNA diversity\n", __func__); error = EIO; goto bad2; } /* Start DFS processing tasklet */ TASK_INIT(&sc->sc_dfstask, 0, ath_dfs_tasklet, sc); /* Configure LED state */ sc->sc_blinking = 0; sc->sc_ledstate = 1; sc->sc_ledon = 0; /* low true */ sc->sc_ledidle = (2700*hz)/1000; /* 2.7sec */ callout_init(&sc->sc_ledtimer, 1); /* * Don't setup hardware-based blinking. * * Although some NICs may have this configured in the * default reset register values, the user may wish * to alter which pins have which function. * * The reference driver attaches the MAC network LED to GPIO1 and * the MAC power LED to GPIO2. However, the DWA-552 cardbus * NIC has these reversed. */ sc->sc_hardled = (1 == 0); sc->sc_led_net_pin = -1; sc->sc_led_pwr_pin = -1; /* * Auto-enable soft led processing for IBM cards and for * 5211 minipci cards. Users can also manually enable/disable * support with a sysctl. 
*/ sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID); ath_led_config(sc); ath_hal_setledstate(ah, HAL_LED_INIT); /* XXX not right but it's not used anywhere important */ ic->ic_phytype = IEEE80211_T_OFDM; ic->ic_opmode = IEEE80211_M_STA; ic->ic_caps = IEEE80211_C_STA /* station mode */ | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ | IEEE80211_C_HOSTAP /* hostap mode */ | IEEE80211_C_MONITOR /* monitor mode */ | IEEE80211_C_AHDEMO /* adhoc demo mode */ | IEEE80211_C_WDS /* 4-address traffic works */ | IEEE80211_C_MBSS /* mesh point link mode */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ #ifndef ATH_ENABLE_11N | IEEE80211_C_BGSCAN /* capable of bg scanning */ #endif | IEEE80211_C_TXFRAG /* handle tx frags */ #ifdef ATH_ENABLE_DFS | IEEE80211_C_DFS /* Enable radar detection */ #endif | IEEE80211_C_PMGT /* Station side power mgmt */ | IEEE80211_C_SWSLEEP ; /* * Query the hal to figure out h/w crypto support. */ if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP)) ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP; if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB)) ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB; if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM)) ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM; if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP)) ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP; if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) { ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP; /* * Check if h/w does the MIC and/or whether the * separate key cache entries are required to * handle both tx+rx MIC keys. */ if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC)) ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC; /* * If the h/w supports storing tx+rx MIC keys * in one cache slot automatically enable use. */ if (ath_hal_hastkipsplit(ah) || !ath_hal_settkipsplit(ah, AH_FALSE)) sc->sc_splitmic = 1; /* * If the h/w can do TKIP MIC together with WME then * we use it; otherwise we force the MIC to be done * in software by the net80211 layer. */ if (ath_hal_haswmetkipmic(ah)) sc->sc_wmetkipmic = 1; } sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR); /* * Check for multicast key search support. */ if (ath_hal_hasmcastkeysearch(sc->sc_ah) && !ath_hal_getmcastkeysearch(sc->sc_ah)) { ath_hal_setmcastkeysearch(sc->sc_ah, 1); } sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah); /* * Mark key cache slots associated with global keys * as in use. If we knew TKIP was not to be used we * could leave the +32, +64, and +32+64 slots free. */ for (i = 0; i < IEEE80211_WEP_NKID; i++) { setbit(sc->sc_keymap, i); setbit(sc->sc_keymap, i+64); if (sc->sc_splitmic) { setbit(sc->sc_keymap, i+32); setbit(sc->sc_keymap, i+32+64); } } /* * TPC support can be done either with a global cap or * per-packet support. The latter is not available on * all parts. We're a bit pedantic here as all parts * support a global cap. */ if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah)) ic->ic_caps |= IEEE80211_C_TXPMGT; /* * Mark WME capability only if we have sufficient * hardware queues to do proper priority scheduling. */ if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK]) ic->ic_caps |= IEEE80211_C_WME; /* * Check for misc other capabilities. 
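/*
 * Illustrative aside (not part of the diff): the keymap loop above reserves,
 * for each of the four global key indices i, the companion key-cache slots
 * the hardware ties to it: i+64 for the MIC key and, when tx/rx MIC keys
 * need separate entries ("split MIC"), i+32 and i+32+64 as well.  Standalone
 * sketch over a plain 128-slot bitmap:
 */
#include <stdint.h>

#define SK_NKID 4               /* global (WEP-style) key indices */

static void
sk_setbit(uint8_t *map, int bit)
{
    map[bit / 8] |= (uint8_t)(1u << (bit % 8));
}

static void
sketch_reserve_global_key_slots(uint8_t keymap[16], int splitmic)
{
    int i;

    for (i = 0; i < SK_NKID; i++) {
        sk_setbit(keymap, i);           /* the key itself */
        sk_setbit(keymap, i + 64);      /* its MIC key */
        if (splitmic) {
            sk_setbit(keymap, i + 32);          /* split rx/tx MIC */
            sk_setbit(keymap, i + 32 + 64);
        }
    }
}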
*/ if (ath_hal_hasbursting(ah)) ic->ic_caps |= IEEE80211_C_BURST; sc->sc_hasbmask = ath_hal_hasbssidmask(ah); sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah); sc->sc_hastsfadd = ath_hal_hastsfadjust(ah); sc->sc_rxslink = ath_hal_self_linked_final_rxdesc(ah); /* XXX TODO: just make this a "store tx/rx timestamp length" operation */ if (ath_hal_get_rx_tsf_prec(ah, &i)) { if (i == 32) { sc->sc_rxtsf32 = 1; } if (bootverbose) device_printf(sc->sc_dev, "RX timestamp: %d bits\n", i); } if (ath_hal_get_tx_tsf_prec(ah, &i)) { if (bootverbose) device_printf(sc->sc_dev, "TX timestamp: %d bits\n", i); } sc->sc_hasenforcetxop = ath_hal_hasenforcetxop(ah); sc->sc_rx_lnamixer = ath_hal_hasrxlnamixer(ah); sc->sc_hasdivcomb = ath_hal_hasdivantcomb(ah); /* * Some WB335 cards do not support antenna diversity. Since * we use a hardcoded value for AR9565 instead of using the * EEPROM/OTP data, remove the combining feature from * the HW capabilities bitmap. */ /* * XXX TODO: check reference driver and ath9k for what to do * here for WB335. I think we have to actually disable the * LNA div processing in the HAL and instead use the hard * coded values; and then use BT diversity. * * .. but also need to setup MCI too for WB335.. */ #if 0 if (sc->sc_pci_devinfo & (ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_AR9565_2ANT)) { device_printf(sc->sc_dev, "%s: WB335: disabling LNA mixer diversity\n", __func__); sc->sc_dolnadiv = 0; } #endif if (ath_hal_hasfastframes(ah)) ic->ic_caps |= IEEE80211_C_FF; wmodes = ath_hal_getwirelessmodes(ah); if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO)) ic->ic_caps |= IEEE80211_C_TURBOP; #ifdef IEEE80211_SUPPORT_TDMA if (ath_hal_macversion(ah) > 0x78) { ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */ ic->ic_tdma_update = ath_tdma_update; } #endif /* * TODO: enforce that at least this many frames are available * in the txbuf list before allowing data frames (raw or * otherwise) to be transmitted. */ sc->sc_txq_data_minfree = 10; /* * Shorten this to 64 packets, or 1/4 ath_txbuf, whichever * is smaller. * * Anything bigger can potentially see the cabq consume * almost all buffers, starving everything else, only to * see most fail to transmit in the given beacon interval. */ sc->sc_txq_mcastq_maxdepth = MIN(64, ath_txbuf / 4); /* * How deep can the node software TX queue get whilst it's asleep. */ sc->sc_txq_node_psq_maxdepth = 16; /* * Default the maximum queue to 1/4'th the TX buffers, or * 64, whichever is smaller. */ sc->sc_txq_node_maxdepth = MIN(64, ath_txbuf / 4); /* Enable CABQ by default */ sc->sc_cabq_enable = 1; /* * Allow the TX and RX chainmasks to be overridden by * environment variables and/or device.hints. * * This must be done early - before the hardware is * calibrated or before the 802.11n stream calculation * is done. */ if (resource_int_value(device_get_name(sc->sc_dev), device_get_unit(sc->sc_dev), "rx_chainmask", &rx_chainmask) == 0) { device_printf(sc->sc_dev, "Setting RX chainmask to 0x%x\n", rx_chainmask); (void) ath_hal_setrxchainmask(sc->sc_ah, rx_chainmask); } if (resource_int_value(device_get_name(sc->sc_dev), device_get_unit(sc->sc_dev), "tx_chainmask", &tx_chainmask) == 0) { device_printf(sc->sc_dev, "Setting TX chainmask to 0x%x\n", tx_chainmask); (void) ath_hal_settxchainmask(sc->sc_ah, tx_chainmask); } /* * Query the TX/RX chainmask configuration. * * This is only relevant for 11n devices. */ ath_hal_getrxchainmask(ah, &sc->sc_rxchainmask); ath_hal_gettxchainmask(ah, &sc->sc_txchainmask); /* * Disable MRR with protected frames by default. 
* Only 802.11n series NICs can handle this. */ sc->sc_mrrprot = 0; /* XXX should be a capability */ /* * Query the enterprise mode information the HAL. */ if (ath_hal_getcapability(ah, HAL_CAP_ENTERPRISE_MODE, 0, &sc->sc_ent_cfg) == HAL_OK) sc->sc_use_ent = 1; #ifdef ATH_ENABLE_11N /* * Query HT capabilities */ if (ath_hal_getcapability(ah, HAL_CAP_HT, 0, NULL) == HAL_OK && (wmodes & (HAL_MODE_HT20 | HAL_MODE_HT40))) { uint32_t rxs, txs; uint32_t ldpc; device_printf(sc->sc_dev, "[HT] enabling HT modes\n"); sc->sc_mrrprot = 1; /* XXX should be a capability */ ic->ic_htcaps = IEEE80211_HTC_HT /* HT operation */ | IEEE80211_HTC_AMPDU /* A-MPDU tx/rx */ | IEEE80211_HTC_AMSDU /* A-MSDU tx/rx */ | IEEE80211_HTCAP_MAXAMSDU_3839 /* max A-MSDU length */ | IEEE80211_HTCAP_SMPS_OFF; /* SM power save off */ /* * Enable short-GI for HT20 only if the hardware * advertises support. * Notably, anything earlier than the AR9287 doesn't. */ if ((ath_hal_getcapability(ah, HAL_CAP_HT20_SGI, 0, NULL) == HAL_OK) && (wmodes & HAL_MODE_HT20)) { device_printf(sc->sc_dev, "[HT] enabling short-GI in 20MHz mode\n"); ic->ic_htcaps |= IEEE80211_HTCAP_SHORTGI20; } if (wmodes & HAL_MODE_HT40) ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40 | IEEE80211_HTCAP_SHORTGI40; /* * TX/RX streams need to be taken into account when * negotiating which MCS rates it'll receive and * what MCS rates are available for TX. */ (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 0, &txs); (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 1, &rxs); ic->ic_txstream = txs; ic->ic_rxstream = rxs; /* * Setup TX and RX STBC based on what the HAL allows and * the currently configured chainmask set. * Ie - don't enable STBC TX if only one chain is enabled. * STBC RX is fine on a single RX chain; it just won't * provide any real benefit. */ if (ath_hal_getcapability(ah, HAL_CAP_RX_STBC, 0, NULL) == HAL_OK) { sc->sc_rx_stbc = 1; device_printf(sc->sc_dev, "[HT] 1 stream STBC receive enabled\n"); ic->ic_htcaps |= IEEE80211_HTCAP_RXSTBC_1STREAM; } if (txs > 1 && ath_hal_getcapability(ah, HAL_CAP_TX_STBC, 0, NULL) == HAL_OK) { sc->sc_tx_stbc = 1; device_printf(sc->sc_dev, "[HT] 1 stream STBC transmit enabled\n"); ic->ic_htcaps |= IEEE80211_HTCAP_TXSTBC; } (void) ath_hal_getcapability(ah, HAL_CAP_RTS_AGGR_LIMIT, 1, &sc->sc_rts_aggr_limit); if (sc->sc_rts_aggr_limit != (64 * 1024)) device_printf(sc->sc_dev, "[HT] RTS aggregates limited to %d KiB\n", sc->sc_rts_aggr_limit / 1024); /* * LDPC */ if ((ath_hal_getcapability(ah, HAL_CAP_LDPC, 0, &ldpc)) == HAL_OK && (ldpc == 1)) { sc->sc_has_ldpc = 1; device_printf(sc->sc_dev, "[HT] LDPC transmit/receive enabled\n"); ic->ic_htcaps |= IEEE80211_HTCAP_LDPC | IEEE80211_HTC_TXLDPC; } device_printf(sc->sc_dev, "[HT] %d RX streams; %d TX streams\n", rxs, txs); } #endif /* * Initial aggregation settings. */ sc->sc_hwq_limit_aggr = ATH_AGGR_MIN_QDEPTH; sc->sc_hwq_limit_nonaggr = ATH_NONAGGR_MIN_QDEPTH; sc->sc_tid_hwq_lo = ATH_AGGR_SCHED_LOW; sc->sc_tid_hwq_hi = ATH_AGGR_SCHED_HIGH; sc->sc_aggr_limit = ATH_AGGR_MAXSIZE; sc->sc_delim_min_pad = 0; /* * Check if the hardware requires PCI register serialisation. * Some of the Owl based MACs require this. */ if (mp_ncpus > 1 && ath_hal_getcapability(ah, HAL_CAP_SERIALISE_WAR, 0, NULL) == HAL_OK) { sc->sc_ah->ah_config.ah_serialise_reg_war = 1; device_printf(sc->sc_dev, "Enabling register serialisation\n"); } /* * Initialise the deferred completed RX buffer list. 
*/ TAILQ_INIT(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP]); TAILQ_INIT(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP]); /* * Indicate we need the 802.11 header padded to a * 32-bit boundary for 4-address and QoS frames. */ ic->ic_flags |= IEEE80211_F_DATAPAD; /* * Query the hal about antenna support. */ sc->sc_defant = ath_hal_getdefantenna(ah); /* * Not all chips have the VEOL support we want to * use with IBSS beacons; check here for it. */ sc->sc_hasveol = ath_hal_hasveol(ah); /* get mac address from kenv first, then hardware */ if (ath_fetch_mac_kenv(sc, ic->ic_macaddr) == 0) { /* Tell the HAL now about the new MAC */ ath_hal_setmac(ah, ic->ic_macaddr); } else { ath_hal_getmac(ah, ic->ic_macaddr); } if (sc->sc_hasbmask) ath_hal_getbssidmask(ah, sc->sc_hwbssidmask); /* NB: used to size node table key mapping array */ ic->ic_max_keyix = sc->sc_keymax; /* call MI attach routine. */ ieee80211_ifattach(ic); ic->ic_setregdomain = ath_setregdomain; ic->ic_getradiocaps = ath_getradiocaps; sc->sc_opmode = HAL_M_STA; /* override default methods */ ic->ic_ioctl = ath_ioctl; ic->ic_parent = ath_parent; ic->ic_transmit = ath_transmit; ic->ic_newassoc = ath_newassoc; ic->ic_updateslot = ath_updateslot; ic->ic_wme.wme_update = ath_wme_update; ic->ic_vap_create = ath_vap_create; ic->ic_vap_delete = ath_vap_delete; ic->ic_raw_xmit = ath_raw_xmit; ic->ic_update_mcast = ath_update_mcast; ic->ic_update_promisc = ath_update_promisc; ic->ic_node_alloc = ath_node_alloc; sc->sc_node_free = ic->ic_node_free; ic->ic_node_free = ath_node_free; sc->sc_node_cleanup = ic->ic_node_cleanup; ic->ic_node_cleanup = ath_node_cleanup; ic->ic_node_getsignal = ath_node_getsignal; ic->ic_scan_start = ath_scan_start; ic->ic_scan_end = ath_scan_end; ic->ic_set_channel = ath_set_channel; #ifdef ATH_ENABLE_11N /* 802.11n specific - but just override anyway */ sc->sc_addba_request = ic->ic_addba_request; sc->sc_addba_response = ic->ic_addba_response; sc->sc_addba_stop = ic->ic_addba_stop; sc->sc_bar_response = ic->ic_bar_response; sc->sc_addba_response_timeout = ic->ic_addba_response_timeout; ic->ic_addba_request = ath_addba_request; ic->ic_addba_response = ath_addba_response; ic->ic_addba_response_timeout = ath_addba_response_timeout; ic->ic_addba_stop = ath_addba_stop; ic->ic_bar_response = ath_bar_response; ic->ic_update_chw = ath_update_chw; #endif /* ATH_ENABLE_11N */ ic->ic_set_quiet = ath_set_quiet_ie; #ifdef ATH_ENABLE_RADIOTAP_VENDOR_EXT /* * There's one vendor bitmap entry in the RX radiotap * header; make sure that's taken into account. */ ieee80211_radiotap_attachv(ic, &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), 0, ATH_TX_RADIOTAP_PRESENT, &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), 1, ATH_RX_RADIOTAP_PRESENT); #else /* * No vendor bitmap/extensions are present. */ ieee80211_radiotap_attach(ic, &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), ATH_TX_RADIOTAP_PRESENT, &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), ATH_RX_RADIOTAP_PRESENT); #endif /* ATH_ENABLE_RADIOTAP_VENDOR_EXT */ /* * Setup the ALQ logging if required */ #ifdef ATH_DEBUG_ALQ if_ath_alq_init(&sc->sc_alq, device_get_nameunit(sc->sc_dev)); if_ath_alq_setcfg(&sc->sc_alq, sc->sc_ah->ah_macVersion, sc->sc_ah->ah_macRev, sc->sc_ah->ah_phyRev, sc->sc_ah->ah_magic); #endif /* * Setup dynamic sysctl's now that country code and * regdomain are available from the hal. */ ath_sysctlattach(sc); ath_sysctl_stats_attach(sc); ath_sysctl_hal_attach(sc); if (bootverbose) ieee80211_announce(ic); ath_announce(sc); /* * Put it to sleep for now. 
*/ ATH_LOCK(sc); ath_power_setpower(sc, HAL_PM_FULL_SLEEP, 1); ATH_UNLOCK(sc); return 0; bad2: ath_tx_cleanup(sc); ath_desc_free(sc); ath_txdma_teardown(sc); ath_rxdma_teardown(sc); bad: if (ah) ath_hal_detach(ah); sc->sc_invalid = 1; return error; } int ath_detach(struct ath_softc *sc) { /* * NB: the order of these is important: * o stop the chip so no more interrupts will fire * o call the 802.11 layer before detaching the hal to * insure callbacks into the driver to delete global * key cache entries can be handled * o free the taskqueue which drains any pending tasks * o reclaim the tx queue data structures after calling * the 802.11 layer as we'll get called back to reclaim * node state and potentially want to use them * o to cleanup the tx queues the hal is called, so detach * it last * Other than that, it's straightforward... */ /* * XXX Wake the hardware up first. ath_stop() will still * wake it up first, but I'd rather do it here just to * ensure it's awake. */ ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ath_power_setpower(sc, HAL_PM_AWAKE, 1); /* * Stop things cleanly. */ ath_stop(sc); ATH_UNLOCK(sc); ieee80211_ifdetach(&sc->sc_ic); taskqueue_free(sc->sc_tq); #ifdef ATH_TX99_DIAG if (sc->sc_tx99 != NULL) sc->sc_tx99->detach(sc->sc_tx99); #endif ath_rate_detach(sc->sc_rc); #ifdef ATH_DEBUG_ALQ if_ath_alq_tidyup(&sc->sc_alq); #endif ath_lna_div_detach(sc); ath_btcoex_detach(sc); ath_spectral_detach(sc); ath_dfs_detach(sc); ath_desc_free(sc); ath_txdma_teardown(sc); ath_rxdma_teardown(sc); ath_tx_cleanup(sc); ath_hal_detach(sc->sc_ah); /* NB: sets chip in full sleep */ return 0; } /* * MAC address handling for multiple BSS on the same radio. * The first vap uses the MAC address from the EEPROM. For * subsequent vap's we set the U/L bit (bit 1) in the MAC * address and use the next six bits as an index. */ static void assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone) { int i; if (clone && sc->sc_hasbmask) { /* NB: we only do this if h/w supports multiple bssid */ for (i = 0; i < 8; i++) if ((sc->sc_bssidmask & (1<sc_bssidmask |= 1<sc_hwbssidmask[0] &= ~mac[0]; if (i == 0) sc->sc_nbssid0++; } static void reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN]) { int i = mac[0] >> 2; uint8_t mask; if (i != 0 || --sc->sc_nbssid0 == 0) { sc->sc_bssidmask &= ~(1<sc_bssidmask & (1<sc_hwbssidmask[0] |= mask; } } /* * Assign a beacon xmit slot. We try to space out * assignments so when beacons are staggered the * traffic coming out of the cab q has maximal time * to go out before the next beacon is scheduled. 
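/*
 * Illustrative aside (not part of the diff; the assign_address() body above
 * is mangled in this copy): per its comment, additional vaps get a MAC
 * derived from the EEPROM address by setting the U/L ("locally
 * administered") bit, 0x02 in the first octet, and encoding a small per-vap
 * index in the bits above it; the hardware BSSID match mask is then relaxed
 * so those varying bits are ignored.  Standalone sketch of that derivation,
 * reconstructed from the comment rather than the garbled code:
 */
#include <stdint.h>

static void
sketch_derive_vap_mac(uint8_t mac[6], int idx, uint8_t hwbssidmask[6])
{
    if (idx != 0)
        mac[0] |= (uint8_t)((idx << 2) | 0x02);
    /*
     * The hardware must not key its BSSID match on bits that differ
     * between our addresses, so drop this address's first-octet bits
     * from the match mask.
     */
    hwbssidmask[0] &= (uint8_t)~mac[0];
}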
*/ static int assign_bslot(struct ath_softc *sc) { u_int slot, free; free = 0; for (slot = 0; slot < ATH_BCBUF; slot++) if (sc->sc_bslot[slot] == NULL) { if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL && sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL) return slot; free = slot; /* NB: keep looking for a double slot */ } return free; } static struct ieee80211vap * ath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac0[IEEE80211_ADDR_LEN]) { struct ath_softc *sc = ic->ic_softc; struct ath_vap *avp; struct ieee80211vap *vap; uint8_t mac[IEEE80211_ADDR_LEN]; int needbeacon, error; enum ieee80211_opmode ic_opmode; avp = malloc(sizeof(struct ath_vap), M_80211_VAP, M_WAITOK | M_ZERO); needbeacon = 0; IEEE80211_ADDR_COPY(mac, mac0); ATH_LOCK(sc); ic_opmode = opmode; /* default to opmode of new vap */ switch (opmode) { case IEEE80211_M_STA: if (sc->sc_nstavaps != 0) { /* XXX only 1 for now */ device_printf(sc->sc_dev, "only 1 sta vap supported\n"); goto bad; } if (sc->sc_nvaps) { /* * With multiple vaps we must fall back * to s/w beacon miss handling. */ flags |= IEEE80211_CLONE_NOBEACONS; } if (flags & IEEE80211_CLONE_NOBEACONS) { /* * Station mode w/o beacons are implemented w/ AP mode. */ ic_opmode = IEEE80211_M_HOSTAP; } break; case IEEE80211_M_IBSS: if (sc->sc_nvaps != 0) { /* XXX only 1 for now */ device_printf(sc->sc_dev, "only 1 ibss vap supported\n"); goto bad; } needbeacon = 1; break; case IEEE80211_M_AHDEMO: #ifdef IEEE80211_SUPPORT_TDMA if (flags & IEEE80211_CLONE_TDMA) { if (sc->sc_nvaps != 0) { device_printf(sc->sc_dev, "only 1 tdma vap supported\n"); goto bad; } needbeacon = 1; flags |= IEEE80211_CLONE_NOBEACONS; } /* fall thru... */ #endif case IEEE80211_M_MONITOR: if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) { /* * Adopt existing mode. Adding a monitor or ahdemo * vap to an existing configuration is of dubious * value but should be ok. */ /* XXX not right for monitor mode */ ic_opmode = ic->ic_opmode; } break; case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: needbeacon = 1; break; case IEEE80211_M_WDS: if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) { device_printf(sc->sc_dev, "wds not supported in sta mode\n"); goto bad; } /* * Silently remove any request for a unique * bssid; WDS vap's always share the local * mac address. */ flags &= ~IEEE80211_CLONE_BSSID; if (sc->sc_nvaps == 0) ic_opmode = IEEE80211_M_HOSTAP; else ic_opmode = ic->ic_opmode; break; default: device_printf(sc->sc_dev, "unknown opmode %d\n", opmode); goto bad; } /* * Check that a beacon buffer is available; the code below assumes it. */ if (needbeacon & TAILQ_EMPTY(&sc->sc_bbuf)) { device_printf(sc->sc_dev, "no beacon buffer available\n"); goto bad; } /* STA, AHDEMO? 
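/*
 * Illustrative aside (not part of the diff): assign_bslot() above prefers a
 * free beacon slot whose circular neighbours are also free, so staggered
 * beacons end up spread as far apart as possible; failing that it settles
 * for any free slot.  Standalone sketch over a small slot table:
 */
#include <stddef.h>

#define SK_NSLOTS 8

/* slots[i] != NULL means slot i is taken.  Returns the chosen slot. */
static int
sketch_pick_beacon_slot(void *const slots[SK_NSLOTS])
{
    int slot, fallback = 0;

    for (slot = 0; slot < SK_NSLOTS; slot++) {
        if (slots[slot] != NULL)
            continue;
        if (slots[(slot + 1) % SK_NSLOTS] == NULL &&
            slots[(slot + SK_NSLOTS - 1) % SK_NSLOTS] == NULL)
            return (slot);      /* isolated slot: best spacing */
        fallback = slot;        /* remember any free slot */
    }
    return (fallback);
}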
*/ if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS || opmode == IEEE80211_M_STA) { assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID); ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask); } vap = &avp->av_vap; /* XXX can't hold mutex across if_alloc */ ATH_UNLOCK(sc); error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); ATH_LOCK(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: error %d creating vap\n", __func__, error); goto bad2; } /* h/w crypto support */ vap->iv_key_alloc = ath_key_alloc; vap->iv_key_delete = ath_key_delete; vap->iv_key_set = ath_key_set; vap->iv_key_update_begin = ath_key_update_begin; vap->iv_key_update_end = ath_key_update_end; /* override various methods */ avp->av_recv_mgmt = vap->iv_recv_mgmt; vap->iv_recv_mgmt = ath_recv_mgmt; vap->iv_reset = ath_reset_vap; vap->iv_update_beacon = ath_beacon_update; avp->av_newstate = vap->iv_newstate; vap->iv_newstate = ath_newstate; avp->av_bmiss = vap->iv_bmiss; vap->iv_bmiss = ath_bmiss_vap; avp->av_node_ps = vap->iv_node_ps; vap->iv_node_ps = ath_node_powersave; avp->av_set_tim = vap->iv_set_tim; vap->iv_set_tim = ath_node_set_tim; avp->av_recv_pspoll = vap->iv_recv_pspoll; vap->iv_recv_pspoll = ath_node_recv_pspoll; /* Set default parameters */ /* * Anything earlier than some AR9300 series MACs don't * support a smaller MPDU density. */ vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8; /* * All NICs can handle the maximum size, however * AR5416 based MACs can only TX aggregates w/ RTS * protection when the total aggregate size is <= 8k. * However, for now that's enforced by the TX path. */ vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K; vap->iv_ampdu_limit = IEEE80211_HTCAP_MAXRXAMPDU_64K; avp->av_bslot = -1; if (needbeacon) { /* * Allocate beacon state and setup the q for buffered * multicast frames. We know a beacon buffer is * available because we checked above. */ avp->av_bcbuf = TAILQ_FIRST(&sc->sc_bbuf); TAILQ_REMOVE(&sc->sc_bbuf, avp->av_bcbuf, bf_list); if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) { /* * Assign the vap to a beacon xmit slot. As above * this cannot fail to find a free one. */ avp->av_bslot = assign_bslot(sc); KASSERT(sc->sc_bslot[avp->av_bslot] == NULL, ("beacon slot %u not empty", avp->av_bslot)); sc->sc_bslot[avp->av_bslot] = vap; sc->sc_nbcnvaps++; } if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) { /* * Multple vaps are to transmit beacons and we * have h/w support for TSF adjusting; enable * use of staggered beacons. */ sc->sc_stagbeacons = 1; } ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ); } ic->ic_opmode = ic_opmode; if (opmode != IEEE80211_M_WDS) { sc->sc_nvaps++; if (opmode == IEEE80211_M_STA) sc->sc_nstavaps++; if (opmode == IEEE80211_M_MBSS) sc->sc_nmeshvaps++; } switch (ic_opmode) { case IEEE80211_M_IBSS: sc->sc_opmode = HAL_M_IBSS; break; case IEEE80211_M_STA: sc->sc_opmode = HAL_M_STA; break; case IEEE80211_M_AHDEMO: #ifdef IEEE80211_SUPPORT_TDMA if (vap->iv_caps & IEEE80211_C_TDMA) { sc->sc_tdma = 1; /* NB: disable tsf adjust */ sc->sc_stagbeacons = 0; } /* * NB: adhoc demo mode is a pseudo mode; to the hal it's * just ap mode. */ /* fall thru... */ #endif case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: sc->sc_opmode = HAL_M_HOSTAP; break; case IEEE80211_M_MONITOR: sc->sc_opmode = HAL_M_MONITOR; break; default: /* XXX should not happen */ break; } if (sc->sc_hastsfadd) { /* * Configure whether or not TSF adjust should be done. 
*/ ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons); } if (flags & IEEE80211_CLONE_NOBEACONS) { /* * Enable s/w beacon miss handling. */ sc->sc_swbmiss = 1; } ATH_UNLOCK(sc); /* complete setup */ ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status, mac); return vap; bad2: reclaim_address(sc, mac); ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask); bad: free(avp, M_80211_VAP); ATH_UNLOCK(sc); return NULL; } static void ath_vap_delete(struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct ath_softc *sc = ic->ic_softc; struct ath_hal *ah = sc->sc_ah; struct ath_vap *avp = ATH_VAP(vap); ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__); if (sc->sc_running) { /* * Quiesce the hardware while we remove the vap. In * particular we need to reclaim all references to * the vap state by any frames pending on the tx queues. */ ath_hal_intrset(ah, 0); /* disable interrupts */ /* XXX Do all frames from all vaps/nodes need draining here? */ ath_stoprecv(sc, 1); /* stop recv side */ ath_draintxq(sc, ATH_RESET_DEFAULT); /* stop hw xmit side */ } /* .. leave the hardware awake for now. */ ieee80211_vap_detach(vap); /* * XXX Danger Will Robinson! Danger! * * Because ieee80211_vap_detach() can queue a frame (the station * diassociate message?) after we've drained the TXQ and * flushed the software TXQ, we will end up with a frame queued * to a node whose vap is about to be freed. * * To work around this, flush the hardware/software again. * This may be racy - the ath task may be running and the packet * may be being scheduled between sw->hw txq. Tsk. * * TODO: figure out why a new node gets allocated somewhere around * here (after the ath_tx_swq() call; and after an ath_stop() * call!) */ ath_draintxq(sc, ATH_RESET_DEFAULT); ATH_LOCK(sc); /* * Reclaim beacon state. Note this must be done before * the vap instance is reclaimed as we may have a reference * to it in the buffer for the beacon frame. */ if (avp->av_bcbuf != NULL) { if (avp->av_bslot != -1) { sc->sc_bslot[avp->av_bslot] = NULL; sc->sc_nbcnvaps--; } ath_beacon_return(sc, avp->av_bcbuf); avp->av_bcbuf = NULL; if (sc->sc_nbcnvaps == 0) { sc->sc_stagbeacons = 0; if (sc->sc_hastsfadd) ath_hal_settsfadjust(sc->sc_ah, 0); } /* * Reclaim any pending mcast frames for the vap. */ ath_tx_draintxq(sc, &avp->av_mcastq); } /* * Update bookkeeping. */ if (vap->iv_opmode == IEEE80211_M_STA) { sc->sc_nstavaps--; if (sc->sc_nstavaps == 0 && sc->sc_swbmiss) sc->sc_swbmiss = 0; } else if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_STA || vap->iv_opmode == IEEE80211_M_MBSS) { reclaim_address(sc, vap->iv_myaddr); ath_hal_setbssidmask(ah, sc->sc_hwbssidmask); if (vap->iv_opmode == IEEE80211_M_MBSS) sc->sc_nmeshvaps--; } if (vap->iv_opmode != IEEE80211_M_WDS) sc->sc_nvaps--; #ifdef IEEE80211_SUPPORT_TDMA /* TDMA operation ceases when the last vap is destroyed */ if (sc->sc_tdma && sc->sc_nvaps == 0) { sc->sc_tdma = 0; sc->sc_swbmiss = 0; } #endif free(avp, M_80211_VAP); if (sc->sc_running) { /* * Restart rx+tx machines if still running (RUNNING will * be reset if we just destroyed the last vap). */ if (ath_startrecv(sc) != 0) device_printf(sc->sc_dev, "%s: unable to restart recv logic\n", __func__); if (sc->sc_beacons) { /* restart beacons */ #ifdef IEEE80211_SUPPORT_TDMA if (sc->sc_tdma) ath_tdma_config(sc, NULL); else #endif ath_beacon_config(sc, NULL); } ath_hal_intrset(ah, sc->sc_imask); } /* Ok, let the hardware asleep. 
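 *
 * [Editorial sketch -- not original driver text.]  ath_vap_delete() above
 * follows the same power discipline as the rest of this file: force the
 * chip awake before touching registers, do the work, then restore the
 * previous policy.  The fragment below condenses that recurring pairing
 * using only calls already shown in this section; it is presumably
 * reference counted underneath so nested users behave, but that detail
 * is outside this excerpt.
 */
#if 0	/* illustrative only; condensed from the surrounding code */
	ATH_LOCK(sc);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ATH_UNLOCK(sc);

	/* ... ath_hal_*() register work goes here ... */

	ATH_LOCK(sc);
	ath_power_restore_power_state(sc);
	ATH_UNLOCK(sc);
#endif
/*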
*/ ath_power_restore_power_state(sc); ATH_UNLOCK(sc); } void ath_suspend(struct ath_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; sc->sc_resume_up = ic->ic_nrunning != 0; ieee80211_suspend_all(ic); /* * NB: don't worry about putting the chip in low power * mode; pci will power off our socket on suspend and * CardBus detaches the device. * * XXX TODO: well, that's great, except for non-cardbus * devices! */ /* * XXX This doesn't wait until all pending taskqueue * items and parallel transmit/receive/other threads * are running! */ ath_hal_intrset(sc->sc_ah, 0); taskqueue_block(sc->sc_tq); ATH_LOCK(sc); callout_stop(&sc->sc_cal_ch); ATH_UNLOCK(sc); /* * XXX ensure sc_invalid is 1 */ /* Disable the PCIe PHY, complete with workarounds */ ath_hal_enablepcie(sc->sc_ah, 1, 1); } /* * Reset the key cache since some parts do not reset the * contents on resume. First we clear all entries, then * re-load keys that the 802.11 layer assumes are setup * in h/w. */ static void ath_reset_keycache(struct ath_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ath_hal *ah = sc->sc_ah; int i; ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); for (i = 0; i < sc->sc_keymax; i++) ath_hal_keyreset(ah, i); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); ieee80211_crypto_reload_keys(ic); } /* * Fetch the current chainmask configuration based on the current * operating channel and options. */ static void ath_update_chainmasks(struct ath_softc *sc, struct ieee80211_channel *chan) { /* * Set TX chainmask to the currently configured chainmask; * the TX chainmask depends upon the current operating mode. */ sc->sc_cur_rxchainmask = sc->sc_rxchainmask; if (IEEE80211_IS_CHAN_HT(chan)) { sc->sc_cur_txchainmask = sc->sc_txchainmask; } else { sc->sc_cur_txchainmask = 1; } DPRINTF(sc, ATH_DEBUG_RESET, "%s: TX chainmask is now 0x%x, RX is now 0x%x\n", __func__, sc->sc_cur_txchainmask, sc->sc_cur_rxchainmask); } void ath_resume(struct ath_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ath_hal *ah = sc->sc_ah; HAL_STATUS status; ath_hal_enablepcie(ah, 0, 0); /* * Must reset the chip before we reload the * keycache as we were powered down on suspend. */ ath_update_chainmasks(sc, sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan); ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask, sc->sc_cur_rxchainmask); /* Ensure we set the current power state to on */ ATH_LOCK(sc); ath_power_setselfgen(sc, HAL_PM_AWAKE); ath_power_set_power_state(sc, HAL_PM_AWAKE); ath_power_setpower(sc, HAL_PM_AWAKE, 1); ATH_UNLOCK(sc); ath_hal_reset(ah, sc->sc_opmode, sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan, AH_FALSE, HAL_RESET_NORMAL, &status); ath_reset_keycache(sc); ATH_RX_LOCK(sc); sc->sc_rx_stopped = 1; sc->sc_rx_resetted = 1; ATH_RX_UNLOCK(sc); /* Let DFS at it in case it's a DFS channel */ ath_dfs_radar_enable(sc, ic->ic_curchan); /* Let spectral at in case spectral is enabled */ ath_spectral_enable(sc, ic->ic_curchan); /* * Let bluetooth coexistence at in case it's needed for this channel */ ath_btcoex_enable(sc, ic->ic_curchan); /* * If we're doing TDMA, enforce the TXOP limitation for chips that * support it. */ if (sc->sc_hasenforcetxop && sc->sc_tdma) ath_hal_setenforcetxop(sc->sc_ah, 1); else ath_hal_setenforcetxop(sc->sc_ah, 0); /* Restore the LED configuration */ ath_led_config(sc); ath_hal_setledstate(ah, HAL_LED_INIT); if (sc->sc_resume_up) ieee80211_resume_all(ic); ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); /* XXX beacons ? 
*/ } void ath_shutdown(struct ath_softc *sc) { ATH_LOCK(sc); ath_stop(sc); ATH_UNLOCK(sc); /* NB: no point powering down chip as we're about to reboot */ } /* * Interrupt handler. Most of the actual processing is deferred. */ void ath_intr(void *arg) { struct ath_softc *sc = arg; struct ath_hal *ah = sc->sc_ah; HAL_INT status = 0; uint32_t txqs; /* * If we're inside a reset path, just print a warning and * clear the ISR. The reset routine will finish it for us. */ ATH_PCU_LOCK(sc); if (sc->sc_inreset_cnt) { HAL_INT status; ath_hal_getisr(ah, &status); /* clear ISR */ ath_hal_intrset(ah, 0); /* disable further intr's */ DPRINTF(sc, ATH_DEBUG_ANY, "%s: in reset, ignoring: status=0x%x\n", __func__, status); ATH_PCU_UNLOCK(sc); return; } if (sc->sc_invalid) { /* * The hardware is not ready/present, don't touch anything. * Note this can happen early on if the IRQ is shared. */ DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__); ATH_PCU_UNLOCK(sc); return; } if (!ath_hal_intrpend(ah)) { /* shared irq, not for us */ ATH_PCU_UNLOCK(sc); return; } ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); if (sc->sc_ic.ic_nrunning == 0 && sc->sc_running == 0) { HAL_INT status; DPRINTF(sc, ATH_DEBUG_ANY, "%s: ic_nrunning %d sc_running %d\n", __func__, sc->sc_ic.ic_nrunning, sc->sc_running); ath_hal_getisr(ah, &status); /* clear ISR */ ath_hal_intrset(ah, 0); /* disable further intr's */ ATH_PCU_UNLOCK(sc); ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); return; } /* * Figure out the reason(s) for the interrupt. Note * that the hal returns a pseudo-ISR that may include * bits we haven't explicitly enabled so we mask the * value to insure we only process bits we requested. */ ath_hal_getisr(ah, &status); /* NB: clears ISR too */ DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status); ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1, "ath_intr: mask=0x%.8x", status); #ifdef ATH_DEBUG_ALQ if_ath_alq_post_intr(&sc->sc_alq, status, ah->ah_intrstate, ah->ah_syncstate); #endif /* ATH_DEBUG_ALQ */ #ifdef ATH_KTR_INTR_DEBUG ATH_KTR(sc, ATH_KTR_INTERRUPTS, 5, "ath_intr: ISR=0x%.8x, ISR_S0=0x%.8x, ISR_S1=0x%.8x, ISR_S2=0x%.8x, ISR_S5=0x%.8x", ah->ah_intrstate[0], ah->ah_intrstate[1], ah->ah_intrstate[2], ah->ah_intrstate[3], ah->ah_intrstate[6]); #endif /* Squirrel away SYNC interrupt debugging */ if (ah->ah_syncstate != 0) { int i; for (i = 0; i < 32; i++) if (ah->ah_syncstate & (1 << i)) sc->sc_intr_stats.sync_intr[i]++; } status &= sc->sc_imask; /* discard unasked for bits */ /* Short-circuit un-handled interrupts */ if (status == 0x0) { ATH_PCU_UNLOCK(sc); ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); return; } /* * Take a note that we're inside the interrupt handler, so * the reset routines know to wait. */ sc->sc_intr_cnt++; ATH_PCU_UNLOCK(sc); /* * Handle the interrupt. We won't run concurrent with the reset * or channel change routines as they'll wait for sc_intr_cnt * to be 0 before continuing. */ if (status & HAL_INT_FATAL) { sc->sc_stats.ast_hardware++; ath_hal_intrset(ah, 0); /* disable intr's until reset */ taskqueue_enqueue(sc->sc_tq, &sc->sc_fataltask); } else { if (status & HAL_INT_SWBA) { /* * Software beacon alert--time to send a beacon. * Handle beacon transmission directly; deferring * this is too slow to meet timing constraints * under load. 
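 *
 * [Editorial sketch -- not original driver text.]  The fragment below
 * condenses the handshake set up in ath_intr() above: the ISR publishes
 * itself via sc_intr_cnt under the PCU lock, and the reset path
 * (ath_txrx_stop_locked(), further below) polls the activity counters
 * until they drain.  The real wait is bounded by MAX_TXRX_ITERATIONS
 * and 10 ms sleeps; the loop here omits that bound for brevity.
 */
#if 0	/* illustrative only; condensed from the surrounding code */
	/* interrupt side */
	ATH_PCU_LOCK(sc);
	sc->sc_intr_cnt++;
	ATH_PCU_UNLOCK(sc);
	/* ... service the interrupt without holding the PCU lock ... */
	ATH_PCU_LOCK(sc);
	sc->sc_intr_cnt--;
	ATH_PCU_UNLOCK(sc);

	/* reset side, with the PCU lock held */
	while (sc->sc_rxproc_cnt || sc->sc_txproc_cnt ||
	    sc->sc_txstart_cnt || sc->sc_intr_cnt)
		msleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop",
		    msecs_to_ticks(10));
#endif
/*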
*/ #ifdef IEEE80211_SUPPORT_TDMA if (sc->sc_tdma) { if (sc->sc_tdmaswba == 0) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); ath_tdma_beacon_send(sc, vap); sc->sc_tdmaswba = vap->iv_tdma->tdma_bintval; } else sc->sc_tdmaswba--; } else #endif { ath_beacon_proc(sc, 0); #ifdef IEEE80211_SUPPORT_SUPERG /* * Schedule the rx taskq in case there's no * traffic so any frames held on the staging * queue are aged and potentially flushed. */ sc->sc_rx.recv_sched(sc, 1); #endif } } if (status & HAL_INT_RXEOL) { int imask; ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXEOL"); if (! sc->sc_isedma) { ATH_PCU_LOCK(sc); /* * NB: the hardware should re-read the link when * RXE bit is written, but it doesn't work at * least on older hardware revs. */ sc->sc_stats.ast_rxeol++; /* * Disable RXEOL/RXORN - prevent an interrupt * storm until the PCU logic can be reset. * In case the interface is reset some other * way before "sc_kickpcu" is called, don't * modify sc_imask - that way if it is reset * by a call to ath_reset() somehow, the * interrupt mask will be correctly reprogrammed. */ imask = sc->sc_imask; imask &= ~(HAL_INT_RXEOL | HAL_INT_RXORN); ath_hal_intrset(ah, imask); /* * Only blank sc_rxlink if we've not yet kicked * the PCU. * * This isn't entirely correct - the correct solution * would be to have a PCU lock and engage that for * the duration of the PCU fiddling; which would include * running the RX process. Otherwise we could end up * messing up the RX descriptor chain and making the * RX desc list much shorter. */ if (! sc->sc_kickpcu) sc->sc_rxlink = NULL; sc->sc_kickpcu = 1; ATH_PCU_UNLOCK(sc); } /* * Enqueue an RX proc to handle whatever * is in the RX queue. * This will then kick the PCU if required. */ sc->sc_rx.recv_sched(sc, 1); } if (status & HAL_INT_TXURN) { sc->sc_stats.ast_txurn++; /* bump tx trigger level */ ath_hal_updatetxtriglevel(ah, AH_TRUE); } /* * Handle both the legacy and RX EDMA interrupt bits. * Note that HAL_INT_RXLP is also HAL_INT_RXDESC. */ if (status & (HAL_INT_RX | HAL_INT_RXHP | HAL_INT_RXLP)) { sc->sc_stats.ast_rx_intr++; sc->sc_rx.recv_sched(sc, 1); } if (status & HAL_INT_TX) { sc->sc_stats.ast_tx_intr++; /* * Grab all the currently set bits in the HAL txq bitmap * and blank them. This is the only place we should be * doing this. */ if (! sc->sc_isedma) { ATH_PCU_LOCK(sc); txqs = 0xffffffff; ath_hal_gettxintrtxqs(sc->sc_ah, &txqs); ATH_KTR(sc, ATH_KTR_INTERRUPTS, 3, "ath_intr: TX; txqs=0x%08x, txq_active was 0x%08x, now 0x%08x", txqs, sc->sc_txq_active, sc->sc_txq_active | txqs); sc->sc_txq_active |= txqs; ATH_PCU_UNLOCK(sc); } taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask); } if (status & HAL_INT_BMISS) { sc->sc_stats.ast_bmiss++; taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask); } if (status & HAL_INT_GTT) sc->sc_stats.ast_tx_timeout++; if (status & HAL_INT_CST) sc->sc_stats.ast_tx_cst++; if (status & HAL_INT_MIB) { sc->sc_stats.ast_mib++; ATH_PCU_LOCK(sc); /* * Disable interrupts until we service the MIB * interrupt; otherwise it will continue to fire. */ ath_hal_intrset(ah, 0); /* * Let the hal handle the event. We assume it will * clear whatever condition caused the interrupt. */ ath_hal_mibevent(ah, &sc->sc_halstats); /* * Don't reset the interrupt if we've just * kicked the PCU, or we may get a nested * RXEOL before the rxproc has had a chance * to run. 
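 *
 * [Editorial sketch -- not original driver text.]  The RXEOL handling in
 * ath_intr() above boils down to the steps below: mask the storm-prone
 * bits in the hardware only (sc_imask itself is left alone so a later
 * reset reprograms the full mask), remember that the PCU needs a kick,
 * and schedule the deferred RX path, which is expected to restart RX DMA
 * and restore the interrupt mask once it has drained the queue.
 */
#if 0	/* illustrative only; condensed from the surrounding code */
	imask = sc->sc_imask & ~(HAL_INT_RXEOL | HAL_INT_RXORN);
	ath_hal_intrset(ah, imask);
	if (! sc->sc_kickpcu)
		sc->sc_rxlink = NULL;	/* only blank if not already kicked */
	sc->sc_kickpcu = 1;
	sc->sc_rx.recv_sched(sc, 1);	/* deferred RX does the recovery */
#endif
/*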
*/ if (sc->sc_kickpcu == 0) ath_hal_intrset(ah, sc->sc_imask); ATH_PCU_UNLOCK(sc); } if (status & HAL_INT_RXORN) { /* NB: hal marks HAL_INT_FATAL when RXORN is fatal */ ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXORN"); sc->sc_stats.ast_rxorn++; } if (status & HAL_INT_TSFOOR) { /* out of range beacon - wake the chip up, * but don't modify self-gen frame config */ device_printf(sc->sc_dev, "%s: TSFOOR\n", __func__); sc->sc_syncbeacon = 1; ATH_LOCK(sc); ath_power_setpower(sc, HAL_PM_AWAKE, 0); ATH_UNLOCK(sc); } if (status & HAL_INT_MCI) { ath_btcoex_mci_intr(sc); } } ATH_PCU_LOCK(sc); sc->sc_intr_cnt--; ATH_PCU_UNLOCK(sc); ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); } static void ath_fatal_proc(void *arg, int pending) { struct ath_softc *sc = arg; u_int32_t *state; u_int32_t len; void *sp; if (sc->sc_invalid) return; device_printf(sc->sc_dev, "hardware error; resetting\n"); /* * Fatal errors are unrecoverable. Typically these * are caused by DMA errors. Collect h/w state from * the hal so we can diagnose what's going on. */ if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) { KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len)); state = sp; device_printf(sc->sc_dev, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n", state[0], state[1] , state[2], state[3], state[4], state[5]); } ath_reset(sc, ATH_RESET_NOLOSS); } static void ath_bmiss_vap(struct ieee80211vap *vap) { struct ath_softc *sc = vap->iv_ic->ic_softc; /* * Workaround phantom bmiss interrupts by sanity-checking * the time of our last rx'd frame. If it is within the * beacon miss interval then ignore the interrupt. If it's * truly a bmiss we'll get another interrupt soon and that'll * be dispatched up for processing. Note this applies only * for h/w beacon miss events. */ /* * XXX TODO: Just read the TSF during the interrupt path; * that way we don't have to wake up again just to read it * again. */ ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) { u_int64_t lastrx = sc->sc_lastrx; u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah); /* XXX should take a locked ref to iv_bss */ u_int bmisstimeout = vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024; DPRINTF(sc, ATH_DEBUG_BEACON, "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n", __func__, (unsigned long long) tsf, (unsigned long long)(tsf - lastrx), (unsigned long long) lastrx, bmisstimeout); if (tsf - lastrx <= bmisstimeout) { sc->sc_stats.ast_bmiss_phantom++; ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); return; } } /* * Keep the hardware awake if it's asleep (and leave self-gen * frame config alone) until the next beacon, so we can resync * against the next beacon. * * This handles three common beacon miss cases in STA powersave mode - * (a) the beacon TBTT isnt a multiple of bintval; * (b) the beacon was missed; and * (c) the beacons are being delayed because the AP is busy and * isn't reliably able to meet its TBTT. */ ATH_LOCK(sc); ath_power_setpower(sc, HAL_PM_AWAKE, 0); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); DPRINTF(sc, ATH_DEBUG_BEACON, "%s: forced awake; force syncbeacon=1\n", __func__); /* * Attempt to force a beacon resync. */ sc->sc_syncbeacon = 1; ATH_VAP(vap)->av_bmiss(vap); } /* XXX this needs a force wakeup! 
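 *
 * [Editorial sketch -- not original driver text.]  ath_bmiss_vap() above
 * sizes its phantom-bmiss window as threshold * beacon interval (TU)
 * * 1024 us/TU and compares it against tsf - lastrx.  The standalone
 * arithmetic below just makes the units concrete; the threshold and
 * interval values are hypothetical.
 */
#if 0	/* illustrative only; not part of the driver build */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t bmissthreshold = 10;	/* beacons we may miss */
	uint64_t ni_intval = 100;	/* beacon interval, in TU */
	uint64_t window = bmissthreshold * ni_intval * 1024;	/* us */

	printf("phantom bmiss window: %ju us (~%.2f s)\n",
	    (uintmax_t)window, (double)window / 1e6);
	return (0);
}
#endif
/*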
*/ int ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs) { uint32_t rsize; void *sp; if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS, &mask, sizeof(mask), &sp, &rsize)) return 0; KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize)); *hangs = *(uint32_t *)sp; return 1; } static void ath_bmiss_proc(void *arg, int pending) { struct ath_softc *sc = arg; uint32_t hangs; DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending); ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); ath_beacon_miss(sc); /* * Do a reset upon any becaon miss event. * * It may be a non-recognised RX clear hang which needs a reset * to clear. */ if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) { ath_reset(sc, ATH_RESET_NOLOSS); device_printf(sc->sc_dev, "bb hang detected (0x%x), resetting\n", hangs); } else { ath_reset(sc, ATH_RESET_NOLOSS); ieee80211_beacon_miss(&sc->sc_ic); } /* Force a beacon resync, in case they've drifted */ sc->sc_syncbeacon = 1; ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); } /* * Handle TKIP MIC setup to deal hardware that doesn't do MIC * calcs together with WME. If necessary disable the crypto * hardware and mark the 802.11 state so keys will be setup * with the MIC work done in software. */ static void ath_settkipmic(struct ath_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) { if (ic->ic_flags & IEEE80211_F_WME) { ath_hal_settkipmic(sc->sc_ah, AH_FALSE); ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC; } else { ath_hal_settkipmic(sc->sc_ah, AH_TRUE); ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC; } } } static void ath_vap_clear_quiet_ie(struct ath_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap; struct ath_vap *avp; TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { avp = ATH_VAP(vap); /* Quiet time handling - ensure we resync */ memset(&avp->quiet_ie, 0, sizeof(avp->quiet_ie)); } } static int ath_init(struct ath_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ath_hal *ah = sc->sc_ah; HAL_STATUS status; ATH_LOCK_ASSERT(sc); /* * Force the sleep state awake. */ ath_power_setselfgen(sc, HAL_PM_AWAKE); ath_power_set_power_state(sc, HAL_PM_AWAKE); ath_power_setpower(sc, HAL_PM_AWAKE, 1); /* * Stop anything previously setup. This is safe * whether this is the first time through or not. */ ath_stop(sc); /* * The basic interface to setting the hardware in a good * state is ``reset''. On return the hardware is known to * be powered up and with interrupts disabled. This must * be followed by initialization of the appropriate bits * and then setup of the interrupt mask. 
*/ ath_settkipmic(sc); ath_update_chainmasks(sc, ic->ic_curchan); ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask, sc->sc_cur_rxchainmask); if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE, HAL_RESET_NORMAL, &status)) { device_printf(sc->sc_dev, "unable to reset hardware; hal status %u\n", status); return (ENODEV); } ATH_RX_LOCK(sc); sc->sc_rx_stopped = 1; sc->sc_rx_resetted = 1; ATH_RX_UNLOCK(sc); /* Clear quiet IE state for each VAP */ ath_vap_clear_quiet_ie(sc); ath_chan_change(sc, ic->ic_curchan); /* Let DFS at it in case it's a DFS channel */ ath_dfs_radar_enable(sc, ic->ic_curchan); /* Let spectral at in case spectral is enabled */ ath_spectral_enable(sc, ic->ic_curchan); /* * Let bluetooth coexistence at in case it's needed for this channel */ ath_btcoex_enable(sc, ic->ic_curchan); /* * If we're doing TDMA, enforce the TXOP limitation for chips that * support it. */ if (sc->sc_hasenforcetxop && sc->sc_tdma) ath_hal_setenforcetxop(sc->sc_ah, 1); else ath_hal_setenforcetxop(sc->sc_ah, 0); /* * Likewise this is set during reset so update * state cached in the driver. */ sc->sc_diversity = ath_hal_getdiversity(ah); sc->sc_lastlongcal = ticks; sc->sc_resetcal = 1; sc->sc_lastcalreset = 0; sc->sc_lastani = ticks; sc->sc_lastshortcal = ticks; sc->sc_doresetcal = AH_FALSE; /* * Beacon timers were cleared here; give ath_newstate() * a hint that the beacon timers should be poked when * things transition to the RUN state. */ sc->sc_beacons = 0; /* * Setup the hardware after reset: the key cache * is filled as needed and the receive engine is * set going. Frame transmit is handled entirely * in the frame output path; there's nothing to do * here except setup the interrupt mask. */ if (ath_startrecv(sc) != 0) { device_printf(sc->sc_dev, "unable to start recv logic\n"); ath_power_restore_power_state(sc); return (ENODEV); } /* * Enable interrupts. */ sc->sc_imask = HAL_INT_RX | HAL_INT_TX | HAL_INT_RXORN | HAL_INT_TXURN | HAL_INT_FATAL | HAL_INT_GLOBAL; /* * Enable RX EDMA bits. Note these overlap with * HAL_INT_RX and HAL_INT_RXDESC respectively. */ if (sc->sc_isedma) sc->sc_imask |= (HAL_INT_RXHP | HAL_INT_RXLP); /* * If we're an EDMA NIC, we don't care about RXEOL. * Writing a new descriptor in will simply restart * RX DMA. */ if (! sc->sc_isedma) sc->sc_imask |= HAL_INT_RXEOL; /* * Enable MCI interrupt for MCI devices. */ if (sc->sc_btcoex_mci) sc->sc_imask |= HAL_INT_MCI; /* * Enable MIB interrupts when there are hardware phy counters. * Note we only do this (at the moment) for station mode. */ if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA) sc->sc_imask |= HAL_INT_MIB; /* * XXX add capability for this. * * If we're in STA mode (and maybe IBSS?) then register for * TSFOOR interrupts. */ if (ic->ic_opmode == IEEE80211_M_STA) sc->sc_imask |= HAL_INT_TSFOOR; /* Enable global TX timeout and carrier sense timeout if available */ if (ath_hal_gtxto_supported(ah)) sc->sc_imask |= HAL_INT_GTT; DPRINTF(sc, ATH_DEBUG_RESET, "%s: imask=0x%x\n", __func__, sc->sc_imask); sc->sc_running = 1; callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc); ath_hal_intrset(ah, sc->sc_imask); ath_power_restore_power_state(sc); return (0); } static void ath_stop(struct ath_softc *sc) { struct ath_hal *ah = sc->sc_ah; ATH_LOCK_ASSERT(sc); /* * Wake the hardware up before fiddling with it. 
*/ ath_power_set_power_state(sc, HAL_PM_AWAKE); if (sc->sc_running) { /* * Shutdown the hardware and driver: * reset 802.11 state machine * turn off timers * disable interrupts * turn off the radio * clear transmit machinery * clear receive machinery * drain and release tx queues * reclaim beacon resources * power down hardware * * Note that some of this work is not possible if the * hardware is gone (invalid). */ #ifdef ATH_TX99_DIAG if (sc->sc_tx99 != NULL) sc->sc_tx99->stop(sc->sc_tx99); #endif callout_stop(&sc->sc_wd_ch); sc->sc_wd_timer = 0; sc->sc_running = 0; if (!sc->sc_invalid) { if (sc->sc_softled) { callout_stop(&sc->sc_ledtimer); ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon); sc->sc_blinking = 0; } ath_hal_intrset(ah, 0); } /* XXX we should stop RX regardless of whether it's valid */ if (!sc->sc_invalid) { ath_stoprecv(sc, 1); ath_hal_phydisable(ah); } else sc->sc_rxlink = NULL; ath_draintxq(sc, ATH_RESET_DEFAULT); ath_beacon_free(sc); /* XXX not needed */ } /* And now, restore the current power state */ ath_power_restore_power_state(sc); } /* * Wait until all pending TX/RX has completed. * * This waits until all existing transmit, receive and interrupts * have completed. It's assumed that the caller has first * grabbed the reset lock so it doesn't try to do overlapping * chip resets. */ #define MAX_TXRX_ITERATIONS 100 static void ath_txrx_stop_locked(struct ath_softc *sc) { int i = MAX_TXRX_ITERATIONS; ATH_UNLOCK_ASSERT(sc); ATH_PCU_LOCK_ASSERT(sc); /* * Sleep until all the pending operations have completed. * * The caller must ensure that reset has been incremented * or the pending operations may continue being queued. */ while (sc->sc_rxproc_cnt || sc->sc_txproc_cnt || sc->sc_txstart_cnt || sc->sc_intr_cnt) { if (i <= 0) break; msleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop", msecs_to_ticks(10)); i--; } if (i <= 0) device_printf(sc->sc_dev, "%s: didn't finish after %d iterations\n", __func__, MAX_TXRX_ITERATIONS); } #undef MAX_TXRX_ITERATIONS #if 0 static void ath_txrx_stop(struct ath_softc *sc) { ATH_UNLOCK_ASSERT(sc); ATH_PCU_UNLOCK_ASSERT(sc); ATH_PCU_LOCK(sc); ath_txrx_stop_locked(sc); ATH_PCU_UNLOCK(sc); } #endif static void ath_txrx_start(struct ath_softc *sc) { taskqueue_unblock(sc->sc_tq); } /* * Grab the reset lock, and wait around until no one else * is trying to do anything with it. * * This is totally horrible but we can't hold this lock for * long enough to do TX/RX or we end up with net80211/ip stack * LORs and eventual deadlock. * * "dowait" signals whether to spin, waiting for the reset * lock count to reach 0. This should (for now) only be used * during the reset path, as the rest of the code may not * be locking-reentrant enough to behave correctly. * * Another, cleaner way should be found to serialise all of * these operations. */ #define MAX_RESET_ITERATIONS 25 static int ath_reset_grablock(struct ath_softc *sc, int dowait) { int w = 0; int i = MAX_RESET_ITERATIONS; ATH_PCU_LOCK_ASSERT(sc); do { if (sc->sc_inreset_cnt == 0) { w = 1; break; } if (dowait == 0) { w = 0; break; } ATH_PCU_UNLOCK(sc); /* * 1 tick is likely not enough time for long calibrations * to complete. So we should wait quite a while. */ pause("ath_reset_grablock", msecs_to_ticks(100)); i--; ATH_PCU_LOCK(sc); } while (i > 0); /* * We always increment the refcounter, regardless * of whether we succeeded to get it in an exclusive * way. 
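 *
 * [Editorial sketch -- not original driver text.]  The fragment below
 * condenses the caller protocol around ath_reset_grablock() as used by
 * ath_reset() just below: the reference count is bumped even when the
 * lock was not obtained exclusively, so every caller must drop it and
 * re-enable interrupts afterwards (the real code also juggles
 * sc_txstart_cnt, which is omitted here).
 */
#if 0	/* illustrative only; condensed from the surrounding code */
	ATH_PCU_LOCK(sc);
	if (ath_reset_grablock(sc, 1) == 0)
		device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
		    __func__);
	ath_hal_intrset(ah, 0);		/* quiet the chip */
	ath_txrx_stop_locked(sc);	/* wait for in-flight TX/RX/intr */
	ATH_PCU_UNLOCK(sc);

	/* ... stop, reprogram and restart the hardware ... */

	ATH_PCU_LOCK(sc);
	sc->sc_inreset_cnt--;
	ath_hal_intrset(ah, sc->sc_imask);
	ATH_PCU_UNLOCK(sc);
#endif
/*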
*/ sc->sc_inreset_cnt++; if (i <= 0) device_printf(sc->sc_dev, "%s: didn't finish after %d iterations\n", __func__, MAX_RESET_ITERATIONS); if (w == 0) device_printf(sc->sc_dev, "%s: warning, recursive reset path!\n", __func__); return w; } #undef MAX_RESET_ITERATIONS /* * Reset the hardware w/o losing operational state. This is * basically a more efficient way of doing ath_stop, ath_init, * followed by state transitions to the current 802.11 * operational state. Used to recover from various errors and * to reset or reload hardware state. */ int ath_reset(struct ath_softc *sc, ATH_RESET_TYPE reset_type) { struct ieee80211com *ic = &sc->sc_ic; struct ath_hal *ah = sc->sc_ah; HAL_STATUS status; int i; DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__); /* Ensure ATH_LOCK isn't held; ath_rx_proc can't be locked */ ATH_PCU_UNLOCK_ASSERT(sc); ATH_UNLOCK_ASSERT(sc); /* Try to (stop any further TX/RX from occurring */ taskqueue_block(sc->sc_tq); /* * Wake the hardware up. */ ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); ATH_PCU_LOCK(sc); /* * Grab the reset lock before TX/RX is stopped. * * This is needed to ensure that when the TX/RX actually does finish, * no further TX/RX/reset runs in parallel with this. */ if (ath_reset_grablock(sc, 1) == 0) { device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n", __func__); } /* disable interrupts */ ath_hal_intrset(ah, 0); /* * Now, ensure that any in progress TX/RX completes before we * continue. */ ath_txrx_stop_locked(sc); ATH_PCU_UNLOCK(sc); /* * Regardless of whether we're doing a no-loss flush or * not, stop the PCU and handle what's in the RX queue. * That way frames aren't dropped which shouldn't be. */ ath_stoprecv(sc, (reset_type != ATH_RESET_NOLOSS)); ath_rx_flush(sc); /* * Should now wait for pending TX/RX to complete * and block future ones from occurring. This needs to be * done before the TX queue is drained. */ ath_draintxq(sc, reset_type); /* stop xmit side */ ath_settkipmic(sc); /* configure TKIP MIC handling */ /* NB: indicate channel change so we do a full reset */ ath_update_chainmasks(sc, ic->ic_curchan); ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask, sc->sc_cur_rxchainmask); if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, HAL_RESET_NORMAL, &status)) device_printf(sc->sc_dev, "%s: unable to reset hardware; hal status %u\n", __func__, status); sc->sc_diversity = ath_hal_getdiversity(ah); ATH_RX_LOCK(sc); sc->sc_rx_stopped = 1; sc->sc_rx_resetted = 1; ATH_RX_UNLOCK(sc); /* Quiet time handling - ensure we resync */ ath_vap_clear_quiet_ie(sc); /* Let DFS at it in case it's a DFS channel */ ath_dfs_radar_enable(sc, ic->ic_curchan); /* Let spectral at in case spectral is enabled */ ath_spectral_enable(sc, ic->ic_curchan); /* * Let bluetooth coexistence at in case it's needed for this channel */ ath_btcoex_enable(sc, ic->ic_curchan); /* * If we're doing TDMA, enforce the TXOP limitation for chips that * support it. */ if (sc->sc_hasenforcetxop && sc->sc_tdma) ath_hal_setenforcetxop(sc->sc_ah, 1); else ath_hal_setenforcetxop(sc->sc_ah, 0); if (ath_startrecv(sc) != 0) /* restart recv */ device_printf(sc->sc_dev, "%s: unable to start recv logic\n", __func__); /* * We may be doing a reset in response to an ioctl * that changes the channel so update any state that * might change as a result. 
*/ ath_chan_change(sc, ic->ic_curchan); if (sc->sc_beacons) { /* restart beacons */ #ifdef IEEE80211_SUPPORT_TDMA if (sc->sc_tdma) ath_tdma_config(sc, NULL); else #endif ath_beacon_config(sc, NULL); } /* * Release the reset lock and re-enable interrupts here. * If an interrupt was being processed in ath_intr(), * it would disable interrupts at this point. So we have * to atomically enable interrupts and decrement the * reset counter - this way ath_intr() doesn't end up * disabling interrupts without a corresponding enable * in the rest or channel change path. * * Grab the TX reference in case we need to transmit. * That way a parallel transmit doesn't. */ ATH_PCU_LOCK(sc); sc->sc_inreset_cnt--; sc->sc_txstart_cnt++; /* XXX only do this if sc_inreset_cnt == 0? */ ath_hal_intrset(ah, sc->sc_imask); ATH_PCU_UNLOCK(sc); /* * TX and RX can be started here. If it were started with * sc_inreset_cnt > 0, the TX and RX path would abort. * Thus if this is a nested call through the reset or * channel change code, TX completion will occur but * RX completion and ath_start / ath_tx_start will not * run. */ /* Restart TX/RX as needed */ ath_txrx_start(sc); /* XXX TODO: we need to hold the tx refcount here! */ /* Restart TX completion and pending TX */ if (reset_type == ATH_RESET_NOLOSS) { for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { if (ATH_TXQ_SETUP(sc, i)) { ATH_TXQ_LOCK(&sc->sc_txq[i]); ath_txq_restart_dma(sc, &sc->sc_txq[i]); ATH_TXQ_UNLOCK(&sc->sc_txq[i]); ATH_TX_LOCK(sc); ath_txq_sched(sc, &sc->sc_txq[i]); ATH_TX_UNLOCK(sc); } } } ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); ATH_PCU_LOCK(sc); sc->sc_txstart_cnt--; ATH_PCU_UNLOCK(sc); /* Handle any frames in the TX queue */ /* * XXX should this be done by the caller, rather than * ath_reset() ? */ ath_tx_kick(sc); /* restart xmit */ return 0; } static int ath_reset_vap(struct ieee80211vap *vap, u_long cmd) { struct ieee80211com *ic = vap->iv_ic; struct ath_softc *sc = ic->ic_softc; struct ath_hal *ah = sc->sc_ah; switch (cmd) { case IEEE80211_IOC_TXPOWER: /* * If per-packet TPC is enabled, then we have nothing * to do; otherwise we need to force the global limit. * All this can happen directly; no need to reset. */ if (!ath_hal_gettpc(ah)) ath_hal_settxpowlimit(ah, ic->ic_txpowlimit); return 0; } /* XXX? Full or NOLOSS? */ return ath_reset(sc, ATH_RESET_FULL); } struct ath_buf * _ath_getbuf_locked(struct ath_softc *sc, ath_buf_type_t btype) { struct ath_buf *bf; ATH_TXBUF_LOCK_ASSERT(sc); if (btype == ATH_BUFTYPE_MGMT) bf = TAILQ_FIRST(&sc->sc_txbuf_mgmt); else bf = TAILQ_FIRST(&sc->sc_txbuf); if (bf == NULL) { sc->sc_stats.ast_tx_getnobuf++; } else { if (bf->bf_flags & ATH_BUF_BUSY) { sc->sc_stats.ast_tx_getbusybuf++; bf = NULL; } } if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0) { if (btype == ATH_BUFTYPE_MGMT) TAILQ_REMOVE(&sc->sc_txbuf_mgmt, bf, bf_list); else { TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list); sc->sc_txbuf_cnt--; /* * This shuldn't happen; however just to be * safe print a warning and fudge the txbuf * count. */ if (sc->sc_txbuf_cnt < 0) { device_printf(sc->sc_dev, "%s: sc_txbuf_cnt < 0?\n", __func__); sc->sc_txbuf_cnt = 0; } } } else bf = NULL; if (bf == NULL) { /* XXX should check which list, mgmt or otherwise */ DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__, TAILQ_FIRST(&sc->sc_txbuf) == NULL ? 
"out of xmit buffers" : "xmit buffer busy"); return NULL; } /* XXX TODO: should do this at buffer list initialisation */ /* XXX (then, ensure the buffer has the right flag set) */ bf->bf_flags = 0; if (btype == ATH_BUFTYPE_MGMT) bf->bf_flags |= ATH_BUF_MGMT; else bf->bf_flags &= (~ATH_BUF_MGMT); /* Valid bf here; clear some basic fields */ bf->bf_next = NULL; /* XXX just to be sure */ bf->bf_last = NULL; /* XXX again, just to be sure */ bf->bf_comp = NULL; /* XXX again, just to be sure */ bzero(&bf->bf_state, sizeof(bf->bf_state)); /* * Track the descriptor ID only if doing EDMA */ if (sc->sc_isedma) { bf->bf_descid = sc->sc_txbuf_descid; sc->sc_txbuf_descid++; } return bf; } /* * When retrying a software frame, buffers marked ATH_BUF_BUSY * can't be thrown back on the queue as they could still be * in use by the hardware. * * This duplicates the buffer, or returns NULL. * * The descriptor is also copied but the link pointers and * the DMA segments aren't copied; this frame should thus * be again passed through the descriptor setup/chain routines * so the link is correct. * * The caller must free the buffer using ath_freebuf(). */ struct ath_buf * ath_buf_clone(struct ath_softc *sc, struct ath_buf *bf) { struct ath_buf *tbf; tbf = ath_getbuf(sc, (bf->bf_flags & ATH_BUF_MGMT) ? ATH_BUFTYPE_MGMT : ATH_BUFTYPE_NORMAL); if (tbf == NULL) return NULL; /* XXX failure? Why? */ /* Copy basics */ tbf->bf_next = NULL; tbf->bf_nseg = bf->bf_nseg; tbf->bf_flags = bf->bf_flags & ATH_BUF_FLAGS_CLONE; tbf->bf_status = bf->bf_status; tbf->bf_m = bf->bf_m; tbf->bf_node = bf->bf_node; KASSERT((bf->bf_node != NULL), ("%s: bf_node=NULL!", __func__)); /* will be setup by the chain/setup function */ tbf->bf_lastds = NULL; /* for now, last == self */ tbf->bf_last = tbf; tbf->bf_comp = bf->bf_comp; /* NOTE: DMA segments will be setup by the setup/chain functions */ /* The caller has to re-init the descriptor + links */ /* * Free the DMA mapping here, before we NULL the mbuf. * We must only call bus_dmamap_unload() once per mbuf chain * or behaviour is undefined. */ if (bf->bf_m != NULL) { /* * XXX is this POSTWRITE call required? */ bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); } bf->bf_m = NULL; bf->bf_node = NULL; /* Copy state */ memcpy(&tbf->bf_state, &bf->bf_state, sizeof(bf->bf_state)); return tbf; } struct ath_buf * ath_getbuf(struct ath_softc *sc, ath_buf_type_t btype) { struct ath_buf *bf; ATH_TXBUF_LOCK(sc); bf = _ath_getbuf_locked(sc, btype); /* * If a mgmt buffer was requested but we're out of those, * try requesting a normal one. */ if (bf == NULL && btype == ATH_BUFTYPE_MGMT) bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL); ATH_TXBUF_UNLOCK(sc); if (bf == NULL) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__); sc->sc_stats.ast_tx_qstop++; } return bf; } /* * Transmit a single frame. * * net80211 will free the node reference if the transmit * fails, so don't free the node reference here. */ static int ath_transmit(struct ieee80211com *ic, struct mbuf *m) { struct ath_softc *sc = ic->ic_softc; struct ieee80211_node *ni; struct mbuf *next; struct ath_buf *bf; ath_bufhead frags; int retval = 0; /* * Tell the reset path that we're currently transmitting. 
*/ ATH_PCU_LOCK(sc); if (sc->sc_inreset_cnt > 0) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: sc_inreset_cnt > 0; bailing\n", __func__); ATH_PCU_UNLOCK(sc); sc->sc_stats.ast_tx_qstop++; ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start_task: OACTIVE, finish"); return (ENOBUFS); /* XXX should be EINVAL or? */ } sc->sc_txstart_cnt++; ATH_PCU_UNLOCK(sc); /* Wake the hardware up already */ ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); ATH_KTR(sc, ATH_KTR_TX, 0, "ath_transmit: start"); /* * Grab the TX lock - it's ok to do this here; we haven't * yet started transmitting. */ ATH_TX_LOCK(sc); /* * Node reference, if there's one. */ ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; /* * Enforce how deep a node queue can get. * * XXX it would be nicer if we kept an mbuf queue per * node and only whacked them into ath_bufs when we * are ready to schedule some traffic from them. * .. that may come later. * * XXX we should also track the per-node hardware queue * depth so it is easy to limit the _SUM_ of the swq and * hwq frames. Since we only schedule two HWQ frames * at a time, this should be OK for now. */ if ((!(m->m_flags & M_EAPOL)) && (ATH_NODE(ni)->an_swq_depth > sc->sc_txq_node_maxdepth)) { sc->sc_stats.ast_tx_nodeq_overflow++; retval = ENOBUFS; goto finish; } /* * Check how many TX buffers are available. * * If this is for non-EAPOL traffic, just leave some * space free in order for buffer cloning and raw * frame transmission to occur. * * If it's for EAPOL traffic, ignore this for now. * Management traffic will be sent via the raw transmit * method which bypasses this check. * * This is needed to ensure that EAPOL frames during * (re) keying have a chance to go out. * * See kern/138379 for more information. */ if ((!(m->m_flags & M_EAPOL)) && (sc->sc_txbuf_cnt <= sc->sc_txq_data_minfree)) { sc->sc_stats.ast_tx_nobuf++; retval = ENOBUFS; goto finish; } /* * Grab a TX buffer and associated resources. * * If it's an EAPOL frame, allocate a MGMT ath_buf. * That way even with temporary buffer exhaustion due to * the data path doesn't leave us without the ability * to transmit management frames. * * Otherwise allocate a normal buffer. */ if (m->m_flags & M_EAPOL) bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT); else bf = ath_getbuf(sc, ATH_BUFTYPE_NORMAL); if (bf == NULL) { /* * If we failed to allocate a buffer, fail. * * We shouldn't fail normally, due to the check * above. */ sc->sc_stats.ast_tx_nobuf++; retval = ENOBUFS; goto finish; } /* * At this point we have a buffer; so we need to free it * if we hit any error conditions. */ /* * Check for fragmentation. If this frame * has been broken up verify we have enough * buffers to send all the fragments so all * go out or none... */ TAILQ_INIT(&frags); if ((m->m_flags & M_FRAG) && !ath_txfrag_setup(sc, &frags, m, ni)) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: out of txfrag buffers\n", __func__); sc->sc_stats.ast_tx_nofrag++; if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); /* * XXXGL: is mbuf valid after ath_txfrag_setup? If yes, * we shouldn't free it but return back. */ ieee80211_free_mbuf(m); m = NULL; goto bad; } /* * At this point if we have any TX fragments, then we will * have bumped the node reference once for each of those. */ /* * XXX Is there anything actually _enforcing_ that the * fragments are being transmitted in one hit, rather than * being interleaved with other transmissions on that * hardware queue? * * The ATH TX output lock is the only thing serialising this * right now. 
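 *
 * [Editorial sketch -- not original driver text.]  The admission checks
 * in ath_transmit() above reduce to the policy below: ordinary data is
 * refused once a node's software queue is too deep or the free buffer
 * pool drops to the reserved floor, while EAPOL frames skip both checks
 * and draw from the management pool so (re)keying can complete even
 * under buffer pressure (see the kern/138379 note above).  Error
 * unwinding and statistics are omitted here.
 */
#if 0	/* illustrative only; condensed from the surrounding code */
	if (!(m->m_flags & M_EAPOL) &&
	    ATH_NODE(ni)->an_swq_depth > sc->sc_txq_node_maxdepth)
		return (ENOBUFS);	/* per-node software queue is full */

	if (!(m->m_flags & M_EAPOL) &&
	    sc->sc_txbuf_cnt <= sc->sc_txq_data_minfree)
		return (ENOBUFS);	/* keep headroom for EAPOL/mgmt */

	bf = ath_getbuf(sc, (m->m_flags & M_EAPOL) ?
	    ATH_BUFTYPE_MGMT : ATH_BUFTYPE_NORMAL);
#endif
/*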
*/ /* * Calculate the "next fragment" length field in ath_buf * in order to let the transmit path know enough about * what to next write to the hardware. */ if (m->m_flags & M_FRAG) { struct ath_buf *fbf = bf; struct ath_buf *n_fbf = NULL; struct mbuf *fm = m->m_nextpkt; /* * We need to walk the list of fragments and set * the next size to the following buffer. * However, the first buffer isn't in the frag * list, so we have to do some gymnastics here. */ TAILQ_FOREACH(n_fbf, &frags, bf_list) { fbf->bf_nextfraglen = fm->m_pkthdr.len; fbf = n_fbf; fm = fm->m_nextpkt; } } nextfrag: /* * Pass the frame to the h/w for transmission. * Fragmented frames have each frag chained together * with m_nextpkt. We know there are sufficient ath_buf's * to send all the frags because of work done by * ath_txfrag_setup. We leave m_nextpkt set while * calling ath_tx_start so it can use it to extend the * the tx duration to cover the subsequent frag and * so it can reclaim all the mbufs in case of an error; * ath_tx_start clears m_nextpkt once it commits to * handing the frame to the hardware. * * Note: if this fails, then the mbufs are freed but * not the node reference. * * So, we now have to free the node reference ourselves here * and return OK up to the stack. */ next = m->m_nextpkt; if (ath_tx_start(sc, ni, bf, m)) { bad: if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); reclaim: bf->bf_m = NULL; bf->bf_node = NULL; ATH_TXBUF_LOCK(sc); ath_returnbuf_head(sc, bf); /* * Free the rest of the node references and * buffers for the fragment list. */ ath_txfrag_cleanup(sc, &frags, ni); ATH_TXBUF_UNLOCK(sc); /* * XXX: And free the node/return OK; ath_tx_start() may have * modified the buffer. We currently have no way to * signify that the mbuf was freed but there was an error. */ ieee80211_free_node(ni); retval = 0; goto finish; } /* * Check here if the node is in power save state. */ ath_tx_update_tim(sc, ni, 1); if (next != NULL) { /* * Beware of state changing between frags. * XXX check sta power-save state? */ if (ni->ni_vap->iv_state != IEEE80211_S_RUN) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: flush fragmented packet, state %s\n", __func__, ieee80211_state_name[ni->ni_vap->iv_state]); /* XXX dmamap */ ieee80211_free_mbuf(next); goto reclaim; } m = next; bf = TAILQ_FIRST(&frags); KASSERT(bf != NULL, ("no buf for txfrag")); TAILQ_REMOVE(&frags, bf, bf_list); goto nextfrag; } /* * Bump watchdog timer. */ sc->sc_wd_timer = 5; finish: ATH_TX_UNLOCK(sc); /* * Finished transmitting! */ ATH_PCU_LOCK(sc); sc->sc_txstart_cnt--; ATH_PCU_UNLOCK(sc); /* Sleep the hardware if required */ ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); ATH_KTR(sc, ATH_KTR_TX, 0, "ath_transmit: finished"); return (retval); } static int ath_media_change(struct ifnet *ifp) { int error = ieee80211_media_change(ifp); /* NB: only the fixed rate can change and that doesn't need a reset */ return (error == ENETRESET ? 0 : error); } /* * Block/unblock tx+rx processing while a key change is done. * We assume the caller serializes key management operations * so we only need to worry about synchronization with other * uses that originate in the driver. 
*/ static void ath_key_update_begin(struct ieee80211vap *vap) { struct ath_softc *sc = vap->iv_ic->ic_softc; DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); taskqueue_block(sc->sc_tq); } static void ath_key_update_end(struct ieee80211vap *vap) { struct ath_softc *sc = vap->iv_ic->ic_softc; DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); taskqueue_unblock(sc->sc_tq); } static void ath_update_promisc(struct ieee80211com *ic) { struct ath_softc *sc = ic->ic_softc; u_int32_t rfilt; /* configure rx filter */ ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); rfilt = ath_calcrxfilter(sc); ath_hal_setrxfilter(sc->sc_ah, rfilt); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt); } static u_int ath_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { uint32_t val, *mfilt = arg; char *dl; uint8_t pos; /* calculate XOR of eight 6bit values */ dl = LLADDR(sdl); val = le32dec(dl + 0); pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; val = le32dec(dl + 3); pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; pos &= 0x3f; mfilt[pos / 32] |= (1 << (pos % 32)); return (1); } /* * Driver-internal mcast update call. * * Assumes the hardware is already awake. */ static void ath_update_mcast_hw(struct ath_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; u_int32_t mfilt[2]; /* calculate and install multicast filter */ if (ic->ic_allmulti == 0) { struct ieee80211vap *vap; /* * Merge multicast addresses to form the hardware filter. */ mfilt[0] = mfilt[1] = 0; TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) if_foreach_llmaddr(vap->iv_ifp, ath_hash_maddr, &mfilt); } else mfilt[0] = mfilt[1] = ~0; ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]); DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n", __func__, mfilt[0], mfilt[1]); } /* * Called from the net80211 layer - force the hardware * awake before operating. */ static void ath_update_mcast(struct ieee80211com *ic) { struct ath_softc *sc = ic->ic_softc; ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); ath_update_mcast_hw(sc); ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); } void ath_mode_init(struct ath_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ath_hal *ah = sc->sc_ah; u_int32_t rfilt; /* XXX power state? */ /* configure rx filter */ rfilt = ath_calcrxfilter(sc); ath_hal_setrxfilter(ah, rfilt); /* configure operational mode */ ath_hal_setopmode(ah); /* handle any link-level address change */ ath_hal_setmac(ah, ic->ic_macaddr); /* calculate and install multicast filter */ ath_update_mcast_hw(sc); } /* * Set the slot time based on the current setting. */ void ath_setslottime(struct ath_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ath_hal *ah = sc->sc_ah; u_int usec; if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan)) usec = 13; else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan)) usec = 21; else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) { /* honor short/long slot time only in 11g */ /* XXX shouldn't honor on pure g or turbo g channel */ if (ic->ic_flags & IEEE80211_F_SHSLOT) usec = HAL_SLOT_TIME_9; else usec = HAL_SLOT_TIME_20; } else usec = HAL_SLOT_TIME_9; DPRINTF(sc, ATH_DEBUG_RESET, "%s: chan %u MHz flags 0x%x %s slot, %u usec\n", __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags, ic->ic_flags & IEEE80211_F_SHSLOT ? 
"short" : "long", usec); /* Wake up the hardware first before updating the slot time */ ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ath_hal_setslottime(ah, usec); ath_power_restore_power_state(sc); sc->sc_updateslot = OK; ATH_UNLOCK(sc); } /* * Callback from the 802.11 layer to update the * slot time based on the current setting. */ static void ath_updateslot(struct ieee80211com *ic) { struct ath_softc *sc = ic->ic_softc; /* * When not coordinating the BSS, change the hardware * immediately. For other operation we defer the change * until beacon updates have propagated to the stations. * * XXX sc_updateslot isn't changed behind a lock? */ if (ic->ic_opmode == IEEE80211_M_HOSTAP || ic->ic_opmode == IEEE80211_M_MBSS) sc->sc_updateslot = UPDATE; else ath_setslottime(sc); } /* * Append the contents of src to dst; both queues * are assumed to be locked. */ void ath_txqmove(struct ath_txq *dst, struct ath_txq *src) { ATH_TXQ_LOCK_ASSERT(src); ATH_TXQ_LOCK_ASSERT(dst); TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list); dst->axq_link = src->axq_link; src->axq_link = NULL; dst->axq_depth += src->axq_depth; dst->axq_aggr_depth += src->axq_aggr_depth; src->axq_depth = 0; src->axq_aggr_depth = 0; } /* * Reset the hardware, with no loss. * * This can't be used for a general case reset. */ static void ath_reset_proc(void *arg, int pending) { struct ath_softc *sc = arg; #if 0 device_printf(sc->sc_dev, "%s: resetting\n", __func__); #endif ath_reset(sc, ATH_RESET_NOLOSS); } /* * Reset the hardware after detecting beacons have stopped. */ static void ath_bstuck_proc(void *arg, int pending) { struct ath_softc *sc = arg; uint32_t hangs = 0; if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) device_printf(sc->sc_dev, "bb hang detected (0x%x)\n", hangs); #ifdef ATH_DEBUG_ALQ if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_STUCK_BEACON)) if_ath_alq_post(&sc->sc_alq, ATH_ALQ_STUCK_BEACON, 0, NULL); #endif device_printf(sc->sc_dev, "stuck beacon; resetting (bmiss count %u)\n", sc->sc_bmisscount); sc->sc_stats.ast_bstuck++; /* * This assumes that there's no simultaneous channel mode change * occurring. */ ath_reset(sc, ATH_RESET_NOLOSS); } static int ath_desc_alloc(struct ath_softc *sc) { int error; error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf, "tx", sc->sc_tx_desclen, ath_txbuf, ATH_MAX_SCATTER); if (error != 0) { return error; } sc->sc_txbuf_cnt = ath_txbuf; error = ath_descdma_setup(sc, &sc->sc_txdma_mgmt, &sc->sc_txbuf_mgmt, "tx_mgmt", sc->sc_tx_desclen, ath_txbuf_mgmt, ATH_TXDESC); if (error != 0) { ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); return error; } /* * XXX mark txbuf_mgmt frames with ATH_BUF_MGMT, so the * flag doesn't have to be set in ath_getbuf_locked(). 
*/ error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf, "beacon", sc->sc_tx_desclen, ATH_BCBUF, 1); if (error != 0) { ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt, &sc->sc_txbuf_mgmt); return error; } return 0; } static void ath_desc_free(struct ath_softc *sc) { if (sc->sc_bdma.dd_desc_len != 0) ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf); if (sc->sc_txdma.dd_desc_len != 0) ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); if (sc->sc_txdma_mgmt.dd_desc_len != 0) ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt, &sc->sc_txbuf_mgmt); } static struct ieee80211_node * ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) { struct ieee80211com *ic = vap->iv_ic; struct ath_softc *sc = ic->ic_softc; const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space; struct ath_node *an; an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO); if (an == NULL) { /* XXX stat+msg */ return NULL; } ath_rate_node_init(sc, an); /* Setup the mutex - there's no associd yet so set the name to NULL */ snprintf(an->an_name, sizeof(an->an_name), "%s: node %p", device_get_nameunit(sc->sc_dev), an); mtx_init(&an->an_mtx, an->an_name, NULL, MTX_DEF); /* XXX setup ath_tid */ ath_tx_tid_init(sc, an); an->an_node_stats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER; an->an_node_stats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER; an->an_node_stats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER; DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: an %p\n", __func__, mac, ":", an); return &an->an_node; } static void ath_node_cleanup(struct ieee80211_node *ni) { struct ieee80211com *ic = ni->ni_ic; struct ath_softc *sc = ic->ic_softc; DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: an %p\n", __func__, ni->ni_macaddr, ":", ATH_NODE(ni)); /* Cleanup ath_tid, free unused bufs, unlink bufs in TXQ */ ath_tx_node_flush(sc, ATH_NODE(ni)); ath_rate_node_cleanup(sc, ATH_NODE(ni)); sc->sc_node_cleanup(ni); } static void ath_node_free(struct ieee80211_node *ni) { struct ieee80211com *ic = ni->ni_ic; struct ath_softc *sc = ic->ic_softc; DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: an %p\n", __func__, ni->ni_macaddr, ":", ATH_NODE(ni)); mtx_destroy(&ATH_NODE(ni)->an_mtx); sc->sc_node_free(ni); } static void ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise) { struct ieee80211com *ic = ni->ni_ic; struct ath_softc *sc = ic->ic_softc; struct ath_hal *ah = sc->sc_ah; *rssi = ic->ic_node_getrssi(ni); if (ni->ni_chan != IEEE80211_CHAN_ANYC) *noise = ath_hal_getchannoise(ah, ni->ni_chan); else *noise = -95; /* nominally correct */ } /* * Set the default antenna. */ void ath_setdefantenna(struct ath_softc *sc, u_int antenna) { struct ath_hal *ah = sc->sc_ah; /* XXX block beacon interrupts */ ath_hal_setdefantenna(ah, antenna); if (sc->sc_defant != antenna) sc->sc_stats.ast_ant_defswitch++; sc->sc_defant = antenna; sc->sc_rxotherant = 0; } static void ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum) { txq->axq_qnum = qnum; txq->axq_ac = 0; txq->axq_depth = 0; txq->axq_aggr_depth = 0; txq->axq_intrcnt = 0; txq->axq_link = NULL; txq->axq_softc = sc; TAILQ_INIT(&txq->axq_q); TAILQ_INIT(&txq->axq_tidq); TAILQ_INIT(&txq->fifo.axq_q); ATH_TXQ_LOCK_INIT(sc, txq); } /* * Setup a h/w transmit queue. 
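 *
 * [Editorial sketch -- not original driver text.]  ath_node_alloc() above
 * sizes its allocation as sizeof(struct ath_node) plus the rate control
 * module's per-node scratch (sc_rc->arc_space), so a single malloc()
 * carries both.  The hypothetical mock below shows the same idiom;
 * mock_node and mock_node_alloc() are not driver symbols, and the
 * driver's rate module typically finds its area immediately past the
 * node structure.
 */
#if 0	/* illustrative only; not part of the driver build */
#include <stdlib.h>

struct mock_node {
	int	refcount;
	/* rate-control scratch follows the structure in the same block */
};

static struct mock_node *
mock_node_alloc(size_t rc_space)
{
	struct mock_node *n;

	n = calloc(1, sizeof(*n) + rc_space);
	if (n == NULL)
		return (NULL);
	n->refcount = 1;
	/* the extra area starts at (void *)(n + 1) */
	return (n);
}
#endif
/*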
*/ static struct ath_txq * ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) { struct ath_hal *ah = sc->sc_ah; HAL_TXQ_INFO qi; int qnum; memset(&qi, 0, sizeof(qi)); qi.tqi_subtype = subtype; qi.tqi_aifs = HAL_TXQ_USEDEFAULT; qi.tqi_cwmin = HAL_TXQ_USEDEFAULT; qi.tqi_cwmax = HAL_TXQ_USEDEFAULT; /* * Enable interrupts only for EOL and DESC conditions. * We mark tx descriptors to receive a DESC interrupt * when a tx queue gets deep; otherwise waiting for the * EOL to reap descriptors. Note that this is done to * reduce interrupt load and this only defers reaping * descriptors, never transmitting frames. Aside from * reducing interrupts this also permits more concurrency. * The only potential downside is if the tx queue backs * up in which case the top half of the kernel may backup * due to a lack of tx descriptors. */ if (sc->sc_isedma) qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXOKINT_ENABLE; else qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE; qnum = ath_hal_setuptxqueue(ah, qtype, &qi); if (qnum == -1) { /* * NB: don't print a message, this happens * normally on parts with too few tx queues */ return NULL; } if (qnum >= nitems(sc->sc_txq)) { device_printf(sc->sc_dev, "hal qnum %u out of range, max %zu!\n", qnum, nitems(sc->sc_txq)); ath_hal_releasetxqueue(ah, qnum); return NULL; } if (!ATH_TXQ_SETUP(sc, qnum)) { ath_txq_init(sc, &sc->sc_txq[qnum], qnum); sc->sc_txqsetup |= 1<sc_txq[qnum]; } /* * Setup a hardware data transmit queue for the specified * access control. The hal may not support all requested * queues in which case it will return a reference to a * previously setup queue. We record the mapping from ac's * to h/w queues for use by ath_tx_start and also track * the set of h/w queues being used to optimize work in the * transmit interrupt handler and related routines. */ static int ath_tx_setup(struct ath_softc *sc, int ac, int haltype) { struct ath_txq *txq; if (ac >= nitems(sc->sc_ac2q)) { device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n", ac, nitems(sc->sc_ac2q)); return 0; } txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype); if (txq != NULL) { txq->axq_ac = ac; sc->sc_ac2q[ac] = txq; return 1; } else return 0; } /* * Update WME parameters for a transmit queue. */ static int ath_txq_update(struct ath_softc *sc, int ac) { #define ATH_EXPONENT_TO_VALUE(v) ((1<sc_ic; struct ath_txq *txq = sc->sc_ac2q[ac]; struct chanAccParams chp; struct wmeParams *wmep; struct ath_hal *ah = sc->sc_ah; HAL_TXQ_INFO qi; ieee80211_wme_ic_getparams(ic, &chp); wmep = &chp.cap_wmeParams[ac]; ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi); #ifdef IEEE80211_SUPPORT_TDMA if (sc->sc_tdma) { /* * AIFS is zero so there's no pre-transmit wait. The * burst time defines the slot duration and is configured * through net80211. The QCU is setup to not do post-xmit * back off, lockout all lower-priority QCU's, and fire * off the DMA beacon alert timer which is setup based * on the slot configuration. */ qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE | HAL_TXQ_TXERRINT_ENABLE | HAL_TXQ_TXURNINT_ENABLE | HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_DBA_GATED | HAL_TXQ_BACKOFF_DISABLE | HAL_TXQ_ARB_LOCKOUT_GLOBAL ; qi.tqi_aifs = 0; /* XXX +dbaprep? */ qi.tqi_readyTime = sc->sc_tdmaslotlen; qi.tqi_burstTime = qi.tqi_readyTime; } else { #endif /* * XXX shouldn't this just use the default flags * used in the previous queue setup? 
*/ qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE | HAL_TXQ_TXERRINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE | HAL_TXQ_TXURNINT_ENABLE | HAL_TXQ_TXEOLINT_ENABLE ; qi.tqi_aifs = wmep->wmep_aifsn; qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); qi.tqi_readyTime = 0; qi.tqi_burstTime = IEEE80211_TXOP_TO_US(wmep->wmep_txopLimit); #ifdef IEEE80211_SUPPORT_TDMA } #endif DPRINTF(sc, ATH_DEBUG_RESET, "%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n", __func__, txq->axq_qnum, qi.tqi_qflags, qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime); if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) { device_printf(sc->sc_dev, "unable to update hardware queue " "parameters for %s traffic!\n", ieee80211_wme_acnames[ac]); return 0; } else { ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */ return 1; } #undef ATH_EXPONENT_TO_VALUE } /* * Callback from the 802.11 layer to update WME parameters. */ int ath_wme_update(struct ieee80211com *ic) { struct ath_softc *sc = ic->ic_softc; return !ath_txq_update(sc, WME_AC_BE) || !ath_txq_update(sc, WME_AC_BK) || !ath_txq_update(sc, WME_AC_VI) || !ath_txq_update(sc, WME_AC_VO) ? EIO : 0; } /* * Reclaim resources for a setup queue. */ static void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) { ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum); sc->sc_txqsetup &= ~(1<axq_qnum); ATH_TXQ_LOCK_DESTROY(txq); } /* * Reclaim all tx queue resources. */ static void ath_tx_cleanup(struct ath_softc *sc) { int i; ATH_TXBUF_LOCK_DESTROY(sc); for (i = 0; i < HAL_NUM_TX_QUEUES; i++) if (ATH_TXQ_SETUP(sc, i)) ath_tx_cleanupq(sc, &sc->sc_txq[i]); } /* * Return h/w rate index for an IEEE rate (w/o basic rate bit) * using the current rates in sc_rixmap. */ int ath_tx_findrix(const struct ath_softc *sc, uint8_t rate) { int rix = sc->sc_rixmap[rate]; /* NB: return lowest rix for invalid rate */ return (rix == 0xff ? 0 : rix); } static void ath_tx_update_stats(struct ath_softc *sc, struct ath_tx_status *ts, struct ath_buf *bf) { struct ieee80211_node *ni = bf->bf_node; struct ieee80211com *ic = &sc->sc_ic; int sr, lr, pri; if (ts->ts_status == 0) { u_int8_t txant = ts->ts_antenna; sc->sc_stats.ast_ant_tx[txant]++; sc->sc_ant_tx[txant]++; if (ts->ts_finaltsi != 0) sc->sc_stats.ast_tx_altrate++; /* XXX TODO: should do per-pri conuters */ pri = M_WME_GETAC(bf->bf_m); if (pri >= WME_AC_VO) ic->ic_wme.wme_hipri_traffic++; if ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) ni->ni_inact = ni->ni_inact_reload; } else { if (ts->ts_status & HAL_TXERR_XRETRY) sc->sc_stats.ast_tx_xretries++; if (ts->ts_status & HAL_TXERR_FIFO) sc->sc_stats.ast_tx_fifoerr++; if (ts->ts_status & HAL_TXERR_FILT) sc->sc_stats.ast_tx_filtered++; if (ts->ts_status & HAL_TXERR_XTXOP) sc->sc_stats.ast_tx_xtxop++; if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED) sc->sc_stats.ast_tx_timerexpired++; if (bf->bf_m->m_flags & M_FF) sc->sc_stats.ast_ff_txerr++; } /* XXX when is this valid? */ if (ts->ts_flags & HAL_TX_DESC_CFG_ERR) sc->sc_stats.ast_tx_desccfgerr++; /* * This can be valid for successful frame transmission! * If there's a TX FIFO underrun during aggregate transmission, * the MAC will pad the rest of the aggregate with delimiters. * If a BA is returned, the frame is marked as "OK" and it's up * to the TX completion code to notice which frames weren't * successfully transmitted. 
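 *
 * [Editorial sketch -- not original driver text.]  ath_txq_update() above
 * converts WME parameters into HAL queue properties: CWmin/CWmax arrive
 * as exponents (the ATH_EXPONENT_TO_VALUE() definition is garbled in
 * this extract but is presumably ((1 << v) - 1)) and the TXOP limit is
 * carried in 32 microsecond units.  The standalone conversion below uses
 * hypothetical AC_VO-style values to make the units concrete.
 */
#if 0	/* illustrative only; not part of the driver build */
#include <stdio.h>

#define EXPONENT_TO_VALUE(v)	((1 << (v)) - 1)	/* CW exponent -> slots */
#define TXOP_TO_US(txop)	((txop) << 5)		/* 32 us units -> us */

int
main(void)
{
	int logcwmin = 2, logcwmax = 3, txoplimit = 47;

	printf("cwmin %d cwmax %d txop %d us\n",
	    EXPONENT_TO_VALUE(logcwmin),	/* 3 */
	    EXPONENT_TO_VALUE(logcwmax),	/* 7 */
	    TXOP_TO_US(txoplimit));		/* 1504 */
	return (0);
}
#endif
/*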
*/ if (ts->ts_flags & HAL_TX_DATA_UNDERRUN) sc->sc_stats.ast_tx_data_underrun++; if (ts->ts_flags & HAL_TX_DELIM_UNDERRUN) sc->sc_stats.ast_tx_delim_underrun++; sr = ts->ts_shortretry; lr = ts->ts_longretry; sc->sc_stats.ast_tx_shortretry += sr; sc->sc_stats.ast_tx_longretry += lr; } /* * The default completion. If fail is 1, this means * "please don't retry the frame, and just return -1 status * to the net80211 stack. */ void ath_tx_default_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) { struct ath_tx_status *ts = &bf->bf_status.ds_txstat; int st; if (fail == 1) st = -1; else st = ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) ? ts->ts_status : HAL_TXERR_XRETRY; #if 0 if (bf->bf_state.bfs_dobaw) device_printf(sc->sc_dev, "%s: bf %p: seqno %d: dobaw should've been cleared!\n", __func__, bf, SEQNO(bf->bf_state.bfs_seqno)); #endif if (bf->bf_next != NULL) device_printf(sc->sc_dev, "%s: bf %p: seqno %d: bf_next not NULL!\n", __func__, bf, SEQNO(bf->bf_state.bfs_seqno)); /* * Check if the node software queue is empty; if so * then clear the TIM. * * This needs to be done before the buffer is freed as * otherwise the node reference will have been released * and the node may not actually exist any longer. * * XXX I don't like this belonging here, but it's cleaner * to do it here right now then all the other places * where ath_tx_default_comp() is called. * * XXX TODO: during drain, ensure that the callback is * being called so we get a chance to update the TIM. */ if (bf->bf_node) { ATH_TX_LOCK(sc); ath_tx_update_tim(sc, bf->bf_node, 0); ATH_TX_UNLOCK(sc); } /* * Do any tx complete callback. Note this must * be done before releasing the node reference. * This will free the mbuf, release the net80211 * node and recycle the ath_buf. */ ath_tx_freebuf(sc, bf, st); } /* * Update rate control with the given completion status. */ void ath_tx_update_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_rc_series *rc, struct ath_tx_status *ts, int frmlen, int nframes, int nbad) { struct ath_node *an; /* Only for unicast frames */ if (ni == NULL) return; an = ATH_NODE(ni); ATH_NODE_UNLOCK_ASSERT(an); if ((ts->ts_status & HAL_TXERR_FILT) == 0) { ATH_NODE_LOCK(an); ath_rate_tx_complete(sc, an, rc, ts, frmlen, nframes, nbad); ATH_NODE_UNLOCK(an); } } /* * Process the completion of the given buffer. * * This calls the rate control update and then the buffer completion. * This will either free the buffer or requeue it. In any case, the * bf pointer should be treated as invalid after this function is called. */ void ath_tx_process_buf_completion(struct ath_softc *sc, struct ath_txq *txq, struct ath_tx_status *ts, struct ath_buf *bf) { struct ieee80211_node *ni = bf->bf_node; ATH_TX_UNLOCK_ASSERT(sc); ATH_TXQ_UNLOCK_ASSERT(txq); /* If unicast frame, update general statistics */ if (ni != NULL) { /* update statistics */ ath_tx_update_stats(sc, ts, bf); } /* * Call the completion handler. * The completion handler is responsible for * calling the rate control code. * * Frames with no completion handler get the * rate control code called here. */ if (bf->bf_comp == NULL) { if ((ts->ts_status & HAL_TXERR_FILT) == 0 && (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) { /* * XXX assume this isn't an aggregate * frame. */ ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc, ts, bf->bf_state.bfs_pktlen, 1, (ts->ts_status == 0 ? 0 : 1)); } ath_tx_default_comp(sc, bf, 0); } else bf->bf_comp(sc, bf, 0); } /* * Process completed xmit descriptors from the specified queue. 
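 * (Added note: the loop below walks txq->axq_q in order and stops at the
 * first descriptor for which ath_hal_txprocdesc() still returns
 * HAL_EINPROGRESS, i.e. the hardware has not yet written its completion
 * status for that frame.)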
* Kick the packet scheduler if needed. This can occur from this * particular task. */ static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched) { struct ath_hal *ah = sc->sc_ah; struct ath_buf *bf; struct ath_desc *ds; struct ath_tx_status *ts; struct ieee80211_node *ni; #ifdef IEEE80211_SUPPORT_SUPERG struct ieee80211com *ic = &sc->sc_ic; #endif /* IEEE80211_SUPPORT_SUPERG */ int nacked; HAL_STATUS status; DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n", __func__, txq->axq_qnum, (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum), txq->axq_link); ATH_KTR(sc, ATH_KTR_TXCOMP, 4, "ath_tx_processq: txq=%u head %p link %p depth %p", txq->axq_qnum, (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum), txq->axq_link, txq->axq_depth); nacked = 0; for (;;) { ATH_TXQ_LOCK(txq); txq->axq_intrcnt = 0; /* reset periodic desc intr count */ bf = TAILQ_FIRST(&txq->axq_q); if (bf == NULL) { ATH_TXQ_UNLOCK(txq); break; } ds = bf->bf_lastds; /* XXX must be setup correctly! */ ts = &bf->bf_status.ds_txstat; status = ath_hal_txprocdesc(ah, ds, ts); #ifdef ATH_DEBUG if (sc->sc_debug & ATH_DEBUG_XMIT_DESC) ath_printtxbuf(sc, bf, txq->axq_qnum, 0, status == HAL_OK); else if ((sc->sc_debug & ATH_DEBUG_RESET) && (dosched == 0)) ath_printtxbuf(sc, bf, txq->axq_qnum, 0, status == HAL_OK); #endif #ifdef ATH_DEBUG_ALQ if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS)) { if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS, sc->sc_tx_statuslen, (char *) ds); } #endif if (status == HAL_EINPROGRESS) { ATH_KTR(sc, ATH_KTR_TXCOMP, 3, "ath_tx_processq: txq=%u, bf=%p ds=%p, HAL_EINPROGRESS", txq->axq_qnum, bf, ds); ATH_TXQ_UNLOCK(txq); break; } ATH_TXQ_REMOVE(txq, bf, bf_list); /* * Sanity check. */ if (txq->axq_qnum != bf->bf_state.bfs_tx_queue) { device_printf(sc->sc_dev, "%s: TXQ=%d: bf=%p, bfs_tx_queue=%d\n", __func__, txq->axq_qnum, bf, bf->bf_state.bfs_tx_queue); } if (txq->axq_qnum != bf->bf_last->bf_state.bfs_tx_queue) { device_printf(sc->sc_dev, "%s: TXQ=%d: bf_last=%p, bfs_tx_queue=%d\n", __func__, txq->axq_qnum, bf->bf_last, bf->bf_last->bf_state.bfs_tx_queue); } #if 0 if (txq->axq_depth > 0) { /* * More frames follow. Mark the buffer busy * so it's not re-used while the hardware may * still re-read the link field in the descriptor. * * Use the last buffer in an aggregate as that * is where the hardware may be - intermediate * descriptors won't be "busy". */ bf->bf_last->bf_flags |= ATH_BUF_BUSY; } else txq->axq_link = NULL; #else bf->bf_last->bf_flags |= ATH_BUF_BUSY; #endif if (bf->bf_state.bfs_aggr) txq->axq_aggr_depth--; ni = bf->bf_node; ATH_KTR(sc, ATH_KTR_TXCOMP, 5, "ath_tx_processq: txq=%u, bf=%p, ds=%p, ni=%p, ts_status=0x%08x", txq->axq_qnum, bf, ds, ni, ts->ts_status); /* * If unicast frame was ack'd update RSSI, * including the last rx time used to * workaround phantom bmiss interrupts. */ if (ni != NULL && ts->ts_status == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) { nacked++; sc->sc_stats.ast_tx_rssi = ts->ts_rssi; ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi, ts->ts_rssi); ATH_RSSI_LPF(ATH_NODE(ni)->an_node_stats.ns_avgtxrssi, ts->ts_rssi); } ATH_TXQ_UNLOCK(txq); /* * Update statistics and call completion */ ath_tx_process_buf_completion(sc, txq, ts, bf); /* XXX at this point, bf and ni may be totally invalid */ } #ifdef IEEE80211_SUPPORT_SUPERG /* * Flush fast-frame staging queue when traffic slows. 
*/ if (txq->axq_depth <= 1) ieee80211_ff_flush(ic, txq->axq_ac); #endif /* Kick the software TXQ scheduler */ if (dosched) { ATH_TX_LOCK(sc); ath_txq_sched(sc, txq); ATH_TX_UNLOCK(sc); } ATH_KTR(sc, ATH_KTR_TXCOMP, 1, "ath_tx_processq: txq=%u: done", txq->axq_qnum); return nacked; } #define TXQACTIVE(t, q) ( (t) & (1 << (q))) /* * Deferred processing of transmit interrupt; special-cased * for a single hardware transmit queue (e.g. 5210 and 5211). */ static void ath_tx_proc_q0(void *arg, int npending) { struct ath_softc *sc = arg; uint32_t txqs; ATH_PCU_LOCK(sc); sc->sc_txproc_cnt++; txqs = sc->sc_txq_active; sc->sc_txq_active &= ~txqs; ATH_PCU_UNLOCK(sc); ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); ATH_KTR(sc, ATH_KTR_TXCOMP, 1, "ath_tx_proc_q0: txqs=0x%08x", txqs); if (TXQACTIVE(txqs, 0) && ath_tx_processq(sc, &sc->sc_txq[0], 1)) /* XXX why is lastrx updated in tx code? */ sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) ath_tx_processq(sc, sc->sc_cabq, 1); sc->sc_wd_timer = 0; if (sc->sc_softled) ath_led_event(sc, sc->sc_txrix); ATH_PCU_LOCK(sc); sc->sc_txproc_cnt--; ATH_PCU_UNLOCK(sc); ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); ath_tx_kick(sc); } /* * Deferred processing of transmit interrupt; special-cased * for four hardware queues, 0-3 (e.g. 5212 w/ WME support). */ static void ath_tx_proc_q0123(void *arg, int npending) { struct ath_softc *sc = arg; int nacked; uint32_t txqs; ATH_PCU_LOCK(sc); sc->sc_txproc_cnt++; txqs = sc->sc_txq_active; sc->sc_txq_active &= ~txqs; ATH_PCU_UNLOCK(sc); ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); ATH_KTR(sc, ATH_KTR_TXCOMP, 1, "ath_tx_proc_q0123: txqs=0x%08x", txqs); /* * Process each active queue. */ nacked = 0; if (TXQACTIVE(txqs, 0)) nacked += ath_tx_processq(sc, &sc->sc_txq[0], 1); if (TXQACTIVE(txqs, 1)) nacked += ath_tx_processq(sc, &sc->sc_txq[1], 1); if (TXQACTIVE(txqs, 2)) nacked += ath_tx_processq(sc, &sc->sc_txq[2], 1); if (TXQACTIVE(txqs, 3)) nacked += ath_tx_processq(sc, &sc->sc_txq[3], 1); if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) ath_tx_processq(sc, sc->sc_cabq, 1); if (nacked) sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); sc->sc_wd_timer = 0; if (sc->sc_softled) ath_led_event(sc, sc->sc_txrix); ATH_PCU_LOCK(sc); sc->sc_txproc_cnt--; ATH_PCU_UNLOCK(sc); ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); ath_tx_kick(sc); } /* * Deferred processing of transmit interrupt. */ static void ath_tx_proc(void *arg, int npending) { struct ath_softc *sc = arg; int i, nacked; uint32_t txqs; ATH_PCU_LOCK(sc); sc->sc_txproc_cnt++; txqs = sc->sc_txq_active; sc->sc_txq_active &= ~txqs; ATH_PCU_UNLOCK(sc); ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); ATH_KTR(sc, ATH_KTR_TXCOMP, 1, "ath_tx_proc: txqs=0x%08x", txqs); /* * Process each active queue. */ nacked = 0; for (i = 0; i < HAL_NUM_TX_QUEUES; i++) if (ATH_TXQ_SETUP(sc, i) && TXQACTIVE(txqs, i)) nacked += ath_tx_processq(sc, &sc->sc_txq[i], 1); if (nacked) sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); sc->sc_wd_timer = 0; if (sc->sc_softled) ath_led_event(sc, sc->sc_txrix); ATH_PCU_LOCK(sc); sc->sc_txproc_cnt--; ATH_PCU_UNLOCK(sc); ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); ath_tx_kick(sc); } #undef TXQACTIVE /* * Deferred processing of TXQ rescheduling. */ static void ath_txq_sched_tasklet(void *arg, int npending) { struct ath_softc *sc = arg; int i; /* XXX is skipping ok? 
*/ ATH_PCU_LOCK(sc); #if 0 if (sc->sc_inreset_cnt > 0) { device_printf(sc->sc_dev, "%s: sc_inreset_cnt > 0; skipping\n", __func__); ATH_PCU_UNLOCK(sc); return; } #endif sc->sc_txproc_cnt++; ATH_PCU_UNLOCK(sc); ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); ATH_TX_LOCK(sc); for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { if (ATH_TXQ_SETUP(sc, i)) { ath_txq_sched(sc, &sc->sc_txq[i]); } } ATH_TX_UNLOCK(sc); ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); ATH_PCU_LOCK(sc); sc->sc_txproc_cnt--; ATH_PCU_UNLOCK(sc); } void ath_returnbuf_tail(struct ath_softc *sc, struct ath_buf *bf) { ATH_TXBUF_LOCK_ASSERT(sc); if (bf->bf_flags & ATH_BUF_MGMT) TAILQ_INSERT_TAIL(&sc->sc_txbuf_mgmt, bf, bf_list); else { TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); sc->sc_txbuf_cnt++; if (sc->sc_txbuf_cnt > ath_txbuf) { device_printf(sc->sc_dev, "%s: sc_txbuf_cnt > %d?\n", __func__, ath_txbuf); sc->sc_txbuf_cnt = ath_txbuf; } } } void ath_returnbuf_head(struct ath_softc *sc, struct ath_buf *bf) { ATH_TXBUF_LOCK_ASSERT(sc); if (bf->bf_flags & ATH_BUF_MGMT) TAILQ_INSERT_HEAD(&sc->sc_txbuf_mgmt, bf, bf_list); else { TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list); sc->sc_txbuf_cnt++; if (sc->sc_txbuf_cnt > ATH_TXBUF) { device_printf(sc->sc_dev, "%s: sc_txbuf_cnt > %d?\n", __func__, ATH_TXBUF); sc->sc_txbuf_cnt = ATH_TXBUF; } } } /* * Free the holding buffer if it exists */ void ath_txq_freeholdingbuf(struct ath_softc *sc, struct ath_txq *txq) { ATH_TXBUF_UNLOCK_ASSERT(sc); ATH_TXQ_LOCK_ASSERT(txq); if (txq->axq_holdingbf == NULL) return; txq->axq_holdingbf->bf_flags &= ~ATH_BUF_BUSY; ATH_TXBUF_LOCK(sc); ath_returnbuf_tail(sc, txq->axq_holdingbf); ATH_TXBUF_UNLOCK(sc); txq->axq_holdingbf = NULL; } /* * Add this buffer to the holding queue, freeing the previous * one if it exists. */ static void ath_txq_addholdingbuf(struct ath_softc *sc, struct ath_buf *bf) { struct ath_txq *txq; txq = &sc->sc_txq[bf->bf_state.bfs_tx_queue]; ATH_TXBUF_UNLOCK_ASSERT(sc); ATH_TXQ_LOCK_ASSERT(txq); /* XXX assert ATH_BUF_BUSY is set */ /* XXX assert the tx queue is under the max number */ if (bf->bf_state.bfs_tx_queue > HAL_NUM_TX_QUEUES) { device_printf(sc->sc_dev, "%s: bf=%p: invalid tx queue (%d)\n", __func__, bf, bf->bf_state.bfs_tx_queue); bf->bf_flags &= ~ATH_BUF_BUSY; ath_returnbuf_tail(sc, bf); return; } ath_txq_freeholdingbuf(sc, txq); txq->axq_holdingbf = bf; } /* * Return a buffer to the pool and update the 'busy' flag on the * previous 'tail' entry. * * This _must_ only be called when the buffer is involved in a completed * TX. The logic is that if it was part of an active TX, the previous * buffer on the list is now not involved in a halted TX DMA queue, waiting * for restart (eg for TDMA.) * * The caller must free the mbuf and recycle the node reference. * * XXX This method of handling busy / holding buffers is insanely stupid. * It requires bf_state.bfs_tx_queue to be correctly assigned. It would * be much nicer if buffers in the processq() methods would instead be * always completed there (pushed onto a txq or ath_bufhead) so we knew * exactly what hardware queue they came from in the first place. */ void ath_freebuf(struct ath_softc *sc, struct ath_buf *bf) { struct ath_txq *txq; txq = &sc->sc_txq[bf->bf_state.bfs_tx_queue]; KASSERT((bf->bf_node == NULL), ("%s: bf->bf_node != NULL\n", __func__)); KASSERT((bf->bf_m == NULL), ("%s: bf->bf_m != NULL\n", __func__)); /* * If this buffer is busy, push it onto the holding queue. 
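	 * (Added note: a "busy" buffer is one whose descriptor the hardware
	 * may still re-read for its link pointer.  Rather than being put
	 * straight back on the free list it is parked as txq->axq_holdingbf
	 * and is only recycled by ath_txq_freeholdingbuf(), either when a
	 * later completion on the same queue parks a newer buffer or when
	 * the queue is drained.)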
*/ if (bf->bf_flags & ATH_BUF_BUSY) { ATH_TXQ_LOCK(txq); ath_txq_addholdingbuf(sc, bf); ATH_TXQ_UNLOCK(txq); return; } /* * Not a busy buffer, so free normally */ ATH_TXBUF_LOCK(sc); ath_returnbuf_tail(sc, bf); ATH_TXBUF_UNLOCK(sc); } /* * This is currently used by ath_tx_draintxq() and * ath_tx_tid_free_pkts(). * * It recycles a single ath_buf. */ void ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf, int status) { struct ieee80211_node *ni = bf->bf_node; struct mbuf *m0 = bf->bf_m; /* * Make sure that we only sync/unload if there's an mbuf. * If not (eg we cloned a buffer), the unload will have already * occurred. */ if (bf->bf_m != NULL) { bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); } bf->bf_node = NULL; bf->bf_m = NULL; /* Free the buffer, it's not needed any longer */ ath_freebuf(sc, bf); /* Pass the buffer back to net80211 - completing it */ ieee80211_tx_complete(ni, m0, status); } static struct ath_buf * ath_tx_draintxq_get_one(struct ath_softc *sc, struct ath_txq *txq) { struct ath_buf *bf; ATH_TXQ_LOCK_ASSERT(txq); /* * Drain the FIFO queue first, then if it's * empty, move to the normal frame queue. */ bf = TAILQ_FIRST(&txq->fifo.axq_q); if (bf != NULL) { /* * Is it the last buffer in this set? * Decrement the FIFO counter. */ if (bf->bf_flags & ATH_BUF_FIFOEND) { if (txq->axq_fifo_depth == 0) { device_printf(sc->sc_dev, "%s: Q%d: fifo_depth=0, fifo.axq_depth=%d?\n", __func__, txq->axq_qnum, txq->fifo.axq_depth); } else txq->axq_fifo_depth--; } ATH_TXQ_REMOVE(&txq->fifo, bf, bf_list); return (bf); } /* * Debugging! */ if (txq->axq_fifo_depth != 0 || txq->fifo.axq_depth != 0) { device_printf(sc->sc_dev, "%s: Q%d: fifo_depth=%d, fifo.axq_depth=%d\n", __func__, txq->axq_qnum, txq->axq_fifo_depth, txq->fifo.axq_depth); } /* * Now drain the pending queue. */ bf = TAILQ_FIRST(&txq->axq_q); if (bf == NULL) { txq->axq_link = NULL; return (NULL); } ATH_TXQ_REMOVE(txq, bf, bf_list); return (bf); } void ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq) { #ifdef ATH_DEBUG struct ath_hal *ah = sc->sc_ah; #endif struct ath_buf *bf; u_int ix; /* * NB: this assumes output has been stopped and * we do not need to block ath_tx_proc */ for (ix = 0;; ix++) { ATH_TXQ_LOCK(txq); bf = ath_tx_draintxq_get_one(sc, txq); if (bf == NULL) { ATH_TXQ_UNLOCK(txq); break; } if (bf->bf_state.bfs_aggr) txq->axq_aggr_depth--; #ifdef ATH_DEBUG if (sc->sc_debug & ATH_DEBUG_RESET) { struct ieee80211com *ic = &sc->sc_ic; int status = 0; /* * EDMA operation has a TX completion FIFO * separate from the TX descriptor, so this * method of checking the "completion" status * is wrong. */ if (! sc->sc_isedma) { status = (ath_hal_txprocdesc(ah, bf->bf_lastds, &bf->bf_status.ds_txstat) == HAL_OK); } ath_printtxbuf(sc, bf, txq->axq_qnum, ix, status); ieee80211_dump_pkt(ic, mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len, 0, -1); } #endif /* ATH_DEBUG */ /* * Since we're now doing magic in the completion * functions, we -must- call it for aggregation * destinations or BAW tracking will get upset. */ /* * Clear ATH_BUF_BUSY; the completion handler * will free the buffer. */ ATH_TXQ_UNLOCK(txq); bf->bf_flags &= ~ATH_BUF_BUSY; if (bf->bf_comp) bf->bf_comp(sc, bf, 1); else ath_tx_default_comp(sc, bf, 1); } /* * Free the holding buffer if it exists */ ATH_TXQ_LOCK(txq); ath_txq_freeholdingbuf(sc, txq); ATH_TXQ_UNLOCK(txq); /* * Drain software queued frames which are on * active TIDs. 
*/ ath_tx_txq_drain(sc, txq); } static void ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq) { struct ath_hal *ah = sc->sc_ah; ATH_TXQ_LOCK_ASSERT(txq); DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, active=%d, hwpending=%d, flags 0x%08x, " "link %p, holdingbf=%p\n", __func__, txq->axq_qnum, (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum), (int) (!! ath_hal_txqenabled(ah, txq->axq_qnum)), (int) ath_hal_numtxpending(ah, txq->axq_qnum), txq->axq_flags, txq->axq_link, txq->axq_holdingbf); (void) ath_hal_stoptxdma(ah, txq->axq_qnum); /* We've stopped TX DMA, so mark this as stopped. */ txq->axq_flags &= ~ATH_TXQ_PUTRUNNING; #ifdef ATH_DEBUG if ((sc->sc_debug & ATH_DEBUG_RESET) && (txq->axq_holdingbf != NULL)) { ath_printtxbuf(sc, txq->axq_holdingbf, txq->axq_qnum, 0, 0); } #endif } int ath_stoptxdma(struct ath_softc *sc) { struct ath_hal *ah = sc->sc_ah; int i; /* XXX return value */ if (sc->sc_invalid) return 0; if (!sc->sc_invalid) { /* don't touch the hardware if marked invalid */ DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n", __func__, sc->sc_bhalq, (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq), NULL); /* stop the beacon queue */ (void) ath_hal_stoptxdma(ah, sc->sc_bhalq); /* Stop the data queues */ for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { if (ATH_TXQ_SETUP(sc, i)) { ATH_TXQ_LOCK(&sc->sc_txq[i]); ath_tx_stopdma(sc, &sc->sc_txq[i]); ATH_TXQ_UNLOCK(&sc->sc_txq[i]); } } } return 1; } #ifdef ATH_DEBUG void ath_tx_dump(struct ath_softc *sc, struct ath_txq *txq) { struct ath_hal *ah = sc->sc_ah; struct ath_buf *bf; int i = 0; if (! (sc->sc_debug & ATH_DEBUG_RESET)) return; device_printf(sc->sc_dev, "%s: Q%d: begin\n", __func__, txq->axq_qnum); TAILQ_FOREACH(bf, &txq->axq_q, bf_list) { ath_printtxbuf(sc, bf, txq->axq_qnum, i, ath_hal_txprocdesc(ah, bf->bf_lastds, &bf->bf_status.ds_txstat) == HAL_OK); i++; } device_printf(sc->sc_dev, "%s: Q%d: end\n", __func__, txq->axq_qnum); } #endif /* ATH_DEBUG */ /* * Drain the transmit queues and reclaim resources. */ void ath_legacy_tx_drain(struct ath_softc *sc, ATH_RESET_TYPE reset_type) { struct ath_hal *ah = sc->sc_ah; struct ath_buf *bf_last; int i; (void) ath_stoptxdma(sc); /* * Dump the queue contents */ for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { /* * XXX TODO: should we just handle the completed TX frames * here, whether or not the reset is a full one or not? */ if (ATH_TXQ_SETUP(sc, i)) { #ifdef ATH_DEBUG if (sc->sc_debug & ATH_DEBUG_RESET) ath_tx_dump(sc, &sc->sc_txq[i]); #endif /* ATH_DEBUG */ if (reset_type == ATH_RESET_NOLOSS) { ath_tx_processq(sc, &sc->sc_txq[i], 0); ATH_TXQ_LOCK(&sc->sc_txq[i]); /* * Free the holding buffer; DMA is now * stopped. */ ath_txq_freeholdingbuf(sc, &sc->sc_txq[i]); /* * Setup the link pointer to be the * _last_ buffer/descriptor in the list. * If there's nothing in the list, set it * to NULL. 
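				 * (Added note: later transmits chain new
				 * descriptors onto this h/w queue by writing
				 * through axq_link, so after a no-loss drain
				 * it has to reference the last descriptor
				 * still on the list, or be NULL when the
				 * list is empty.)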
*/ bf_last = ATH_TXQ_LAST(&sc->sc_txq[i], axq_q_s); if (bf_last != NULL) { ath_hal_gettxdesclinkptr(ah, bf_last->bf_lastds, &sc->sc_txq[i].axq_link); } else { sc->sc_txq[i].axq_link = NULL; } ATH_TXQ_UNLOCK(&sc->sc_txq[i]); } else ath_tx_draintxq(sc, &sc->sc_txq[i]); } } #ifdef ATH_DEBUG if (sc->sc_debug & ATH_DEBUG_RESET) { struct ath_buf *bf = TAILQ_FIRST(&sc->sc_bbuf); if (bf != NULL && bf->bf_m != NULL) { ath_printtxbuf(sc, bf, sc->sc_bhalq, 0, ath_hal_txprocdesc(ah, bf->bf_lastds, &bf->bf_status.ds_txstat) == HAL_OK); ieee80211_dump_pkt(&sc->sc_ic, mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len, 0, -1); } } #endif /* ATH_DEBUG */ sc->sc_wd_timer = 0; } /* * Update internal state after a channel change. */ static void ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan) { enum ieee80211_phymode mode; /* * Change channels and update the h/w rate map * if we're switching; e.g. 11a to 11b/g. */ mode = ieee80211_chan2mode(chan); if (mode != sc->sc_curmode) ath_setcurmode(sc, mode); sc->sc_curchan = chan; } /* * Set/change channels. If the channel is really being changed, * it's done by resetting the chip. To accomplish this we must * first cleanup any pending DMA, then restart stuff after a la * ath_init. */ static int ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan) { struct ieee80211com *ic = &sc->sc_ic; struct ath_hal *ah = sc->sc_ah; int ret = 0; /* Treat this as an interface reset */ ATH_PCU_UNLOCK_ASSERT(sc); ATH_UNLOCK_ASSERT(sc); /* (Try to) stop TX/RX from occurring */ taskqueue_block(sc->sc_tq); ATH_PCU_LOCK(sc); /* Disable interrupts */ ath_hal_intrset(ah, 0); /* Stop new RX/TX/interrupt completion */ if (ath_reset_grablock(sc, 1) == 0) { device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n", __func__); } /* Stop pending RX/TX completion */ ath_txrx_stop_locked(sc); ATH_PCU_UNLOCK(sc); DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n", __func__, ieee80211_chan2ieee(ic, chan), chan->ic_freq, chan->ic_flags); if (chan != sc->sc_curchan) { HAL_STATUS status; /* * To switch channels clear any pending DMA operations; * wait long enough for the RX fifo to drain, reset the * hardware at the new frequency, and then re-enable * the relevant bits of the h/w. */ #if 0 ath_hal_intrset(ah, 0); /* disable interrupts */ #endif ath_stoprecv(sc, 1); /* turn off frame recv */ /* * First, handle completed TX/RX frames. */ ath_rx_flush(sc); ath_draintxq(sc, ATH_RESET_NOLOSS); /* * Next, flush the non-scheduled frames. */ ath_draintxq(sc, ATH_RESET_FULL); /* clear pending tx frames */ ath_update_chainmasks(sc, chan); ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask, sc->sc_cur_rxchainmask); if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE, HAL_RESET_NORMAL, &status)) { device_printf(sc->sc_dev, "%s: unable to reset " "channel %u (%u MHz, flags 0x%x), hal status %u\n", __func__, ieee80211_chan2ieee(ic, chan), chan->ic_freq, chan->ic_flags, status); ret = EIO; goto finish; } sc->sc_diversity = ath_hal_getdiversity(ah); ATH_RX_LOCK(sc); sc->sc_rx_stopped = 1; sc->sc_rx_resetted = 1; ATH_RX_UNLOCK(sc); /* Quiet time handling - ensure we resync */ ath_vap_clear_quiet_ie(sc); /* Let DFS at it in case it's a DFS channel */ ath_dfs_radar_enable(sc, chan); /* Let spectral at in case spectral is enabled */ ath_spectral_enable(sc, chan); /* * Let bluetooth coexistence at in case it's needed for this * channel */ ath_btcoex_enable(sc, ic->ic_curchan); /* * If we're doing TDMA, enforce the TXOP limitation for chips * that support it. 
*/ if (sc->sc_hasenforcetxop && sc->sc_tdma) ath_hal_setenforcetxop(sc->sc_ah, 1); else ath_hal_setenforcetxop(sc->sc_ah, 0); /* * Re-enable rx framework. */ if (ath_startrecv(sc) != 0) { device_printf(sc->sc_dev, "%s: unable to restart recv logic\n", __func__); ret = EIO; goto finish; } /* * Change channels and update the h/w rate map * if we're switching; e.g. 11a to 11b/g. */ ath_chan_change(sc, chan); /* * Reset clears the beacon timers; reset them * here if needed. */ if (sc->sc_beacons) { /* restart beacons */ #ifdef IEEE80211_SUPPORT_TDMA if (sc->sc_tdma) ath_tdma_config(sc, NULL); else #endif ath_beacon_config(sc, NULL); } /* * Re-enable interrupts. */ #if 0 ath_hal_intrset(ah, sc->sc_imask); #endif } finish: ATH_PCU_LOCK(sc); sc->sc_inreset_cnt--; /* XXX only do this if sc_inreset_cnt == 0? */ ath_hal_intrset(ah, sc->sc_imask); ATH_PCU_UNLOCK(sc); ath_txrx_start(sc); /* XXX ath_start? */ return ret; } /* * Periodically recalibrate the PHY to account * for temperature/environment changes. */ static void ath_calibrate(void *arg) { struct ath_softc *sc = arg; struct ath_hal *ah = sc->sc_ah; struct ieee80211com *ic = &sc->sc_ic; HAL_BOOL longCal, isCalDone = AH_TRUE; HAL_BOOL aniCal, shortCal = AH_FALSE; int nextcal; ATH_LOCK_ASSERT(sc); /* * Force the hardware awake for ANI work. */ ath_power_set_power_state(sc, HAL_PM_AWAKE); /* Skip trying to do this if we're in reset */ if (sc->sc_inreset_cnt) goto restart; if (ic->ic_flags & IEEE80211_F_SCAN) /* defer, off channel */ goto restart; longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz); aniCal = (ticks - sc->sc_lastani >= ath_anicalinterval*hz/1000); if (sc->sc_doresetcal) shortCal = (ticks - sc->sc_lastshortcal >= ath_shortcalinterval*hz/1000); DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: shortCal=%d; longCal=%d; aniCal=%d\n", __func__, shortCal, longCal, aniCal); if (aniCal) { sc->sc_stats.ast_ani_cal++; sc->sc_lastani = ticks; ath_hal_ani_poll(ah, sc->sc_curchan); } if (longCal) { sc->sc_stats.ast_per_cal++; sc->sc_lastlongcal = ticks; if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) { /* * Rfgain is out of bounds, reset the chip * to load new gain values. */ DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: rfgain change\n", __func__); sc->sc_stats.ast_per_rfgain++; sc->sc_resetcal = 0; sc->sc_doresetcal = AH_TRUE; taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask); callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc); ath_power_restore_power_state(sc); return; } /* * If this long cal is after an idle period, then * reset the data collection state so we start fresh. */ if (sc->sc_resetcal) { (void) ath_hal_calreset(ah, sc->sc_curchan); sc->sc_lastcalreset = ticks; sc->sc_lastshortcal = ticks; sc->sc_resetcal = 0; sc->sc_doresetcal = AH_TRUE; } } /* Only call if we're doing a short/long cal, not for ANI calibration */ if (shortCal || longCal) { isCalDone = AH_FALSE; if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) { if (longCal) { /* * Calibrate noise floor data again in case of change. */ ath_hal_process_noisefloor(ah); } } else { DPRINTF(sc, ATH_DEBUG_ANY, "%s: calibration of channel %u failed\n", __func__, sc->sc_curchan->ic_freq); sc->sc_stats.ast_per_calfail++; } /* * XXX TODO: get the NF calibration results from the HAL. * If we failed NF cal then schedule a hard reset to potentially * un-freeze the PHY. * * Note we have to be careful here to not get stuck in an * infinite NIC restart. Ideally we'd not restart if we * failed the first NF cal - that /can/ fail sometimes in * a noisy environment. 
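		 *
		 * (Added, purely illustrative numbers: the rescheduling
		 * below works in ticks.  Assuming hz = 1000 and, say, a
		 * 100 ms short-cal interval and a 30 s long-cal interval,
		 * the "not done" path re-arms the callout after 100 ticks
		 * (1000 ticks when not in hostap mode), while the "done"
		 * path waits 30000 ticks, possibly clamped further by the
		 * ANI interval via the MIN() below.)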
*/ if (shortCal) sc->sc_lastshortcal = ticks; } if (!isCalDone) { restart: /* * Use a shorter interval to potentially collect multiple * data samples required to complete calibration. Once * we're told the work is done we drop back to a longer * interval between requests. We're more aggressive doing * work when operating as an AP to improve operation right * after startup. */ sc->sc_lastshortcal = ticks; nextcal = ath_shortcalinterval*hz/1000; if (sc->sc_opmode != HAL_M_HOSTAP) nextcal *= 10; sc->sc_doresetcal = AH_TRUE; } else { /* nextcal should be the shortest time for next event */ nextcal = ath_longcalinterval*hz; if (sc->sc_lastcalreset == 0) sc->sc_lastcalreset = sc->sc_lastlongcal; else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz) sc->sc_resetcal = 1; /* setup reset next trip */ sc->sc_doresetcal = AH_FALSE; } /* ANI calibration may occur more often than short/long/resetcal */ if (ath_anicalinterval > 0) nextcal = MIN(nextcal, ath_anicalinterval*hz/1000); if (nextcal != 0) { DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n", __func__, nextcal, isCalDone ? "" : "!"); callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc); } else { DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n", __func__); /* NB: don't rearm timer */ } /* * Restore power state now that we're done. */ ath_power_restore_power_state(sc); } static void ath_scan_start(struct ieee80211com *ic) { struct ath_softc *sc = ic->ic_softc; struct ath_hal *ah = sc->sc_ah; u_int32_t rfilt; /* XXX calibration timer? */ /* XXXGL: is constant ieee80211broadcastaddr a correct choice? */ ATH_LOCK(sc); sc->sc_scanning = 1; sc->sc_syncbeacon = 0; rfilt = ath_calcrxfilter(sc); ATH_UNLOCK(sc); ATH_PCU_LOCK(sc); ath_hal_setrxfilter(ah, rfilt); ath_hal_setassocid(ah, ieee80211broadcastaddr, 0); ATH_PCU_UNLOCK(sc); DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n", __func__, rfilt, ether_sprintf(ieee80211broadcastaddr)); } static void ath_scan_end(struct ieee80211com *ic) { struct ath_softc *sc = ic->ic_softc; struct ath_hal *ah = sc->sc_ah; u_int32_t rfilt; ATH_LOCK(sc); sc->sc_scanning = 0; rfilt = ath_calcrxfilter(sc); ATH_UNLOCK(sc); ATH_PCU_LOCK(sc); ath_hal_setrxfilter(ah, rfilt); ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); ath_hal_process_noisefloor(ah); ATH_PCU_UNLOCK(sc); DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid); } #ifdef ATH_ENABLE_11N /* * For now, just do a channel change. * * Later, we'll go through the hard slog of suspending tx/rx, changing rate * control state and resetting the hardware without dropping frames out * of the queue. * * The unfortunate trouble here is making absolutely sure that the * channel width change has propagated enough so the hardware * absolutely isn't handed bogus frames for it's current operating * mode. (Eg, 40MHz frames in 20MHz mode.) Since TX and RX can and * does occur in parallel, we need to make certain we've blocked * any further ongoing TX (and RX, that can cause raw TX) * before we do this. */ static void ath_update_chw(struct ieee80211com *ic) { struct ath_softc *sc = ic->ic_softc; //DPRINTF(sc, ATH_DEBUG_STATE, "%s: called\n", __func__); device_printf(sc->sc_dev, "%s: called\n", __func__); /* * XXX TODO: schedule a tasklet that stops things without freeing, * walks the now stopped TX queue(s) looking for frames to retry * as if we TX filtered them (whch may mean dropping non-ampdu frames!) 
* but okay) then place them back on the software queue so they * can have the rate control lookup done again. */ ath_set_channel(ic); } #endif /* ATH_ENABLE_11N */ /* * This is called by the beacon parsing routine in the receive * path to update the current quiet time information provided by * an AP. * * This is STA specific, it doesn't take the AP TBTT/beacon slot * offset into account. * * The quiet IE doesn't control the /now/ beacon interval - it * controls the upcoming beacon interval. So, when tbtt=1, * the quiet element programming shall be for the next beacon * interval. There's no tbtt=0 behaviour defined, so don't. * * Since we're programming the next quiet interval, we have * to keep in mind what we will see when the next beacon * is received with potentially a quiet IE. For example, if * quiet_period is 1, then we are always getting a quiet interval * each TBTT - so if we just program it in upon each beacon received, * it will constantly reflect the "next" TBTT and we will never * let the counter stay programmed correctly. * * So: * + the first time we see the quiet IE, program it and store * the details somewhere; * + if the quiet parameters don't change (ie, period/duration/offset) * then just leave the programming enabled; * + (we can "skip" beacons, so don't try to enforce tbttcount unless * you're willing to also do the skipped beacon math); * + if the quiet IE is removed, then halt quiet time. */ static int ath_set_quiet_ie(struct ieee80211_node *ni, uint8_t *ie) { struct ieee80211_quiet_ie *q; struct ieee80211vap *vap = ni->ni_vap; struct ath_vap *avp = ATH_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct ath_softc *sc = ic->ic_softc; if (vap->iv_opmode != IEEE80211_M_STA) return (0); /* Verify we have a quiet time IE */ if (ie == NULL) { DPRINTF(sc, ATH_DEBUG_QUIETIE, "%s: called; NULL IE, disabling\n", __func__); ath_hal_set_quiet(sc->sc_ah, 0, 0, 0, HAL_QUIET_DISABLE); memset(&avp->quiet_ie, 0, sizeof(avp->quiet_ie)); return (0); } /* If we do, verify it's actually legit */ if (ie[0] != IEEE80211_ELEMID_QUIET) return 0; if (ie[1] != 6) return 0; /* Note: this belongs in net80211, parsed out and everything */ q = (void *) ie; /* * Compare what we have stored to what we last saw. * If they're the same then don't program in anything. */ if ((q->period == avp->quiet_ie.period) && (le16dec(&q->duration) == le16dec(&avp->quiet_ie.duration)) && (le16dec(&q->offset) == le16dec(&avp->quiet_ie.offset))) return (0); DPRINTF(sc, ATH_DEBUG_QUIETIE, "%s: called; tbttcount=%d, period=%d, duration=%d, offset=%d\n", __func__, (int) q->tbttcount, (int) q->period, (int) le16dec(&q->duration), (int) le16dec(&q->offset)); /* * Don't program in garbage values. */ if ((le16dec(&q->duration) == 0) || (le16dec(&q->duration) >= ni->ni_intval)) { DPRINTF(sc, ATH_DEBUG_QUIETIE, "%s: invalid duration (%d)\n", __func__, le16dec(&q->duration)); return (0); } /* * Can have a 0 offset, but not a duration - so just check * they don't exceed the intval. */ if (le16dec(&q->duration) + le16dec(&q->offset) >= ni->ni_intval) { DPRINTF(sc, ATH_DEBUG_QUIETIE, "%s: invalid duration + offset (%d+%d)\n", __func__, le16dec(&q->duration), le16dec(&q->offset)); return (0); } if (q->tbttcount == 0) { DPRINTF(sc, ATH_DEBUG_QUIETIE, "%s: invalid tbttcount (0)\n", __func__); return (0); } if (q->period == 0) { DPRINTF(sc, ATH_DEBUG_QUIETIE, "%s: invalid period (0)\n", __func__); return (0); } /* * This is a new quiet time IE config, so wait until tbttcount * is equal to 1, and program it in. 
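	 *
	 * (Added worked example, values invented for illustration: with a
	 * beacon interval of 100 TU and an IE carrying period=2,
	 * duration=20 TU and offset=10 TU, the call below asks the HAL for
	 * a quiet period of 2 * 100 = 200 TU, a 20 TU duration and an
	 * offset of 10 + 100 = 110 TU, anchored to the current TSF.)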
*/ if (q->tbttcount == 1) { DPRINTF(sc, ATH_DEBUG_QUIETIE, "%s: programming\n", __func__); ath_hal_set_quiet(sc->sc_ah, q->period * ni->ni_intval, /* convert to TU */ le16dec(&q->duration), /* already in TU */ le16dec(&q->offset) + ni->ni_intval, HAL_QUIET_ENABLE | HAL_QUIET_ADD_CURRENT_TSF); /* * Note: no HAL_QUIET_ADD_SWBA_RESP_TIME; as this is for * STA mode */ /* Update local state */ memcpy(&avp->quiet_ie, ie, sizeof(struct ieee80211_quiet_ie)); } return (0); } static void ath_set_channel(struct ieee80211com *ic) { struct ath_softc *sc = ic->ic_softc; ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); (void) ath_chan_set(sc, ic->ic_curchan); /* * If we are returning to our bss channel then mark state * so the next recv'd beacon's tsf will be used to sync the * beacon timers. Note that since we only hear beacons in * sta/ibss mode this has no effect in other operating modes. */ ATH_LOCK(sc); if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan) sc->sc_syncbeacon = 1; ath_power_restore_power_state(sc); ATH_UNLOCK(sc); } /* * Walk the vap list and check if there any vap's in RUN state. */ static int ath_isanyrunningvaps(struct ieee80211vap *this) { struct ieee80211com *ic = this->iv_ic; struct ieee80211vap *vap; IEEE80211_LOCK_ASSERT(ic); TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { if (vap != this && vap->iv_state >= IEEE80211_S_RUN) return 1; } return 0; } static int ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct ieee80211com *ic = vap->iv_ic; struct ath_softc *sc = ic->ic_softc; struct ath_vap *avp = ATH_VAP(vap); struct ath_hal *ah = sc->sc_ah; struct ieee80211_node *ni = NULL; int i, error, stamode; u_int32_t rfilt; int csa_run_transition = 0; enum ieee80211_state ostate = vap->iv_state; static const HAL_LED_STATE leds[] = { HAL_LED_INIT, /* IEEE80211_S_INIT */ HAL_LED_SCAN, /* IEEE80211_S_SCAN */ HAL_LED_AUTH, /* IEEE80211_S_AUTH */ HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */ HAL_LED_RUN, /* IEEE80211_S_CAC */ HAL_LED_RUN, /* IEEE80211_S_RUN */ HAL_LED_RUN, /* IEEE80211_S_CSA */ HAL_LED_RUN, /* IEEE80211_S_SLEEP */ }; DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__, ieee80211_state_name[ostate], ieee80211_state_name[nstate]); /* * net80211 _should_ have the comlock asserted at this point. * There are some comments around the calls to vap->iv_newstate * which indicate that it (newstate) may end up dropping the * lock. This and the subsequent lock assert check after newstate * are an attempt to catch these and figure out how/why. */ IEEE80211_LOCK_ASSERT(ic); /* Before we touch the hardware - wake it up */ ATH_LOCK(sc); /* * If the NIC is in anything other than SLEEP state, * we need to ensure that self-generated frames are * set for PWRMGT=0. Otherwise we may end up with * strange situations. * * XXX TODO: is this actually the case? :-) */ if (nstate != IEEE80211_S_SLEEP) ath_power_setselfgen(sc, HAL_PM_AWAKE); /* * Now, wake the thing up. */ ath_power_set_power_state(sc, HAL_PM_AWAKE); /* * And stop the calibration callout whilst we have * ATH_LOCK held. */ callout_stop(&sc->sc_cal_ch); ATH_UNLOCK(sc); if (ostate == IEEE80211_S_CSA && nstate == IEEE80211_S_RUN) csa_run_transition = 1; ath_hal_setledstate(ah, leds[nstate]); /* set LED */ if (nstate == IEEE80211_S_SCAN) { /* * Scanning: turn off beacon miss and don't beacon. * Mark beacon state so when we reach RUN state we'll * [re]setup beacons. Unblock the task q thread so * deferred interrupt processing is done. 
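		 *
		 * (Added note: concretely, HAL_INT_SWBA and HAL_INT_BMISS
		 * are masked out of sc_imask below, so neither software
		 * beacon alerts nor beacon-miss interrupts fire while
		 * scanning; they are re-established when beacons are
		 * reconfigured on the transition to RUN state.)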
*/ /* Ensure we stay awake during scan */ ATH_LOCK(sc); ath_power_setselfgen(sc, HAL_PM_AWAKE); ath_power_setpower(sc, HAL_PM_AWAKE, 1); ATH_UNLOCK(sc); ath_hal_intrset(ah, sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS)); sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); sc->sc_beacons = 0; taskqueue_unblock(sc->sc_tq); } ni = ieee80211_ref_node(vap->iv_bss); rfilt = ath_calcrxfilter(sc); stamode = (vap->iv_opmode == IEEE80211_M_STA || vap->iv_opmode == IEEE80211_M_AHDEMO || vap->iv_opmode == IEEE80211_M_IBSS); /* * XXX Dont need to do this (and others) if we've transitioned * from SLEEP->RUN. */ if (stamode && nstate == IEEE80211_S_RUN) { sc->sc_curaid = ni->ni_associd; IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid); ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); } DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid); ath_hal_setrxfilter(ah, rfilt); /* XXX is this to restore keycache on resume? */ if (vap->iv_opmode != IEEE80211_M_STA && (vap->iv_flags & IEEE80211_F_PRIVACY)) { for (i = 0; i < IEEE80211_WEP_NKID; i++) if (ath_hal_keyisvalid(ah, i)) ath_hal_keysetmac(ah, i, ni->ni_bssid); } /* * Invoke the parent method to do net80211 work. */ error = avp->av_newstate(vap, nstate, arg); if (error != 0) goto bad; /* * See above: ensure av_newstate() doesn't drop the lock * on us. */ IEEE80211_LOCK_ASSERT(ic); /* * XXX TODO: if nstate is _S_CAC, then we should disable * ACK processing until CAC is completed. */ /* * XXX TODO: if we're on a passive channel, then we should * not allow any ACKs or self-generated frames until we hear * a beacon. Unfortunately there isn't a notification from * net80211 so perhaps we could slot that particular check * into the mgmt receive path and just ensure that we clear * it on RX of beacons in passive mode (and only clear it * once, obviously.) */ /* * XXX TODO: net80211 should be tracking whether channels * have heard beacons and are thus considered "OK" for * transmitting - and then inform the driver about this * state change. That way if we hear an AP go quiet * (and nothing else is beaconing on a channel) the * channel can go back to being passive until another * beacon is heard. */ /* * XXX TODO: if nstate is _S_CAC, then we should disable * ACK processing until CAC is completed. */ /* * XXX TODO: if we're on a passive channel, then we should * not allow any ACKs or self-generated frames until we hear * a beacon. Unfortunately there isn't a notification from * net80211 so perhaps we could slot that particular check * into the mgmt receive path and just ensure that we clear * it on RX of beacons in passive mode (and only clear it * once, obviously.) */ /* * XXX TODO: net80211 should be tracking whether channels * have heard beacons and are thus considered "OK" for * transmitting - and then inform the driver about this * state change. That way if we hear an AP go quiet * (and nothing else is beaconing on a channel) the * channel can go back to being passive until another * beacon is heard. 
*/ if (nstate == IEEE80211_S_RUN) { /* NB: collect bss node again, it may have changed */ ieee80211_free_node(ni); ni = ieee80211_ref_node(vap->iv_bss); DPRINTF(sc, ATH_DEBUG_STATE, "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s " "capinfo 0x%04x chan %d\n", __func__, vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan)); switch (vap->iv_opmode) { #ifdef IEEE80211_SUPPORT_TDMA case IEEE80211_M_AHDEMO: if ((vap->iv_caps & IEEE80211_C_TDMA) == 0) break; /* fall thru... */ #endif case IEEE80211_M_HOSTAP: case IEEE80211_M_IBSS: case IEEE80211_M_MBSS: /* * TODO: Enable ACK processing (ie, clear AR_DIAG_ACK_DIS.) * For channels that are in CAC, we may have disabled * this during CAC to ensure we don't ACK frames * sent to us. */ /* * Allocate and setup the beacon frame. * * Stop any previous beacon DMA. This may be * necessary, for example, when an ibss merge * causes reconfiguration; there will be a state * transition from RUN->RUN that means we may * be called with beacon transmission active. */ ath_hal_stoptxdma(ah, sc->sc_bhalq); error = ath_beacon_alloc(sc, ni); if (error != 0) goto bad; /* * If joining an adhoc network defer beacon timer * configuration to the next beacon frame so we * have a current TSF to use. Otherwise we're * starting an ibss/bss so there's no need to delay; * if this is the first vap moving to RUN state, then * beacon state needs to be [re]configured. */ if (vap->iv_opmode == IEEE80211_M_IBSS && ni->ni_tstamp.tsf != 0) { sc->sc_syncbeacon = 1; } else if (!sc->sc_beacons) { #ifdef IEEE80211_SUPPORT_TDMA if (vap->iv_caps & IEEE80211_C_TDMA) ath_tdma_config(sc, vap); else #endif ath_beacon_config(sc, vap); sc->sc_beacons = 1; } break; case IEEE80211_M_STA: /* * Defer beacon timer configuration to the next * beacon frame so we have a current TSF to use * (any TSF collected when scanning is likely old). * However if it's due to a CSA -> RUN transition, * force a beacon update so we pick up a lack of * beacons from an AP in CAC and thus force a * scan. * * And, there's also corner cases here where * after a scan, the AP may have disappeared. * In that case, we may not receive an actual * beacon to update the beacon timer and thus we * won't get notified of the missing beacons. */ if (ostate != IEEE80211_S_RUN && ostate != IEEE80211_S_SLEEP) { DPRINTF(sc, ATH_DEBUG_BEACON, "%s: STA; syncbeacon=1\n", __func__); sc->sc_syncbeacon = 1; /* Quiet time handling - ensure we resync */ memset(&avp->quiet_ie, 0, sizeof(avp->quiet_ie)); if (csa_run_transition) ath_beacon_config(sc, vap); /* * PR: kern/175227 * * Reconfigure beacons during reset; as otherwise * we won't get the beacon timers reprogrammed * after a reset and thus we won't pick up a * beacon miss interrupt. * * Hopefully we'll see a beacon before the BMISS * timer fires (too often), leading to a STA * disassociation. */ sc->sc_beacons = 1; } break; case IEEE80211_M_MONITOR: /* * Monitor mode vaps have only INIT->RUN and RUN->RUN * transitions so we must re-enable interrupts here to * handle the case of a single monitor mode vap. */ ath_hal_intrset(ah, sc->sc_imask); break; case IEEE80211_M_WDS: break; default: break; } /* * Let the hal process statistics collected during a * scan so it can provide calibrated noise floor data. */ ath_hal_process_noisefloor(ah); /* * Reset rssi stats; maybe not the best place... 
*/ sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER; sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER; sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER; /* * Force awake for RUN mode. */ ATH_LOCK(sc); ath_power_setselfgen(sc, HAL_PM_AWAKE); ath_power_setpower(sc, HAL_PM_AWAKE, 1); /* * Finally, start any timers and the task q thread * (in case we didn't go through SCAN state). */ if (ath_longcalinterval != 0) { /* start periodic recalibration timer */ callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc); } else { DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n", __func__); } ATH_UNLOCK(sc); taskqueue_unblock(sc->sc_tq); } else if (nstate == IEEE80211_S_INIT) { /* Quiet time handling - ensure we resync */ memset(&avp->quiet_ie, 0, sizeof(avp->quiet_ie)); /* * If there are no vaps left in RUN state then * shutdown host/driver operation: * o disable interrupts * o disable the task queue thread * o mark beacon processing as stopped */ if (!ath_isanyrunningvaps(vap)) { sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); /* disable interrupts */ ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL); taskqueue_block(sc->sc_tq); sc->sc_beacons = 0; } /* * For at least STA mode we likely should clear the ANI * and NF calibration state and allow the NIC/HAL to figure * out optimal parameters at runtime. Otherwise if we * disassociate due to interference / deafness it may persist * when we reconnect. * * Note: may need to do this for other states too, not just * _S_INIT. */ #ifdef IEEE80211_SUPPORT_TDMA ath_hal_setcca(ah, AH_TRUE); #endif } else if (nstate == IEEE80211_S_SLEEP) { /* We're going to sleep, so transition appropriately */ /* For now, only do this if we're a single STA vap */ if (sc->sc_nvaps == 1 && vap->iv_opmode == IEEE80211_M_STA) { DPRINTF(sc, ATH_DEBUG_BEACON, "%s: syncbeacon=%d\n", __func__, sc->sc_syncbeacon); ATH_LOCK(sc); /* * Always at least set the self-generated * frame config to set PWRMGT=1. */ ath_power_setselfgen(sc, HAL_PM_NETWORK_SLEEP); /* * If we're not syncing beacons, transition * to NETWORK_SLEEP. * * We stay awake if syncbeacon > 0 in case * we need to listen for some beacons otherwise * our beacon timer config may be wrong. */ if (sc->sc_syncbeacon == 0) { ath_power_setpower(sc, HAL_PM_NETWORK_SLEEP, 1); } ATH_UNLOCK(sc); } /* * Note - the ANI/calibration timer isn't re-enabled during * network sleep for now. One unfortunate side-effect is that * the PHY/airtime statistics aren't gathered on the channel * but I haven't yet tested to see if reading those registers * CAN occur during network sleep. * * This should be revisited in a future commit, even if it's * just to split out the airtime polling from ANI/calibration. */ } else if (nstate == IEEE80211_S_SCAN) { /* Quiet time handling - ensure we resync */ memset(&avp->quiet_ie, 0, sizeof(avp->quiet_ie)); /* * If we're in scan mode then startpcureceive() is * hopefully being called with "reset ANI" for this channel; * but once we attempt to reassociate we program in the previous * ANI values and.. not do any calibration until we're running. * This may mean we stay deaf unless we can associate successfully. * * So do kick off the cal timer to get NF/ANI going. 
*/ ATH_LOCK(sc); if (ath_longcalinterval != 0) { /* start periodic recalibration timer */ callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc); } else { DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n", __func__); } ATH_UNLOCK(sc); } bad: ieee80211_free_node(ni); /* * Restore the power state - either to what it was, or * to network_sleep if it's alright. */ ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); return error; } /* * Allocate a key cache slot to the station so we can * setup a mapping from key index to node. The key cache * slot is needed for managing antenna state and for * compression when stations do not use crypto. We do * it uniliaterally here; if crypto is employed this slot * will be reassigned. */ static void ath_setup_stationkey(struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ath_softc *sc = vap->iv_ic->ic_softc; ieee80211_keyix keyix, rxkeyix; /* XXX should take a locked ref to vap->iv_bss */ if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) { /* * Key cache is full; we'll fall back to doing * the more expensive lookup in software. Note * this also means no h/w compression. */ /* XXX msg+statistic */ } else { /* XXX locking? */ ni->ni_ucastkey.wk_keyix = keyix; ni->ni_ucastkey.wk_rxkeyix = rxkeyix; /* NB: must mark device key to get called back on delete */ ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY; IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr); /* NB: this will create a pass-thru key entry */ ath_keyset(sc, vap, &ni->ni_ucastkey, vap->iv_bss); } } /* * Setup driver-specific state for a newly associated node. * Note that we're called also on a re-associate, the isnew * param tells us if this is the first time or not. */ static void ath_newassoc(struct ieee80211_node *ni, int isnew) { struct ath_node *an = ATH_NODE(ni); struct ieee80211vap *vap = ni->ni_vap; struct ath_softc *sc = vap->iv_ic->ic_softc; const struct ieee80211_txparam *tp = ni->ni_txparms; an->an_mcastrix = ath_tx_findrix(sc, tp->mcastrate); an->an_mgmtrix = ath_tx_findrix(sc, tp->mgmtrate); DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: reassoc; isnew=%d, is_powersave=%d\n", __func__, ni->ni_macaddr, ":", isnew, an->an_is_powersave); ATH_NODE_LOCK(an); ath_rate_newassoc(sc, an, isnew); ATH_NODE_UNLOCK(an); if (isnew && (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey && ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE) ath_setup_stationkey(ni); /* * If we're reassociating, make sure that any paused queues * get unpaused. * * Now, we may have frames in the hardware queue for this node. * So if we are reassociating and there are frames in the queue, * we need to go through the cleanup path to ensure that they're * marked as non-aggregate. */ if (! isnew) { DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: reassoc; is_powersave=%d\n", __func__, ni->ni_macaddr, ":", an->an_is_powersave); /* XXX for now, we can't hold the lock across assoc */ ath_tx_node_reassoc(sc, an); /* XXX for now, we can't hold the lock across wakeup */ if (an->an_is_powersave) ath_tx_node_wakeup(sc, an); } } static int ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg, int nchans, struct ieee80211_channel chans[]) { struct ath_softc *sc = ic->ic_softc; struct ath_hal *ah = sc->sc_ah; HAL_STATUS status; DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: rd %u cc %u location %c%s\n", __func__, reg->regdomain, reg->country, reg->location, reg->ecm ? 
" ecm" : ""); status = ath_hal_set_channels(ah, chans, nchans, reg->country, reg->regdomain); if (status != HAL_OK) { DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n", __func__, status); return EINVAL; /* XXX */ } return 0; } static void ath_getradiocaps(struct ieee80211com *ic, int maxchans, int *nchans, struct ieee80211_channel chans[]) { struct ath_softc *sc = ic->ic_softc; struct ath_hal *ah = sc->sc_ah; DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n", __func__, SKU_DEBUG, CTRY_DEFAULT); /* XXX check return */ (void) ath_hal_getchannels(ah, chans, maxchans, nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE); } static int ath_getchannels(struct ath_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ath_hal *ah = sc->sc_ah; HAL_STATUS status; /* * Collect channel set based on EEPROM contents. */ status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX, &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE); if (status != HAL_OK) { device_printf(sc->sc_dev, "%s: unable to collect channel list from hal, status %d\n", __func__, status); return EINVAL; } (void) ath_hal_getregdomain(ah, &sc->sc_eerd); ath_hal_getcountrycode(ah, &sc->sc_eecc); /* NB: cannot fail */ /* XXX map Atheros sku's to net80211 SKU's */ /* XXX net80211 types too small */ ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd; ic->ic_regdomain.country = (uint16_t) sc->sc_eecc; ic->ic_regdomain.isocc[0] = ' '; /* XXX don't know */ ic->ic_regdomain.isocc[1] = ' '; ic->ic_regdomain.ecm = 1; ic->ic_regdomain.location = 'I'; DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n", __func__, sc->sc_eerd, sc->sc_eecc, ic->ic_regdomain.regdomain, ic->ic_regdomain.country, ic->ic_regdomain.location, ic->ic_regdomain.ecm ? 
" ecm" : ""); return 0; } static int ath_rate_setup(struct ath_softc *sc, u_int mode) { struct ath_hal *ah = sc->sc_ah; const HAL_RATE_TABLE *rt; switch (mode) { case IEEE80211_MODE_11A: rt = ath_hal_getratetable(ah, HAL_MODE_11A); break; case IEEE80211_MODE_HALF: rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE); break; case IEEE80211_MODE_QUARTER: rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE); break; case IEEE80211_MODE_11B: rt = ath_hal_getratetable(ah, HAL_MODE_11B); break; case IEEE80211_MODE_11G: rt = ath_hal_getratetable(ah, HAL_MODE_11G); break; case IEEE80211_MODE_TURBO_A: rt = ath_hal_getratetable(ah, HAL_MODE_108A); break; case IEEE80211_MODE_TURBO_G: rt = ath_hal_getratetable(ah, HAL_MODE_108G); break; case IEEE80211_MODE_STURBO_A: rt = ath_hal_getratetable(ah, HAL_MODE_TURBO); break; case IEEE80211_MODE_11NA: rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20); break; case IEEE80211_MODE_11NG: rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20); break; default: DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n", __func__, mode); return 0; } sc->sc_rates[mode] = rt; return (rt != NULL); } static void ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode) { /* NB: on/off times from the Atheros NDIS driver, w/ permission */ static const struct { u_int rate; /* tx/rx 802.11 rate */ u_int16_t timeOn; /* LED on time (ms) */ u_int16_t timeOff; /* LED off time (ms) */ } blinkrates[] = { { 108, 40, 10 }, { 96, 44, 11 }, { 72, 50, 13 }, { 48, 57, 14 }, { 36, 67, 16 }, { 24, 80, 20 }, { 22, 100, 25 }, { 18, 133, 34 }, { 12, 160, 40 }, { 10, 200, 50 }, { 6, 240, 58 }, { 4, 267, 66 }, { 2, 400, 100 }, { 0, 500, 130 }, /* XXX half/quarter rates */ }; const HAL_RATE_TABLE *rt; int i, j; memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap)); rt = sc->sc_rates[mode]; KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode)); for (i = 0; i < rt->rateCount; i++) { uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL; if (rt->info[i].phy != IEEE80211_T_HT) sc->sc_rixmap[ieeerate] = i; else sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i; } memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap)); for (i = 0; i < nitems(sc->sc_hwmap); i++) { if (i >= rt->rateCount) { sc->sc_hwmap[i].ledon = (500 * hz) / 1000; sc->sc_hwmap[i].ledoff = (130 * hz) / 1000; continue; } sc->sc_hwmap[i].ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL; if (rt->info[i].phy == IEEE80211_T_HT) sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS; sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD; if (rt->info[i].shortPreamble || rt->info[i].phy == IEEE80211_T_OFDM) sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE; sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags; for (j = 0; j < nitems(blinkrates)-1; j++) if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate) break; /* NB: this uses the last entry if the rate isn't found */ /* XXX beware of overlow */ sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000; sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000; } sc->sc_currates = rt; sc->sc_curmode = mode; /* * All protection frames are transmitted at 2Mb/s for * 11g, otherwise at 1Mb/s. 
*/ if (mode == IEEE80211_MODE_11G) sc->sc_protrix = ath_tx_findrix(sc, 2*2); else sc->sc_protrix = ath_tx_findrix(sc, 2*1); /* NB: caller is responsible for resetting rate control state */ } static void ath_watchdog(void *arg) { struct ath_softc *sc = arg; struct ieee80211com *ic = &sc->sc_ic; int do_reset = 0; ATH_LOCK_ASSERT(sc); if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) { uint32_t hangs; ath_power_set_power_state(sc, HAL_PM_AWAKE); if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) && hangs != 0) { device_printf(sc->sc_dev, "%s hang detected (0x%x)\n", hangs & 0xff ? "bb" : "mac", hangs); } else device_printf(sc->sc_dev, "device timeout\n"); do_reset = 1; counter_u64_add(ic->ic_oerrors, 1); sc->sc_stats.ast_watchdog++; ath_power_restore_power_state(sc); } /* * We can't hold the lock across the ath_reset() call. * * And since this routine can't hold a lock and sleep, * do the reset deferred. */ if (do_reset) { taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask); } callout_schedule(&sc->sc_wd_ch, hz); } static void ath_parent(struct ieee80211com *ic) { struct ath_softc *sc = ic->ic_softc; int error = EDOOFUS; ATH_LOCK(sc); if (ic->ic_nrunning > 0) { /* * To avoid rescanning another access point, * do not call ath_init() here. Instead, * only reflect promisc mode settings. */ if (sc->sc_running) { ath_power_set_power_state(sc, HAL_PM_AWAKE); ath_mode_init(sc); ath_power_restore_power_state(sc); } else if (!sc->sc_invalid) { /* * Beware of being called during attach/detach * to reset promiscuous mode. In that case we * will still be marked UP but not RUNNING. * However trying to re-init the interface * is the wrong thing to do as we've already * torn down much of our state. There's * probably a better way to deal with this. */ error = ath_init(sc); } } else { ath_stop(sc); if (!sc->sc_invalid) ath_power_setpower(sc, HAL_PM_FULL_SLEEP, 1); } ATH_UNLOCK(sc); if (error == 0) { #ifdef ATH_TX99_DIAG if (sc->sc_tx99 != NULL) sc->sc_tx99->start(sc->sc_tx99); else #endif ieee80211_start_all(ic); } } /* * Announce various information on device/driver attach. */ static void ath_announce(struct ath_softc *sc) { struct ath_hal *ah = sc->sc_ah; device_printf(sc->sc_dev, "%s mac %d.%d RF%s phy %d.%d\n", ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev, ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf); device_printf(sc->sc_dev, "2GHz radio: 0x%.4x; 5GHz radio: 0x%.4x\n", ah->ah_analog2GhzRev, ah->ah_analog5GhzRev); if (bootverbose) { int i; for (i = 0; i <= WME_AC_VO; i++) { struct ath_txq *txq = sc->sc_ac2q[i]; device_printf(sc->sc_dev, "Use hw queue %u for %s traffic\n", txq->axq_qnum, ieee80211_wme_acnames[i]); } device_printf(sc->sc_dev, "Use hw queue %u for CAB traffic\n", sc->sc_cabq->axq_qnum); device_printf(sc->sc_dev, "Use hw queue %u for beacons\n", sc->sc_bhalq); } if (ath_rxbuf != ATH_RXBUF) device_printf(sc->sc_dev, "using %u rx buffers\n", ath_rxbuf); if (ath_txbuf != ATH_TXBUF) device_printf(sc->sc_dev, "using %u tx buffers\n", ath_txbuf); if (sc->sc_mcastkey && bootverbose) device_printf(sc->sc_dev, "using multicast key search\n"); } static void ath_dfs_tasklet(void *p, int npending) { struct ath_softc *sc = (struct ath_softc *) p; struct ieee80211com *ic = &sc->sc_ic; /* * If previous processing has found a radar event, * signal this to the net80211 layer to begin DFS * processing. */ if (ath_dfs_process_radar_event(sc, sc->sc_curchan)) { /* DFS event found, initiate channel change */ /* * XXX TODO: immediately disable ACK processing * on the current channel. 
This would be done * by setting AR_DIAG_ACK_DIS (AR5212; may be * different for others) until we are out of * CAC. */ /* * XXX doesn't currently tell us whether the event * XXX was found in the primary or extension * XXX channel! */ IEEE80211_LOCK(ic); ieee80211_dfs_notify_radar(ic, sc->sc_curchan); IEEE80211_UNLOCK(ic); } } /* * Enable/disable power save. This must be called with * no TX driver locks currently held, so it should only * be called from the RX path (which doesn't hold any * TX driver locks.) */ static void ath_node_powersave(struct ieee80211_node *ni, int enable) { #ifdef ATH_SW_PSQ struct ath_node *an = ATH_NODE(ni); struct ieee80211com *ic = ni->ni_ic; struct ath_softc *sc = ic->ic_softc; struct ath_vap *avp = ATH_VAP(ni->ni_vap); /* XXX and no TXQ locks should be held here */ DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6D: enable=%d\n", __func__, ni->ni_macaddr, ":", !! enable); /* Suspend or resume software queue handling */ if (enable) ath_tx_node_sleep(sc, an); else ath_tx_node_wakeup(sc, an); /* Update net80211 state */ avp->av_node_ps(ni, enable); #else struct ath_vap *avp = ATH_VAP(ni->ni_vap); /* Update net80211 state */ avp->av_node_ps(ni, enable); #endif/* ATH_SW_PSQ */ } /* * Notification from net80211 that the powersave queue state has * changed. * * Since the software queue also may have some frames: * * + if the node software queue has frames and the TID state * is 0, we set the TIM; * + if the node and the stack are both empty, we clear the TIM bit. * + If the stack tries to set the bit, always set it. * + If the stack tries to clear the bit, only clear it if the * software queue in question is also cleared. * * TODO: this is called during node teardown; so let's ensure this * is all correctly handled and that the TIM bit is cleared. * It may be that the node flush is called _AFTER_ the net80211 * stack clears the TIM. * * Here is the racy part. Since it's possible >1 concurrent, * overlapping TXes will appear complete with a TX completion in * another thread, it's possible that the concurrent TIM calls will * clash. We can't hold the node lock here because setting the * TIM grabs the net80211 comlock and this may cause a LOR. * The solution is either to totally serialise _everything_ at * this point (ie, all TX, completion and any reset/flush go into * one taskqueue) or a new "ath TIM lock" needs to be created that * just wraps the driver state change and this call to avp->av_set_tim(). * * The same race exists in the net80211 power save queue handling * as well. Since multiple transmitting threads may queue frames * into the driver, as well as ps-poll and the driver transmitting * frames (and thus clearing the psq), it's quite possible that * a packet entering the PSQ and a ps-poll being handled will * race, causing the TIM to be cleared and not re-set. */ static int ath_node_set_tim(struct ieee80211_node *ni, int enable) { #ifdef ATH_SW_PSQ struct ieee80211com *ic = ni->ni_ic; struct ath_softc *sc = ic->ic_softc; struct ath_node *an = ATH_NODE(ni); struct ath_vap *avp = ATH_VAP(ni->ni_vap); int changed = 0; ATH_TX_LOCK(sc); an->an_stack_psq = enable; /* * This will get called for all operating modes, * even if avp->av_set_tim is unset. * It's currently set for hostap/ibss modes; but * the same infrastructure is used for both STA * and AP/IBSS node power save. */ if (avp->av_set_tim == NULL) { ATH_TX_UNLOCK(sc); return (0); } /* * If setting the bit, always set it here. * If clearing the bit, only clear it if the * software queue is also empty. 
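 *
 * In summary (matching the branches below):
 *
 *   enable,  TIM already set           -> ignore
 *   enable,  TIM not set               -> set the TIM bit
 *   disable, software queue empty      -> clear the TIM bit
 *   disable, node not in powersave     -> clear the TIM bit anyway
 *   disable, software queue non-empty  -> leave the TIM bit set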
* * If the node has left power save, just clear the TIM * bit regardless of the state of the power save queue. * * XXX TODO: although atomics are used, it's quite possible * that a race will occur between this and setting/clearing * in another thread. TX completion will occur always in * one thread, however setting/clearing the TIM bit can come * from a variety of different process contexts! */ if (enable && an->an_tim_set == 1) { DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6D: enable=%d, tim_set=1, ignoring\n", __func__, ni->ni_macaddr, ":", enable); ATH_TX_UNLOCK(sc); } else if (enable) { DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6D: enable=%d, enabling TIM\n", __func__, ni->ni_macaddr, ":", enable); an->an_tim_set = 1; ATH_TX_UNLOCK(sc); changed = avp->av_set_tim(ni, enable); } else if (an->an_swq_depth == 0) { /* disable */ DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6D: enable=%d, an_swq_depth == 0, disabling\n", __func__, ni->ni_macaddr, ":", enable); an->an_tim_set = 0; ATH_TX_UNLOCK(sc); changed = avp->av_set_tim(ni, enable); } else if (! an->an_is_powersave) { /* * disable regardless; the node isn't in powersave now */ DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6D: enable=%d, an_pwrsave=0, disabling\n", __func__, ni->ni_macaddr, ":", enable); an->an_tim_set = 0; ATH_TX_UNLOCK(sc); changed = avp->av_set_tim(ni, enable); } else { /* * psq disable, node is currently in powersave, node * software queue isn't empty, so don't clear the TIM bit * for now. */ ATH_TX_UNLOCK(sc); DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6D: enable=%d, an_swq_depth > 0, ignoring\n", __func__, ni->ni_macaddr, ":", enable); changed = 0; } return (changed); #else struct ath_vap *avp = ATH_VAP(ni->ni_vap); /* * Some operating modes don't set av_set_tim(), so don't * update it here. */ if (avp->av_set_tim == NULL) return (0); return (avp->av_set_tim(ni, enable)); #endif /* ATH_SW_PSQ */ } /* * Set or update the TIM from the software queue. * * Check the software queue depth before attempting to do lock * anything; that avoids trying to obtain the lock. Then, * re-check afterwards to ensure nothing has changed in the * meantime. * * set: This is designed to be called from the TX path, after * a frame has been queued; to see if the swq > 0. * * clear: This is designed to be called from the buffer completion point * (right now it's ath_tx_default_comp()) where the state of * a software queue has changed. * * It makes sense to place it at buffer free / completion rather * than after each software queue operation, as there's no real * point in churning the TIM bit as the last frames in the software * queue are transmitted. If they fail and we retry them, we'd * just be setting the TIM bit again anyway. */ void ath_tx_update_tim(struct ath_softc *sc, struct ieee80211_node *ni, int enable) { #ifdef ATH_SW_PSQ struct ath_node *an; struct ath_vap *avp; /* Don't do this for broadcast/etc frames */ if (ni == NULL) return; an = ATH_NODE(ni); avp = ATH_VAP(ni->ni_vap); /* * And for operating modes without the TIM handler set, let's * just skip those. */ if (avp->av_set_tim == NULL) return; ATH_TX_LOCK_ASSERT(sc); if (enable) { if (an->an_is_powersave && an->an_tim_set == 0 && an->an_swq_depth != 0) { DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6D: swq_depth>0, tim_set=0, set!\n", __func__, ni->ni_macaddr, ":"); an->an_tim_set = 1; (void) avp->av_set_tim(ni, 1); } } else { /* * Don't bother grabbing the lock unless the queue is empty. 
*/ if (an->an_swq_depth != 0) return; if (an->an_is_powersave && an->an_stack_psq == 0 && an->an_tim_set == 1 && an->an_swq_depth == 0) { DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6D: swq_depth=0, tim_set=1, psq_set=0," " clear!\n", __func__, ni->ni_macaddr, ":"); an->an_tim_set = 0; (void) avp->av_set_tim(ni, 0); } } #else return; #endif /* ATH_SW_PSQ */ } /* * Received a ps-poll frame from net80211. * * Here we get a chance to serve out a software-queued frame ourselves * before we punt it to net80211 to transmit us one itself - either * because there's traffic in the net80211 psq, or a NULL frame to * indicate there's nothing else. */ static void ath_node_recv_pspoll(struct ieee80211_node *ni, struct mbuf *m) { #ifdef ATH_SW_PSQ struct ath_node *an; struct ath_vap *avp; struct ieee80211com *ic = ni->ni_ic; struct ath_softc *sc = ic->ic_softc; int tid; /* Just paranoia */ if (ni == NULL) return; /* * Unassociated (temporary node) station. */ if (ni->ni_associd == 0) return; /* * We do have an active node, so let's begin looking into it. */ an = ATH_NODE(ni); avp = ATH_VAP(ni->ni_vap); /* * For now, we just call the original ps-poll method. * Once we're ready to flip this on: * * + Set leak to 1, as no matter what we're going to have * to send a frame; * + Check the software queue and if there's something in it, * schedule the highest TID thas has traffic from this node. * Then make sure we schedule the software scheduler to * run so it picks up said frame. * * That way whatever happens, we'll at least send _a_ frame * to the given node. * * Again, yes, it's crappy QoS if the node has multiple * TIDs worth of traffic - but let's get it working first * before we optimise it. * * Also yes, there's definitely latency here - we're not * direct dispatching to the hardware in this path (and * we're likely being called from the packet receive path, * so going back into TX may be a little hairy!) but again * I'd like to get this working first before optimising * turn-around time. */ ATH_TX_LOCK(sc); /* * Legacy - we're called and the node isn't asleep. * Immediately punt. */ if (! an->an_is_powersave) { DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6D: not in powersave?\n", __func__, ni->ni_macaddr, ":"); ATH_TX_UNLOCK(sc); avp->av_recv_pspoll(ni, m); return; } /* * We're in powersave. * * Leak a frame. */ an->an_leak_count = 1; /* * Now, if there's no frames in the node, just punt to * recv_pspoll. * * Don't bother checking if the TIM bit is set, we really * only care if there are any frames here! */ if (an->an_swq_depth == 0) { ATH_TX_UNLOCK(sc); DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6D: SWQ empty; punting to net80211\n", __func__, ni->ni_macaddr, ":"); avp->av_recv_pspoll(ni, m); return; } /* * Ok, let's schedule the highest TID that has traffic * and then schedule something. */ for (tid = IEEE80211_TID_SIZE - 1; tid >= 0; tid--) { struct ath_tid *atid = &an->an_tid[tid]; /* * No frames? Skip. */ if (atid->axq_depth == 0) continue; ath_tx_tid_sched(sc, atid); /* * XXX we could do a direct call to the TXQ * scheduler code here to optimise latency * at the expense of a REALLY deep callstack. */ ATH_TX_UNLOCK(sc); taskqueue_enqueue(sc->sc_tq, &sc->sc_txqtask); DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6D: leaking frame to TID %d\n", __func__, ni->ni_macaddr, ":", tid); return; } ATH_TX_UNLOCK(sc); /* * XXX nothing in the TIDs at this point? Eek. 
*/ DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6D: TIDs empty, but ath_node showed traffic?!\n", __func__, ni->ni_macaddr, ":"); avp->av_recv_pspoll(ni, m); #else avp->av_recv_pspoll(ni, m); #endif /* ATH_SW_PSQ */ } MODULE_VERSION(ath_main, 1); MODULE_DEPEND(ath_main, wlan, 1, 1, 1); /* 802.11 media layer */ MODULE_DEPEND(ath_main, ath_rate, 1, 1, 1); MODULE_DEPEND(ath_main, ath_dfs, 1, 1, 1); MODULE_DEPEND(ath_main, ath_hal, 1, 1, 1); #if defined(IEEE80211_ALQ) || defined(AH_DEBUG_ALQ) || defined(ATH_DEBUG_ALQ) MODULE_DEPEND(ath_main, alq, 1, 1, 1); #endif diff --git a/sys/dev/ath/if_ath_rx.c b/sys/dev/ath/if_ath_rx.c index 13a42c5038d6..93a1d7455191 100644 --- a/sys/dev/ath/if_ath_rx.c +++ b/sys/dev/ath/if_ath_rx.c @@ -1,1534 +1,1531 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); /* * Driver for the Atheros Wireless LAN controller. * * This software is derived from work of Atsushi Onoe; his contribution * is greatly appreciated. */ #include "opt_inet.h" #include "opt_ath.h" /* * This is needed for register operations which are performed * by the driver - eg, calls to ath_hal_gettsf32(). * * It's also required for any AH_DEBUG checks in here, eg the * module dependencies. 
*/ #include "opt_ah.h" #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for mp_ncpus */ #include #include #include #include #include #include #include #include #include #include #include #ifdef IEEE80211_SUPPORT_SUPERG #include #endif #ifdef IEEE80211_SUPPORT_TDMA #include #endif #include #ifdef INET #include #include #endif #include #include /* XXX for softled */ #include #include #include #include #include #include #include #include #include #include #include #include #ifdef ATH_TX99_DIAG #include #endif #ifdef ATH_DEBUG_ALQ #include #endif #include /* * Calculate the receive filter according to the * operating mode and state: * * o always accept unicast, broadcast, and multicast traffic * o accept PHY error frames when hardware doesn't have MIB support * to count and we need them for ANI (sta mode only until recently) * and we are not scanning (ANI is disabled) * NB: older hal's add rx filter bits out of sight and we need to * blindly preserve them * o probe request frames are accepted only when operating in * hostap, adhoc, mesh, or monitor modes * o enable promiscuous mode * - when in monitor mode * - if interface marked PROMISC (assumes bridge setting is filtered) * o accept beacons: * - when operating in station mode for collecting rssi data when * the station is otherwise quiet, or * - when operating in adhoc mode so the 802.11 layer creates * node table entries for peers, * - when scanning * - when doing s/w beacon miss (e.g. for ap+sta) * - when operating in ap mode in 11g to detect overlapping bss that * require protection * - when operating in mesh mode to detect neighbors * o accept control frames: * - when in monitor mode * XXX HT protection for 11n */ u_int32_t ath_calcrxfilter(struct ath_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; u_int32_t rfilt; rfilt = HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST; if (!sc->sc_needmib && !sc->sc_scanning) rfilt |= HAL_RX_FILTER_PHYERR; if (ic->ic_opmode != IEEE80211_M_STA) rfilt |= HAL_RX_FILTER_PROBEREQ; /* XXX ic->ic_monvaps != 0? */ if (ic->ic_opmode == IEEE80211_M_MONITOR || ic->ic_promisc > 0) rfilt |= HAL_RX_FILTER_PROM; /* * Only listen to all beacons if we're scanning. * * Otherwise we only really need to hear beacons from * our own BSSID. * * IBSS? software beacon miss? Just receive all beacons. * We need to hear beacons/probe requests from everyone so * we can merge ibss. */ if (ic->ic_opmode == IEEE80211_M_IBSS || sc->sc_swbmiss) { rfilt |= HAL_RX_FILTER_BEACON; } else if (ic->ic_opmode == IEEE80211_M_STA) { if (sc->sc_do_mybeacon && ! sc->sc_scanning) { rfilt |= HAL_RX_FILTER_MYBEACON; } else { /* scanning, non-mybeacon chips */ rfilt |= HAL_RX_FILTER_BEACON; } } /* * NB: We don't recalculate the rx filter when * ic_protmode changes; otherwise we could do * this only when ic_protmode != NONE. */ if (ic->ic_opmode == IEEE80211_M_HOSTAP && IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) rfilt |= HAL_RX_FILTER_BEACON; /* * Enable hardware PS-POLL RX only for hostap mode; * STA mode sends PS-POLL frames but never * receives them. 
*/ if (ath_hal_getcapability(sc->sc_ah, HAL_CAP_PSPOLL, 0, NULL) == HAL_OK && ic->ic_opmode == IEEE80211_M_HOSTAP) rfilt |= HAL_RX_FILTER_PSPOLL; if (sc->sc_nmeshvaps) { rfilt |= HAL_RX_FILTER_BEACON; if (sc->sc_hasbmatch) rfilt |= HAL_RX_FILTER_BSSID; else rfilt |= HAL_RX_FILTER_PROM; } if (ic->ic_opmode == IEEE80211_M_MONITOR) rfilt |= HAL_RX_FILTER_CONTROL; /* * Enable RX of compressed BAR frames only when doing * 802.11n. Required for A-MPDU. */ if (IEEE80211_IS_CHAN_HT(ic->ic_curchan)) rfilt |= HAL_RX_FILTER_COMPBAR; /* * Enable radar PHY errors if requested by the * DFS module. */ if (sc->sc_dodfs) rfilt |= HAL_RX_FILTER_PHYRADAR; /* * Enable spectral PHY errors if requested by the * spectral module. */ if (sc->sc_dospectral) rfilt |= HAL_RX_FILTER_PHYRADAR; DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, %s\n", __func__, rfilt, ieee80211_opmode_name[ic->ic_opmode]); return rfilt; } static int ath_legacy_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf) { struct ath_hal *ah = sc->sc_ah; int error; struct mbuf *m; struct ath_desc *ds; /* XXX TODO: ATH_RX_LOCK_ASSERT(sc); */ m = bf->bf_m; if (m == NULL) { /* * NB: by assigning a page to the rx dma buffer we * implicitly satisfy the Atheros requirement that * this buffer be cache-line-aligned and sized to be * multiple of the cache line size. Not doing this * causes weird stuff to happen (for the 5210 at least). */ m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) { DPRINTF(sc, ATH_DEBUG_ANY, "%s: no mbuf/cluster\n", __func__); sc->sc_stats.ast_rx_nombuf++; return ENOMEM; } m->m_pkthdr.len = m->m_len = m->m_ext.ext_size; error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT); if (error != 0) { DPRINTF(sc, ATH_DEBUG_ANY, "%s: bus_dmamap_load_mbuf_sg failed; error %d\n", __func__, error); sc->sc_stats.ast_rx_busdma++; m_freem(m); return error; } KASSERT(bf->bf_nseg == 1, ("multi-segment packet; nseg %u", bf->bf_nseg)); bf->bf_m = m; } bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD); /* * Setup descriptors. For receive we always terminate * the descriptor list with a self-linked entry so we'll * not get overrun under high load (as can happen with a * 5212 when ANI processing enables PHY error frames). * * To insure the last descriptor is self-linked we create * each descriptor as self-linked and add it to the end. As * each additional descriptor is added the previous self-linked * entry is ``fixed'' naturally. This should be safe even * if DMA is happening. When processing RX interrupts we * never remove/process the last, self-linked, entry on the * descriptor list. This insures the hardware always has * someplace to write a new frame. */ /* * 11N: we can no longer afford to self link the last descriptor. * MAC acknowledges BA status as long as it copies frames to host * buffer (or rx fifo). This can incorrectly acknowledge packets * to a sender if last desc is self-linked. */ ds = bf->bf_desc; if (sc->sc_rxslink) ds->ds_link = bf->bf_daddr; /* link to self */ else ds->ds_link = 0; /* terminate the list */ ds->ds_data = bf->bf_segs[0].ds_addr; ath_hal_setuprxdesc(ah, ds , m->m_len /* buffer size */ , 0 ); if (sc->sc_rxlink != NULL) *sc->sc_rxlink = bf->bf_daddr; sc->sc_rxlink = &ds->ds_link; return 0; } /* * Intercept management frames to collect beacon rssi data * and to do ibss merges. 
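 *
 * Beacons update the per-node RSSI average; beacons from our own BSS
 * also update the HAL average and, if sc_syncbeacon is set on a STA
 * VAP, resync the beacon timers.  Beacons and probe responses may
 * additionally trigger an IBSS merge.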
*/ void ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype, const struct ieee80211_rx_stats *rxs, int rssi, int nf) { struct ieee80211vap *vap = ni->ni_vap; struct ath_softc *sc = vap->iv_ic->ic_softc; uint64_t tsf_beacon_old, tsf_beacon; uint64_t nexttbtt; int64_t tsf_delta; int32_t tsf_delta_bmiss; int32_t tsf_remainder; uint64_t tsf_beacon_target; int tsf_intval; tsf_beacon_old = ((uint64_t) le32dec(ni->ni_tstamp.data + 4)) << 32; tsf_beacon_old |= le32dec(ni->ni_tstamp.data); #define TU_TO_TSF(_tu) (((u_int64_t)(_tu)) << 10) tsf_intval = 1; if (ni->ni_intval > 0) { tsf_intval = TU_TO_TSF(ni->ni_intval); } #undef TU_TO_TSF /* * Call up first so subsequent work can use information * potentially stored in the node (e.g. for ibss merge). */ ATH_VAP(vap)->av_recv_mgmt(ni, m, subtype, rxs, rssi, nf); switch (subtype) { case IEEE80211_FC0_SUBTYPE_BEACON: /* * Always update the per-node beacon RSSI if we're hearing * beacons from that node. */ ATH_RSSI_LPF(ATH_NODE(ni)->an_node_stats.ns_avgbrssi, rssi); /* * Only do the following processing if it's for * the current BSS. * * In scan and IBSS mode we receive all beacons, * which means we need to filter out stuff * that isn't for us or we'll end up constantly * trying to sync / merge to BSSes that aren't * actually us. */ if (IEEE80211_ADDR_EQ(ni->ni_bssid, vap->iv_bss->ni_bssid)) { /* update rssi statistics for use by the hal */ /* XXX unlocked check against vap->iv_bss? */ ATH_RSSI_LPF(sc->sc_halstats.ns_avgbrssi, rssi); tsf_beacon = ((uint64_t) le32dec(ni->ni_tstamp.data + 4)) << 32; tsf_beacon |= le32dec(ni->ni_tstamp.data); nexttbtt = ath_hal_getnexttbtt(sc->sc_ah); /* * Let's calculate the delta and remainder, so we can see * if the beacon timer from the AP is varying by more than * a few TU. (Which would be a huge, huge problem.) */ tsf_delta = (long long) tsf_beacon - (long long) tsf_beacon_old; tsf_delta_bmiss = tsf_delta / tsf_intval; /* * If our delta is greater than half the beacon interval, * let's round the bmiss value up to the next beacon * interval. Ie, we're running really, really early * on the next beacon. */ if (tsf_delta % tsf_intval > (tsf_intval / 2)) tsf_delta_bmiss ++; tsf_beacon_target = tsf_beacon_old + (((unsigned long long) tsf_delta_bmiss) * (long long) tsf_intval); /* * The remainder using '%' is between 0 .. intval-1. * If we're actually running too fast, then the remainder * will be some large number just under intval-1. * So we need to look at whether we're running * before or after the target beacon interval * and if we are, modify how we do the remainder * calculation. 
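 *
 * A worked example (illustrative): with intval = 100 TU the interval
 * is 100 << 10 = 102400us.  If the new beacon TSF is 307000us after
 * the previous one, tsf_delta_bmiss starts at 2 but the remainder
 * (102200us) is more than half an interval, so it is rounded up to 3;
 * tsf_beacon_target becomes old + 307200us and tsf_remainder works
 * out to -200us, ie. the beacon arrived 200us early relative to the
 * expected target.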
*/ if (tsf_beacon < tsf_beacon_target) { tsf_remainder = -(tsf_intval - ((tsf_beacon - tsf_beacon_old) % tsf_intval)); } else { tsf_remainder = (tsf_beacon - tsf_beacon_old) % tsf_intval; } DPRINTF(sc, ATH_DEBUG_BEACON, "%s: old_tsf=%llu (%u), new_tsf=%llu (%u), target_tsf=%llu (%u), delta=%lld, bmiss=%d, remainder=%d\n", __func__, (unsigned long long) tsf_beacon_old, (unsigned int) (tsf_beacon_old >> 10), (unsigned long long) tsf_beacon, (unsigned int ) (tsf_beacon >> 10), (unsigned long long) tsf_beacon_target, (unsigned int) (tsf_beacon_target >> 10), (long long) tsf_delta, tsf_delta_bmiss, tsf_remainder); DPRINTF(sc, ATH_DEBUG_BEACON, "%s: tsf=%llu (%u), nexttbtt=%llu (%u), delta=%d\n", __func__, (unsigned long long) tsf_beacon, (unsigned int) (tsf_beacon >> 10), (unsigned long long) nexttbtt, (unsigned int) (nexttbtt >> 10), (int32_t) tsf_beacon - (int32_t) nexttbtt + tsf_intval); /* We only do syncbeacon on STA VAPs; not on IBSS */ if (vap->iv_opmode == IEEE80211_M_STA && sc->sc_syncbeacon && ni == vap->iv_bss && (vap->iv_state == IEEE80211_S_RUN || vap->iv_state == IEEE80211_S_SLEEP)) { DPRINTF(sc, ATH_DEBUG_BEACON, "%s: syncbeacon=1; syncing\n", __func__); /* * Resync beacon timers using the tsf of the beacon * frame we just received. */ ath_beacon_config(sc, vap); sc->sc_syncbeacon = 0; } } /* fall thru... */ case IEEE80211_FC0_SUBTYPE_PROBE_RESP: if (vap->iv_opmode == IEEE80211_M_IBSS && vap->iv_state == IEEE80211_S_RUN && ieee80211_ibss_merge_check(ni)) { uint32_t rstamp = sc->sc_lastrs->rs_tstamp; uint64_t tsf = ath_extend_tsf(sc, rstamp, ath_hal_gettsf64(sc->sc_ah)); /* * Handle ibss merge as needed; check the tsf on the * frame before attempting the merge. The 802.11 spec * says the station should change it's bssid to match * the oldest station with the same ssid, where oldest * is determined by the tsf. Note that hardware * reconfiguration happens through callback to * ath_newstate as the state machine will go from * RUN -> RUN when this happens. */ if (le64toh(ni->ni_tstamp.tsf) >= tsf) { DPRINTF(sc, ATH_DEBUG_STATE, "ibss merge, rstamp %u tsf %ju " "tstamp %ju\n", rstamp, (uintmax_t)tsf, (uintmax_t)ni->ni_tstamp.tsf); (void) ieee80211_ibss_merge(ni); } } break; } } #ifdef ATH_ENABLE_RADIOTAP_VENDOR_EXT static void ath_rx_tap_vendor(struct ath_softc *sc, struct mbuf *m, const struct ath_rx_status *rs, u_int64_t tsf, int16_t nf) { /* Fill in the extension bitmap */ sc->sc_rx_th.wr_ext_bitmap = htole32(1 << ATH_RADIOTAP_VENDOR_HEADER); /* Fill in the vendor header */ sc->sc_rx_th.wr_vh.vh_oui[0] = 0x7f; sc->sc_rx_th.wr_vh.vh_oui[1] = 0x03; sc->sc_rx_th.wr_vh.vh_oui[2] = 0x00; /* XXX what should this be? 
*/ sc->sc_rx_th.wr_vh.vh_sub_ns = 0; sc->sc_rx_th.wr_vh.vh_skip_len = htole16(sizeof(struct ath_radiotap_vendor_hdr)); /* General version info */ sc->sc_rx_th.wr_v.vh_version = 1; sc->sc_rx_th.wr_v.vh_rx_chainmask = sc->sc_rxchainmask; /* rssi */ sc->sc_rx_th.wr_v.rssi_ctl[0] = rs->rs_rssi_ctl[0]; sc->sc_rx_th.wr_v.rssi_ctl[1] = rs->rs_rssi_ctl[1]; sc->sc_rx_th.wr_v.rssi_ctl[2] = rs->rs_rssi_ctl[2]; sc->sc_rx_th.wr_v.rssi_ext[0] = rs->rs_rssi_ext[0]; sc->sc_rx_th.wr_v.rssi_ext[1] = rs->rs_rssi_ext[1]; sc->sc_rx_th.wr_v.rssi_ext[2] = rs->rs_rssi_ext[2]; /* evm */ sc->sc_rx_th.wr_v.evm[0] = rs->rs_evm0; sc->sc_rx_th.wr_v.evm[1] = rs->rs_evm1; sc->sc_rx_th.wr_v.evm[2] = rs->rs_evm2; /* These are only populated from the AR9300 or later */ sc->sc_rx_th.wr_v.evm[3] = rs->rs_evm3; sc->sc_rx_th.wr_v.evm[4] = rs->rs_evm4; /* direction */ sc->sc_rx_th.wr_v.vh_flags = ATH_VENDOR_PKT_RX; /* RX rate */ sc->sc_rx_th.wr_v.vh_rx_hwrate = rs->rs_rate; /* RX flags */ sc->sc_rx_th.wr_v.vh_rs_flags = rs->rs_flags; if (rs->rs_isaggr) sc->sc_rx_th.wr_v.vh_flags |= ATH_VENDOR_PKT_ISAGGR; if (rs->rs_moreaggr) sc->sc_rx_th.wr_v.vh_flags |= ATH_VENDOR_PKT_MOREAGGR; /* phyerr info */ if (rs->rs_status & HAL_RXERR_PHY) { sc->sc_rx_th.wr_v.vh_phyerr_code = rs->rs_phyerr; sc->sc_rx_th.wr_v.vh_flags |= ATH_VENDOR_PKT_RXPHYERR; } else { sc->sc_rx_th.wr_v.vh_phyerr_code = 0xff; } sc->sc_rx_th.wr_v.vh_rs_status = rs->rs_status; sc->sc_rx_th.wr_v.vh_rssi = rs->rs_rssi; } #endif /* ATH_ENABLE_RADIOTAP_VENDOR_EXT */ static void ath_rx_tap(struct ath_softc *sc, struct mbuf *m, const struct ath_rx_status *rs, u_int64_t tsf, int16_t nf) { #define CHAN_HT20 htole32(IEEE80211_CHAN_HT20) #define CHAN_HT40U htole32(IEEE80211_CHAN_HT40U) #define CHAN_HT40D htole32(IEEE80211_CHAN_HT40D) #define CHAN_HT (CHAN_HT20|CHAN_HT40U|CHAN_HT40D) const HAL_RATE_TABLE *rt; uint8_t rix; rt = sc->sc_currates; KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); rix = rt->rateCodeToIndex[rs->rs_rate]; sc->sc_rx_th.wr_rate = sc->sc_hwmap[rix].ieeerate; sc->sc_rx_th.wr_flags = sc->sc_hwmap[rix].rxflags; /* 802.11 specific flags */ sc->sc_rx_th.wr_chan_flags &= ~CHAN_HT; if (rs->rs_status & HAL_RXERR_PHY) { /* * PHY error - make sure the channel flags * reflect the actual channel configuration, * not the received frame. 
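 * NB: the CHAN_HT bits were cleared from wr_chan_flags above, so at
 * most one HT width flag is set here - taken from the configured
 * channel for PHY errors, or from the RX status and current channel
 * for HT frames.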
*/ if (IEEE80211_IS_CHAN_HT40U(sc->sc_curchan)) sc->sc_rx_th.wr_chan_flags |= CHAN_HT40U; else if (IEEE80211_IS_CHAN_HT40D(sc->sc_curchan)) sc->sc_rx_th.wr_chan_flags |= CHAN_HT40D; else if (IEEE80211_IS_CHAN_HT20(sc->sc_curchan)) sc->sc_rx_th.wr_chan_flags |= CHAN_HT20; } else if (sc->sc_rx_th.wr_rate & IEEE80211_RATE_MCS) { /* HT rate */ struct ieee80211com *ic = &sc->sc_ic; if ((rs->rs_flags & HAL_RX_2040) == 0) sc->sc_rx_th.wr_chan_flags |= CHAN_HT20; else if (IEEE80211_IS_CHAN_HT40U(ic->ic_curchan)) sc->sc_rx_th.wr_chan_flags |= CHAN_HT40U; else sc->sc_rx_th.wr_chan_flags |= CHAN_HT40D; if (rs->rs_flags & HAL_RX_GI) sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTGI; } sc->sc_rx_th.wr_tsf = htole64(ath_extend_tsf(sc, rs->rs_tstamp, tsf)); if (rs->rs_status & HAL_RXERR_CRC) sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_BADFCS; /* XXX propagate other error flags from descriptor */ sc->sc_rx_th.wr_antnoise = nf; sc->sc_rx_th.wr_antsignal = nf + rs->rs_rssi; sc->sc_rx_th.wr_antenna = rs->rs_antenna; #undef CHAN_HT #undef CHAN_HT20 #undef CHAN_HT40U #undef CHAN_HT40D } static void ath_handle_micerror(struct ieee80211com *ic, struct ieee80211_frame *wh, int keyix) { struct ieee80211_node *ni; /* XXX recheck MIC to deal w/ chips that lie */ /* XXX discard MIC errors on !data frames */ ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh); if (ni != NULL) { ieee80211_notify_michael_failure(ni->ni_vap, wh, keyix); ieee80211_free_node(ni); } } /* * Process a single packet. * * The mbuf must already be synced, unmapped and removed from bf->bf_m * by this stage. * * The mbuf must be consumed by this routine - either passed up the * net80211 stack, put on the holding queue, or freed. */ int ath_rx_pkt(struct ath_softc *sc, struct ath_rx_status *rs, HAL_STATUS status, uint64_t tsf, int nf, HAL_RX_QUEUE qtype, struct ath_buf *bf, struct mbuf *m) { - struct epoch_tracker et; uint64_t rstamp; /* XXX TODO: make this an mbuf tag? */ struct ieee80211_rx_stats rxs; int len, type, i; struct ieee80211com *ic = &sc->sc_ic; struct ieee80211_node *ni; int is_good = 0; struct ath_rx_edma *re = &sc->sc_rxedma[qtype]; /* * Calculate the correct 64 bit TSF given * the TSF64 register value and rs_tstamp. */ rstamp = ath_extend_tsf(sc, rs->rs_tstamp, tsf); /* 802.11 return codes - These aren't specifically errors */ if (rs->rs_flags & HAL_RX_GI) sc->sc_stats.ast_rx_halfgi++; if (rs->rs_flags & HAL_RX_2040) sc->sc_stats.ast_rx_2040++; if (rs->rs_flags & HAL_RX_DELIM_CRC_PRE) sc->sc_stats.ast_rx_pre_crc_err++; if (rs->rs_flags & HAL_RX_DELIM_CRC_POST) sc->sc_stats.ast_rx_post_crc_err++; if (rs->rs_flags & HAL_RX_DECRYPT_BUSY) sc->sc_stats.ast_rx_decrypt_busy_err++; if (rs->rs_flags & HAL_RX_HI_RX_CHAIN) sc->sc_stats.ast_rx_hi_rx_chain++; if (rs->rs_flags & HAL_RX_STBC) sc->sc_stats.ast_rx_stbc++; if (rs->rs_status != 0) { if (rs->rs_status & HAL_RXERR_CRC) sc->sc_stats.ast_rx_crcerr++; if (rs->rs_status & HAL_RXERR_FIFO) sc->sc_stats.ast_rx_fifoerr++; if (rs->rs_status & HAL_RXERR_PHY) { sc->sc_stats.ast_rx_phyerr++; /* Process DFS radar events */ if ((rs->rs_phyerr == HAL_PHYERR_RADAR) || (rs->rs_phyerr == HAL_PHYERR_FALSE_RADAR_EXT)) { /* Now pass it to the radar processing code */ ath_dfs_process_phy_err(sc, m, rstamp, rs); } /* Be suitably paranoid about receiving phy errors out of the stats array bounds */ if (rs->rs_phyerr < 64) sc->sc_stats.ast_rx_phy[rs->rs_phyerr]++; goto rx_error; /* NB: don't count in ierrors */ } if (rs->rs_status & HAL_RXERR_DECRYPT) { /* * Decrypt error. 
If the error occurred * because there was no hardware key, then * let the frame through so the upper layers * can process it. This is necessary for 5210 * parts which have no way to setup a ``clear'' * key cache entry. * * XXX do key cache faulting */ if (rs->rs_keyix == HAL_RXKEYIX_INVALID) goto rx_accept; sc->sc_stats.ast_rx_badcrypt++; } /* * Similar as above - if the failure was a keymiss * just punt it up to the upper layers for now. */ if (rs->rs_status & HAL_RXERR_KEYMISS) { sc->sc_stats.ast_rx_keymiss++; goto rx_accept; } if (rs->rs_status & HAL_RXERR_MIC) { sc->sc_stats.ast_rx_badmic++; /* * Do minimal work required to hand off * the 802.11 header for notification. */ /* XXX frag's and qos frames */ len = rs->rs_datalen; if (len >= sizeof (struct ieee80211_frame)) { ath_handle_micerror(ic, mtod(m, struct ieee80211_frame *), sc->sc_splitmic ? rs->rs_keyix-32 : rs->rs_keyix); } } counter_u64_add(ic->ic_ierrors, 1); rx_error: /* * Cleanup any pending partial frame. */ if (re->m_rxpending != NULL) { m_freem(re->m_rxpending); re->m_rxpending = NULL; } /* * When a tap is present pass error frames * that have been requested. By default we * pass decrypt+mic errors but others may be * interesting (e.g. crc). */ if (ieee80211_radiotap_active(ic) && (rs->rs_status & sc->sc_monpass)) { /* NB: bpf needs the mbuf length setup */ len = rs->rs_datalen; m->m_pkthdr.len = m->m_len = len; ath_rx_tap(sc, m, rs, rstamp, nf); #ifdef ATH_ENABLE_RADIOTAP_VENDOR_EXT ath_rx_tap_vendor(sc, m, rs, rstamp, nf); #endif /* ATH_ENABLE_RADIOTAP_VENDOR_EXT */ ieee80211_radiotap_rx_all(ic, m); } /* XXX pass MIC errors up for s/w reclaculation */ m_freem(m); m = NULL; goto rx_next; } rx_accept: len = rs->rs_datalen; m->m_len = len; if (rs->rs_more) { /* * Frame spans multiple descriptors; save * it for the next completed descriptor, it * will be used to construct a jumbogram. */ if (re->m_rxpending != NULL) { /* NB: max frame size is currently 2 clusters */ sc->sc_stats.ast_rx_toobig++; m_freem(re->m_rxpending); } m->m_pkthdr.len = len; re->m_rxpending = m; m = NULL; goto rx_next; } else if (re->m_rxpending != NULL) { /* * This is the second part of a jumbogram, * chain it to the first mbuf, adjust the * frame length, and clear the rxpending state. */ re->m_rxpending->m_next = m; re->m_rxpending->m_pkthdr.len += len; m = re->m_rxpending; re->m_rxpending = NULL; } else { /* * Normal single-descriptor receive; setup packet length. */ m->m_pkthdr.len = len; } /* * Validate rs->rs_antenna. * * Some users w/ AR9285 NICs have reported crashes * here because rs_antenna field is bogusly large. * Let's enforce the maximum antenna limit of 8 * (and it shouldn't be hard coded, but that's a * separate problem) and if there's an issue, print * out an error and adjust rs_antenna to something * sensible. * * This code should be removed once the actual * root cause of the issue has been identified. * For example, it may be that the rs_antenna * field is only valid for the last frame of * an aggregate and it just happens that it is * "mostly" right. (This is a general statement - * the majority of the statistics are only valid * for the last frame in an aggregate. */ if (rs->rs_antenna > 7) { device_printf(sc->sc_dev, "%s: rs_antenna > 7 (%d)\n", __func__, rs->rs_antenna); #ifdef ATH_DEBUG ath_printrxbuf(sc, bf, 0, status == HAL_OK); #endif /* ATH_DEBUG */ rs->rs_antenna = 0; /* XXX better than nothing */ } /* * If this is an AR9285/AR9485, then the receive and LNA * configuration is stored in RSSI[2] / EXTRSSI[2]. 
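 * (The synthesised rs_antenna value built below is: bits 0:1 the LNA
 * configuration used, bit 2 the external RX antenna switch, giving a
 * value in the 0..7 range that feeds ast_ant_rx[].)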
* We can extract this out to build a much better * receive antenna profile. * * Yes, this just blurts over the above RX antenna field * for now. It's fine, the AR9285 doesn't really use * that. * * Later on we should store away the fine grained LNA * information and keep separate counters just for * that. It'll help when debugging the AR9285/AR9485 * combined diversity code. */ if (sc->sc_rx_lnamixer) { rs->rs_antenna = 0; /* Bits 0:1 - the LNA configuration used */ rs->rs_antenna |= ((rs->rs_rssi_ctl[2] & HAL_RX_LNA_CFG_USED) >> HAL_RX_LNA_CFG_USED_S); /* Bit 2 - the external RX antenna switch */ if (rs->rs_rssi_ctl[2] & HAL_RX_LNA_EXTCFG) rs->rs_antenna |= 0x4; } sc->sc_stats.ast_ant_rx[rs->rs_antenna]++; /* * Populate the rx status block. When there are bpf * listeners we do the additional work to provide * complete status. Otherwise we fill in only the * material required by ieee80211_input. Note that * noise setting is filled in above. */ if (ieee80211_radiotap_active(ic)) { ath_rx_tap(sc, m, rs, rstamp, nf); #ifdef ATH_ENABLE_RADIOTAP_VENDOR_EXT ath_rx_tap_vendor(sc, m, rs, rstamp, nf); #endif /* ATH_ENABLE_RADIOTAP_VENDOR_EXT */ } /* * From this point on we assume the frame is at least * as large as ieee80211_frame_min; verify that. */ if (len < IEEE80211_MIN_LEN) { if (!ieee80211_radiotap_active(ic)) { DPRINTF(sc, ATH_DEBUG_RECV, "%s: short packet %d\n", __func__, len); sc->sc_stats.ast_rx_tooshort++; } else { /* NB: in particular this captures ack's */ ieee80211_radiotap_rx_all(ic, m); } m_freem(m); m = NULL; goto rx_next; } if (IFF_DUMPPKTS(sc, ATH_DEBUG_RECV)) { const HAL_RATE_TABLE *rt = sc->sc_currates; uint8_t rix = rt->rateCodeToIndex[rs->rs_rate]; ieee80211_dump_pkt(ic, mtod(m, caddr_t), len, sc->sc_hwmap[rix].ieeerate, rs->rs_rssi); } m_adj(m, -IEEE80211_CRC_LEN); /* * Locate the node for sender, track state, and then * pass the (referenced) node up to the 802.11 layer * for its use. */ ni = ieee80211_find_rxnode_withkey(ic, mtod(m, const struct ieee80211_frame_min *), rs->rs_keyix == HAL_RXKEYIX_INVALID ? IEEE80211_KEYIX_NONE : rs->rs_keyix); sc->sc_lastrs = rs; if (rs->rs_isaggr) sc->sc_stats.ast_rx_agg++; /* * Populate the per-chain RSSI values where appropriate. */ bzero(&rxs, sizeof(rxs)); rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI | IEEE80211_R_C_CHAIN | IEEE80211_R_C_NF | IEEE80211_R_C_RSSI | IEEE80211_R_TSF64 | IEEE80211_R_TSF_START; /* XXX TODO: validate */ rxs.c_rssi = rs->rs_rssi; rxs.c_nf = nf; rxs.c_chain = 3; /* XXX TODO: check */ rxs.c_rx_tsf = rstamp; for (i = 0; i < 3; i++) { rxs.c_rssi_ctl[i] = rs->rs_rssi_ctl[i]; rxs.c_rssi_ext[i] = rs->rs_rssi_ext[i]; /* * XXX note: we currently don't track * per-chain noisefloor. */ rxs.c_nf_ctl[i] = nf; rxs.c_nf_ext[i] = nf; } - NET_EPOCH_ENTER(et); if (ni != NULL) { /* * Only punt packets for ampdu reorder processing for * 11n nodes; net80211 enforces that M_AMPDU is only * set for 11n nodes. */ if (ni->ni_flags & IEEE80211_NODE_HT) m->m_flags |= M_AMPDU; /* * Inform rate control about the received RSSI. * It can then use this information to potentially drastically * alter the available rate based on the RSSI estimate. * * This is super important when associating to a far away station; * you don't want to waste time trying higher rates at some low * packet exchange rate (like during DHCP) just to establish * that higher MCS rates aren't available. 
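 *
 * NB: ATH_RSSI_LPF() maintains a low-pass filtered running average of
 * the reported RSSI; ATH_RSSI() below reads the averaged value back
 * out for the rate control code.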
*/ ATH_RSSI_LPF(ATH_NODE(ni)->an_node_stats.ns_avgrssi, rs->rs_rssi); ath_rate_update_rx_rssi(sc, ATH_NODE(ni), ATH_RSSI(ATH_NODE(ni)->an_node_stats.ns_avgrssi)); /* * Sending station is known, dispatch directly. */ (void) ieee80211_add_rx_params(m, &rxs); type = ieee80211_input_mimo(ni, m); ieee80211_free_node(ni); m = NULL; /* * Arrange to update the last rx timestamp only for * frames from our ap when operating in station mode. * This assumes the rx key is always setup when * associated. */ if (ic->ic_opmode == IEEE80211_M_STA && rs->rs_keyix != HAL_RXKEYIX_INVALID) is_good = 1; } else { (void) ieee80211_add_rx_params(m, &rxs); type = ieee80211_input_mimo_all(ic, m); m = NULL; } - NET_EPOCH_EXIT(et); /* * At this point we have passed the frame up the stack; thus * the mbuf is no longer ours. */ /* * Track legacy station RX rssi and do any rx antenna management. */ ATH_RSSI_LPF(sc->sc_halstats.ns_avgrssi, rs->rs_rssi); if (sc->sc_diversity) { /* * When using fast diversity, change the default rx * antenna if diversity chooses the other antenna 3 * times in a row. */ if (sc->sc_defant != rs->rs_antenna) { if (++sc->sc_rxotherant >= 3) ath_setdefantenna(sc, rs->rs_antenna); } else sc->sc_rxotherant = 0; } /* Handle slow diversity if enabled */ if (sc->sc_dolnadiv) { ath_lna_rx_comb_scan(sc, rs, ticks, hz); } if (sc->sc_softled) { /* * Blink for any data frame. Otherwise do a * heartbeat-style blink when idle. The latter * is mainly for station mode where we depend on * periodic beacon frames to trigger the poll event. */ if (type == IEEE80211_FC0_TYPE_DATA) { const HAL_RATE_TABLE *rt = sc->sc_currates; ath_led_event(sc, rt->rateCodeToIndex[rs->rs_rate]); } else if (ticks - sc->sc_ledevent >= sc->sc_ledidle) ath_led_event(sc, 0); } rx_next: /* * Debugging - complain if we didn't NULL the mbuf pointer * here. */ if (m != NULL) { device_printf(sc->sc_dev, "%s: mbuf %p should've been freed!\n", __func__, m); } return (is_good); } #define ATH_RX_MAX 128 /* * XXX TODO: break out the "get buffers" from "call ath_rx_pkt()" like * the EDMA code does. * * XXX TODO: then, do all of the RX list management stuff inside * ATH_RX_LOCK() so we don't end up potentially racing. The EDMA * code is doing it right. */ static void ath_rx_proc(struct ath_softc *sc, int resched) { #define PA2DESC(_sc, _pa) \ ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \ ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr))) struct ath_buf *bf; struct ath_hal *ah = sc->sc_ah; #ifdef IEEE80211_SUPPORT_SUPERG struct ieee80211com *ic = &sc->sc_ic; #endif struct ath_desc *ds; struct ath_rx_status *rs; struct mbuf *m; int ngood; HAL_STATUS status; int16_t nf; u_int64_t tsf; int npkts = 0; int kickpcu = 0; int ret; /* XXX we must not hold the ATH_LOCK here */ ATH_UNLOCK_ASSERT(sc); ATH_PCU_UNLOCK_ASSERT(sc); ATH_PCU_LOCK(sc); sc->sc_rxproc_cnt++; kickpcu = sc->sc_kickpcu; ATH_PCU_UNLOCK(sc); ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: called\n", __func__); ngood = 0; nf = ath_hal_getchannoise(ah, sc->sc_curchan); sc->sc_stats.ast_rx_noise = nf; tsf = ath_hal_gettsf64(ah); do { /* * Don't process too many packets at a time; give the * TX thread time to also run - otherwise the TX * latency can jump by quite a bit, causing throughput * degredation. 
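 * NB: ATH_RX_MAX (defined above) caps a single pass; if the cap is
 * hit, another pass is scheduled at the bottom of this routine.  When
 * kickpcu is set the cap is ignored so the queue is drained fully
 * before the PCU is restarted.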
*/ if (!kickpcu && npkts >= ATH_RX_MAX) break; bf = TAILQ_FIRST(&sc->sc_rxbuf); if (sc->sc_rxslink && bf == NULL) { /* NB: shouldn't happen */ device_printf(sc->sc_dev, "%s: no buffer!\n", __func__); break; } else if (bf == NULL) { /* * End of List: * this can happen for non-self-linked RX chains */ sc->sc_stats.ast_rx_hitqueueend++; break; } m = bf->bf_m; if (m == NULL) { /* NB: shouldn't happen */ /* * If mbuf allocation failed previously there * will be no mbuf; try again to re-populate it. */ /* XXX make debug msg */ device_printf(sc->sc_dev, "%s: no mbuf!\n", __func__); TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list); goto rx_proc_next; } ds = bf->bf_desc; if (ds->ds_link == bf->bf_daddr) { /* NB: never process the self-linked entry at the end */ sc->sc_stats.ast_rx_hitqueueend++; break; } /* XXX sync descriptor memory */ /* * Must provide the virtual address of the current * descriptor, the physical address, and the virtual * address of the next descriptor in the h/w chain. * This allows the HAL to look ahead to see if the * hardware is done with a descriptor by checking the * done bit in the following descriptor and the address * of the current descriptor the DMA engine is working * on. All this is necessary because of our use of * a self-linked list to avoid rx overruns. */ rs = &bf->bf_status.ds_rxstat; status = ath_hal_rxprocdesc(ah, ds, bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs); #ifdef ATH_DEBUG if (sc->sc_debug & ATH_DEBUG_RECV_DESC) ath_printrxbuf(sc, bf, 0, status == HAL_OK); #endif #ifdef ATH_DEBUG_ALQ if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS)) if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS, sc->sc_rx_statuslen, (char *) ds); #endif /* ATH_DEBUG_ALQ */ if (status == HAL_EINPROGRESS) break; TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list); npkts++; /* * Process a single frame. */ bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); bf->bf_m = NULL; if (ath_rx_pkt(sc, rs, status, tsf, nf, HAL_RX_QUEUE_HP, bf, m)) ngood++; rx_proc_next: /* * If there's a holding buffer, insert that onto * the RX list; the hardware is now definitely not pointing * to it now. */ ret = 0; if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf != NULL) { TAILQ_INSERT_TAIL(&sc->sc_rxbuf, sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf, bf_list); ret = ath_rxbuf_init(sc, sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf); } /* * Next, throw our buffer into the holding entry. The hardware * may use the descriptor to read the link pointer before * DMAing the next descriptor in to write out a packet. */ sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf = bf; } while (ret == 0); /* rx signal state monitoring */ ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan); if (ngood) sc->sc_lastrx = tsf; ATH_KTR(sc, ATH_KTR_RXPROC, 2, "ath_rx_proc: npkts=%d, ngood=%d", npkts, ngood); /* Queue DFS tasklet if needed */ if (resched && ath_dfs_tasklet_needed(sc, sc->sc_curchan)) taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask); /* * Now that all the RX frames were handled that * need to be handled, kick the PCU if there's * been an RXEOL condition. */ if (resched && kickpcu) { ATH_PCU_LOCK(sc); ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_rx_proc: kickpcu"); device_printf(sc->sc_dev, "%s: kickpcu; handled %d packets\n", __func__, npkts); /* * Go through the process of fully tearing down * the RX buffers and reinitialising them. 
* * There's a hardware bug that causes the RX FIFO * to get confused under certain conditions and * constantly write over the same frame, leading * the RX driver code here to get heavily confused. */ /* * XXX Has RX DMA stopped enough here to just call * ath_startrecv()? * XXX Do we need to use the holding buffer to restart * RX DMA by appending entries to the final * descriptor? Quite likely. */ #if 1 ath_startrecv(sc); #else /* * Disabled for now - it'd be nice to be able to do * this in order to limit the amount of CPU time spent * reinitialising the RX side (and thus minimise RX * drops) however there's a hardware issue that * causes things to get too far out of whack. */ /* * XXX can we hold the PCU lock here? * Are there any net80211 buffer calls involved? */ bf = TAILQ_FIRST(&sc->sc_rxbuf); ath_hal_putrxbuf(ah, bf->bf_daddr, HAL_RX_QUEUE_HP); ath_hal_rxena(ah); /* enable recv descriptors */ ath_mode_init(sc); /* set filters, etc. */ ath_hal_startpcurecv(ah, (!! sc->sc_scanning)); /* re-enable PCU/DMA engine */ #endif ath_hal_intrset(ah, sc->sc_imask); sc->sc_kickpcu = 0; ATH_PCU_UNLOCK(sc); } #ifdef IEEE80211_SUPPORT_SUPERG if (resched) ieee80211_ff_age_all(ic, 100); #endif /* * Put the hardware to sleep again if we're done with it. */ ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); /* * If we hit the maximum number of frames in this round, * reschedule for another immediate pass. This gives * the TX and TX completion routines time to run, which * will reduce latency. */ if (npkts >= ATH_RX_MAX) sc->sc_rx.recv_sched(sc, resched); ATH_PCU_LOCK(sc); sc->sc_rxproc_cnt--; ATH_PCU_UNLOCK(sc); } #undef PA2DESC #undef ATH_RX_MAX /* * Only run the RX proc if it's not already running. * Since this may get run as part of the reset/flush path, * the task can't clash with an existing, running tasklet. */ static void ath_legacy_rx_tasklet(void *arg, int npending) { struct ath_softc *sc = arg; ATH_KTR(sc, ATH_KTR_RXPROC, 1, "ath_rx_proc: pending=%d", npending); DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n", __func__, npending); ATH_PCU_LOCK(sc); if (sc->sc_inreset_cnt > 0) { device_printf(sc->sc_dev, "%s: sc_inreset_cnt > 0; skipping\n", __func__); ATH_PCU_UNLOCK(sc); return; } ATH_PCU_UNLOCK(sc); ath_rx_proc(sc, 1); } static void ath_legacy_flushrecv(struct ath_softc *sc) { ath_rx_proc(sc, 0); } static void ath_legacy_flush_rxpending(struct ath_softc *sc) { /* XXX ATH_RX_LOCK_ASSERT(sc); */ if (sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending != NULL) { m_freem(sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending); sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending = NULL; } if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending != NULL) { m_freem(sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending); sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending = NULL; } } static int ath_legacy_flush_rxholdbf(struct ath_softc *sc) { struct ath_buf *bf; /* XXX ATH_RX_LOCK_ASSERT(sc); */ /* * If there are RX holding buffers, free them here and return * them to the list. * * XXX should just verify that bf->bf_m is NULL, as it must * be at this point! 
*/ bf = sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf; if (bf != NULL) { if (bf->bf_m != NULL) m_freem(bf->bf_m); bf->bf_m = NULL; TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list); (void) ath_rxbuf_init(sc, bf); } sc->sc_rxedma[HAL_RX_QUEUE_HP].m_holdbf = NULL; bf = sc->sc_rxedma[HAL_RX_QUEUE_LP].m_holdbf; if (bf != NULL) { if (bf->bf_m != NULL) m_freem(bf->bf_m); bf->bf_m = NULL; TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list); (void) ath_rxbuf_init(sc, bf); } sc->sc_rxedma[HAL_RX_QUEUE_LP].m_holdbf = NULL; return (0); } /* * Disable the receive h/w in preparation for a reset. */ static void ath_legacy_stoprecv(struct ath_softc *sc, int dodelay) { #define PA2DESC(_sc, _pa) \ ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \ ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr))) struct ath_hal *ah = sc->sc_ah; ATH_RX_LOCK(sc); ath_hal_stoppcurecv(ah); /* disable PCU */ ath_hal_setrxfilter(ah, 0); /* clear recv filter */ ath_hal_stopdmarecv(ah); /* disable DMA engine */ /* * TODO: see if this particular DELAY() is required; it may be * masking some missing FIFO flush or DMA sync. */ #if 0 if (dodelay) #endif DELAY(3000); /* 3ms is long enough for 1 frame */ #ifdef ATH_DEBUG if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) { struct ath_buf *bf; u_int ix; device_printf(sc->sc_dev, "%s: rx queue %p, link %p\n", __func__, (caddr_t)(uintptr_t) ath_hal_getrxbuf(ah, HAL_RX_QUEUE_HP), sc->sc_rxlink); ix = 0; TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { struct ath_desc *ds = bf->bf_desc; struct ath_rx_status *rs = &bf->bf_status.ds_rxstat; HAL_STATUS status = ath_hal_rxprocdesc(ah, ds, bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs); if (status == HAL_OK || (sc->sc_debug & ATH_DEBUG_FATAL)) ath_printrxbuf(sc, bf, ix, status == HAL_OK); ix++; } } #endif (void) ath_legacy_flush_rxpending(sc); (void) ath_legacy_flush_rxholdbf(sc); sc->sc_rxlink = NULL; /* just in case */ ATH_RX_UNLOCK(sc); #undef PA2DESC } /* * XXX TODO: something was calling startrecv without calling * stoprecv. Let's figure out what/why. It was showing up * as a mbuf leak (rxpending) and ath_buf leak (holdbf.) */ /* * Enable the receive h/w following a reset. */ static int ath_legacy_startrecv(struct ath_softc *sc) { struct ath_hal *ah = sc->sc_ah; struct ath_buf *bf; ATH_RX_LOCK(sc); /* * XXX should verify these are already all NULL! */ sc->sc_rxlink = NULL; (void) ath_legacy_flush_rxpending(sc); (void) ath_legacy_flush_rxholdbf(sc); /* * Re-chain all of the buffers in the RX buffer list. */ TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { int error = ath_rxbuf_init(sc, bf); if (error != 0) { DPRINTF(sc, ATH_DEBUG_RECV, "%s: ath_rxbuf_init failed %d\n", __func__, error); return error; } } bf = TAILQ_FIRST(&sc->sc_rxbuf); ath_hal_putrxbuf(ah, bf->bf_daddr, HAL_RX_QUEUE_HP); ath_hal_rxena(ah); /* enable recv descriptors */ ath_mode_init(sc); /* set filters, etc. */ ath_hal_startpcurecv(ah, (!! 
sc->sc_scanning)); /* re-enable PCU/DMA engine */ ATH_RX_UNLOCK(sc); return 0; } static int ath_legacy_dma_rxsetup(struct ath_softc *sc) { int error; error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf, "rx", sizeof(struct ath_desc), ath_rxbuf, 1); if (error != 0) return (error); return (0); } static int ath_legacy_dma_rxteardown(struct ath_softc *sc) { if (sc->sc_rxdma.dd_desc_len != 0) ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); return (0); } static void ath_legacy_recv_sched(struct ath_softc *sc, int dosched) { taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask); } static void ath_legacy_recv_sched_queue(struct ath_softc *sc, HAL_RX_QUEUE q, int dosched) { taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask); } void ath_recv_setup_legacy(struct ath_softc *sc) { /* Sensible legacy defaults */ /* * XXX this should be changed to properly support the * exact RX descriptor size for each HAL. */ sc->sc_rx_statuslen = sizeof(struct ath_desc); sc->sc_rx.recv_start = ath_legacy_startrecv; sc->sc_rx.recv_stop = ath_legacy_stoprecv; sc->sc_rx.recv_flush = ath_legacy_flushrecv; sc->sc_rx.recv_tasklet = ath_legacy_rx_tasklet; sc->sc_rx.recv_rxbuf_init = ath_legacy_rxbuf_init; sc->sc_rx.recv_setup = ath_legacy_dma_rxsetup; sc->sc_rx.recv_teardown = ath_legacy_dma_rxteardown; sc->sc_rx.recv_sched = ath_legacy_recv_sched; sc->sc_rx.recv_sched_queue = ath_legacy_recv_sched_queue; } diff --git a/sys/dev/bge/if_bge.c b/sys/dev/bge/if_bge.c index b91ee5de9649..551c18f8bf4b 100644 --- a/sys/dev/bge/if_bge.c +++ b/sys/dev/bge/if_bge.c @@ -1,6858 +1,6855 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 2001 Wind River Systems * Copyright (c) 1997, 1998, 1999, 2001 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); /* * Broadcom BCM57xx(x)/BCM590x NetXtreme and NetLink family Ethernet driver * * The Broadcom BCM5700 is based on technology originally developed by * Alteon Networks as part of the Tigon I and Tigon II Gigabit Ethernet * MAC chips. The BCM5700, sometimes referred to as the Tigon III, has * two on-board MIPS R4000 CPUs and can have as much as 16MB of external * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo * frames, highly configurable RX filtering, and 16 RX and TX queues * (which, along with RX filter rules, can be used for QOS applications). * Other features, such as TCP segmentation, may be available as part * of value-added firmware updates. Unlike the Tigon I and Tigon II, * firmware images can be stored in hardware and need not be compiled * into the driver. * * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will * function in a 32-bit/64-bit 33/66Mhz bus, or a 64-bit/133Mhz bus. * * The BCM5701 is a single-chip solution incorporating both the BCM5700 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701 * does not support external SSRAM. * * Broadcom also produces a variation of the BCM5700 under the "Altima" * brand name, which is functionally similar but lacks PCI-X support. * * Without external SSRAM, you can only have at most 4 TX rings, * and the use of the mini RX ring is disabled. This seems to imply * that these features are simply not available on the BCM5701. As a * result, this driver does not implement any support for the mini RX * ring. */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "miidevs.h" #include #include #include #include #define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP) #define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */ MODULE_DEPEND(bge, pci, 1, 1, 1); MODULE_DEPEND(bge, ether, 1, 1, 1); MODULE_DEPEND(bge, miibus, 1, 1, 1); /* "device miibus" required. See GENERIC if you get errors here. */ #include "miibus_if.h" /* * Various supported device vendors/types and their names. Note: the * spec seems to indicate that the hardware still has Alteon's vendor * ID burned into it, though it will always be overriden by the vendor * ID in the EEPROM. Just to be safe, we cover all possibilities. 
*/ static const struct bge_type { uint16_t bge_vid; uint16_t bge_did; } bge_devs[] = { { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 }, { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 }, { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 }, { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 }, { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 }, { APPLE_VENDORID, APPLE_DEVICE_BCM5701 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5717 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5717C }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5718 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5719 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5722 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5723 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5725 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5727 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5756 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5761 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5761E }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5761S }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5761SE }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5762 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5764 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5784 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5785F }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5785G }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5787F }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5906 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5906M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57760 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57761 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57762 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57764 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57765 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57766 
}, { BCOM_VENDORID, BCOM_DEVICEID_BCM57767 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57780 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57781 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57782 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57785 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57786 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57787 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57788 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57790 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57791 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM57795 }, { SK_VENDORID, SK_DEVICEID_ALTIMA }, { TC_VENDORID, TC_DEVICEID_3C996 }, { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE4 }, { FJTSU_VENDORID, FJTSU_DEVICEID_PW008GE5 }, { FJTSU_VENDORID, FJTSU_DEVICEID_PP250450 }, { 0, 0 } }; static const struct bge_vendor { uint16_t v_id; const char *v_name; } bge_vendors[] = { { ALTEON_VENDORID, "Alteon" }, { ALTIMA_VENDORID, "Altima" }, { APPLE_VENDORID, "Apple" }, { BCOM_VENDORID, "Broadcom" }, { SK_VENDORID, "SysKonnect" }, { TC_VENDORID, "3Com" }, { FJTSU_VENDORID, "Fujitsu" }, { 0, NULL } }; static const struct bge_revision { uint32_t br_chipid; const char *br_name; } bge_revisions[] = { { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" }, { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" }, { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" }, { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" }, { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" }, { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" }, { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" }, { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" }, { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" }, { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" }, { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" }, { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" }, { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" }, { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" }, { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" }, { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" }, { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" }, { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" }, { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" }, { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" }, { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" }, { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" }, { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" }, { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" }, { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" }, { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" }, { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" }, { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" }, { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" }, { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" }, { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" }, { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" }, { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" }, { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" }, { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" }, { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" }, { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" }, { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" }, { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" }, { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" }, { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" }, { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" }, { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" }, { BGE_CHIPID_BCM5717_A0, "BCM5717 A0" }, { BGE_CHIPID_BCM5717_B0, "BCM5717 B0" }, { BGE_CHIPID_BCM5717_C0, "BCM5717 C0" }, { BGE_CHIPID_BCM5719_A0, "BCM5719 A0" }, { BGE_CHIPID_BCM5720_A0, "BCM5720 A0" }, { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" }, { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" }, { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" }, { BGE_CHIPID_BCM5722_A0, "BCM5722 A0" }, { BGE_CHIPID_BCM5761_A0, "BCM5761 A0" }, { BGE_CHIPID_BCM5761_A1, "BCM5761 A1" }, { BGE_CHIPID_BCM5762_A0, "BCM5762 A0" }, { BGE_CHIPID_BCM5784_A0, "BCM5784 A0" }, { BGE_CHIPID_BCM5784_A1, "BCM5784 A1" }, /* 5754 and 5787 share the same ASIC ID */ { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" }, { 
BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" }, { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" }, { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" }, { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" }, { BGE_CHIPID_BCM57765_A0, "BCM57765 A0" }, { BGE_CHIPID_BCM57765_B0, "BCM57765 B0" }, { BGE_CHIPID_BCM57780_A0, "BCM57780 A0" }, { BGE_CHIPID_BCM57780_A1, "BCM57780 A1" }, { 0, NULL } }; /* * Some defaults for major revisions, so that newer steppings * that we don't know about have a shot at working. */ static const struct bge_revision bge_majorrevs[] = { { BGE_ASICREV_BCM5700, "unknown BCM5700" }, { BGE_ASICREV_BCM5701, "unknown BCM5701" }, { BGE_ASICREV_BCM5703, "unknown BCM5703" }, { BGE_ASICREV_BCM5704, "unknown BCM5704" }, { BGE_ASICREV_BCM5705, "unknown BCM5705" }, { BGE_ASICREV_BCM5750, "unknown BCM5750" }, { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" }, { BGE_ASICREV_BCM5752, "unknown BCM5752" }, { BGE_ASICREV_BCM5780, "unknown BCM5780" }, { BGE_ASICREV_BCM5714, "unknown BCM5714" }, { BGE_ASICREV_BCM5755, "unknown BCM5755" }, { BGE_ASICREV_BCM5761, "unknown BCM5761" }, { BGE_ASICREV_BCM5784, "unknown BCM5784" }, { BGE_ASICREV_BCM5785, "unknown BCM5785" }, /* 5754 and 5787 share the same ASIC ID */ { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" }, { BGE_ASICREV_BCM5906, "unknown BCM5906" }, { BGE_ASICREV_BCM57765, "unknown BCM57765" }, { BGE_ASICREV_BCM57766, "unknown BCM57766" }, { BGE_ASICREV_BCM57780, "unknown BCM57780" }, { BGE_ASICREV_BCM5717, "unknown BCM5717" }, { BGE_ASICREV_BCM5719, "unknown BCM5719" }, { BGE_ASICREV_BCM5720, "unknown BCM5720" }, { BGE_ASICREV_BCM5762, "unknown BCM5762" }, { 0, NULL } }; #define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO) #define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY) #define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS) #define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY) #define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS) #define BGE_IS_5755_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5755_PLUS) #define BGE_IS_5717_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5717_PLUS) #define BGE_IS_57765_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_57765_PLUS) static uint32_t bge_chipid(device_t); static const struct bge_vendor * bge_lookup_vendor(uint16_t); static const struct bge_revision * bge_lookup_rev(uint32_t); typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]); static int bge_probe(device_t); static int bge_attach(device_t); static int bge_detach(device_t); static int bge_suspend(device_t); static int bge_resume(device_t); static void bge_release_resources(struct bge_softc *); static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int); static int bge_dma_alloc(struct bge_softc *); static void bge_dma_free(struct bge_softc *); static int bge_dma_ring_alloc(struct bge_softc *, bus_size_t, bus_size_t, bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, const char *); static void bge_devinfo(struct bge_softc *); static int bge_mbox_reorder(struct bge_softc *); static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]); static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]); static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]); static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]); static int bge_get_eaddr(struct bge_softc *, uint8_t[]); static void bge_txeof(struct bge_softc *, uint16_t); static void bge_rxcsum(struct bge_softc *, struct bge_rx_bd *, struct mbuf *); static int bge_rxeof(struct bge_softc *, uint16_t, int); static void bge_asf_driver_up 
(struct bge_softc *); static void bge_tick(void *); static void bge_stats_clear_regs(struct bge_softc *); static void bge_stats_update(struct bge_softc *); static void bge_stats_update_regs(struct bge_softc *); static struct mbuf *bge_check_short_dma(struct mbuf *); static struct mbuf *bge_setup_tso(struct bge_softc *, struct mbuf *, uint16_t *, uint16_t *); static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *); static void bge_intr(void *); static int bge_msi_intr(void *); static void bge_intr_task(void *, int); static void bge_start(if_t); static void bge_start_locked(if_t); static void bge_start_tx(struct bge_softc *, uint32_t); static int bge_ioctl(if_t, u_long, caddr_t); static void bge_init_locked(struct bge_softc *); static void bge_init(void *); static void bge_stop_block(struct bge_softc *, bus_size_t, uint32_t); static void bge_stop(struct bge_softc *); static void bge_watchdog(struct bge_softc *); static int bge_shutdown(device_t); static int bge_ifmedia_upd_locked(if_t); static int bge_ifmedia_upd(if_t); static void bge_ifmedia_sts(if_t, struct ifmediareq *); static uint64_t bge_get_counter(if_t, ift_counter); static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *); static int bge_read_nvram(struct bge_softc *, caddr_t, int, int); static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *); static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int); static void bge_setpromisc(struct bge_softc *); static void bge_setmulti(struct bge_softc *); static void bge_setvlan(struct bge_softc *); static __inline void bge_rxreuse_std(struct bge_softc *, int); static __inline void bge_rxreuse_jumbo(struct bge_softc *, int); static int bge_newbuf_std(struct bge_softc *, int); static int bge_newbuf_jumbo(struct bge_softc *, int); static int bge_init_rx_ring_std(struct bge_softc *); static void bge_free_rx_ring_std(struct bge_softc *); static int bge_init_rx_ring_jumbo(struct bge_softc *); static void bge_free_rx_ring_jumbo(struct bge_softc *); static void bge_free_tx_ring(struct bge_softc *); static int bge_init_tx_ring(struct bge_softc *); static int bge_chipinit(struct bge_softc *); static int bge_blockinit(struct bge_softc *); static uint32_t bge_dma_swap_options(struct bge_softc *); static int bge_has_eaddr(struct bge_softc *); static uint32_t bge_readmem_ind(struct bge_softc *, int); static void bge_writemem_ind(struct bge_softc *, int, int); static void bge_writembx(struct bge_softc *, int, int); #ifdef notdef static uint32_t bge_readreg_ind(struct bge_softc *, int); #endif static void bge_writemem_direct(struct bge_softc *, int, int); static void bge_writereg_ind(struct bge_softc *, int, int); static int bge_miibus_readreg(device_t, int, int); static int bge_miibus_writereg(device_t, int, int, int); static void bge_miibus_statchg(device_t); #ifdef DEVICE_POLLING static int bge_poll(if_t ifp, enum poll_cmd cmd, int count); #endif #define BGE_RESET_SHUTDOWN 0 #define BGE_RESET_START 1 #define BGE_RESET_SUSPEND 2 static void bge_sig_post_reset(struct bge_softc *, int); static void bge_sig_legacy(struct bge_softc *, int); static void bge_sig_pre_reset(struct bge_softc *, int); static void bge_stop_fw(struct bge_softc *); static int bge_reset(struct bge_softc *); static void bge_link_upd(struct bge_softc *); static void bge_ape_lock_init(struct bge_softc *); static void bge_ape_read_fw_ver(struct bge_softc *); static int bge_ape_lock(struct bge_softc *, int); static void bge_ape_unlock(struct bge_softc *, int); static void 
bge_ape_send_event(struct bge_softc *, uint32_t); static void bge_ape_driver_state_change(struct bge_softc *, int); /* * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may * leak information to untrusted users. It is also known to cause alignment * traps on certain architectures. */ #ifdef BGE_REGISTER_DEBUG static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS); static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS); static int bge_sysctl_ape_read(SYSCTL_HANDLER_ARGS); static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS); #endif static void bge_add_sysctls(struct bge_softc *); static void bge_add_sysctl_stats_regs(struct bge_softc *, struct sysctl_ctx_list *, struct sysctl_oid_list *); static void bge_add_sysctl_stats(struct bge_softc *, struct sysctl_ctx_list *, struct sysctl_oid_list *); static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS); DEBUGNET_DEFINE(bge); static device_method_t bge_methods[] = { /* Device interface */ DEVMETHOD(device_probe, bge_probe), DEVMETHOD(device_attach, bge_attach), DEVMETHOD(device_detach, bge_detach), DEVMETHOD(device_shutdown, bge_shutdown), DEVMETHOD(device_suspend, bge_suspend), DEVMETHOD(device_resume, bge_resume), /* MII interface */ DEVMETHOD(miibus_readreg, bge_miibus_readreg), DEVMETHOD(miibus_writereg, bge_miibus_writereg), DEVMETHOD(miibus_statchg, bge_miibus_statchg), DEVMETHOD_END }; static driver_t bge_driver = { "bge", bge_methods, sizeof(struct bge_softc) }; static devclass_t bge_devclass; DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0); MODULE_PNP_INFO("U16:vendor;U16:device", pci, bge, bge_devs, nitems(bge_devs) - 1); DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0); static int bge_allow_asf = 1; static SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters"); SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RDTUN, &bge_allow_asf, 0, "Allow ASF mode if available"); static int bge_has_eaddr(struct bge_softc *sc) { return (1); } static uint32_t bge_readmem_ind(struct bge_softc *sc, int off) { device_t dev; uint32_t val; if (sc->bge_asicrev == BGE_ASICREV_BCM5906 && off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4) return (0); dev = sc->bge_dev; pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4); pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); return (val); } static void bge_writemem_ind(struct bge_softc *sc, int off, int val) { device_t dev; if (sc->bge_asicrev == BGE_ASICREV_BCM5906 && off >= BGE_STATS_BLOCK && off < BGE_SEND_RING_1_TO_4) return; dev = sc->bge_dev; pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4); pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); } #ifdef notdef static uint32_t bge_readreg_ind(struct bge_softc *sc, int off) { device_t dev; dev = sc->bge_dev; pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); return (pci_read_config(dev, BGE_PCI_REG_DATA, 4)); } #endif static void bge_writereg_ind(struct bge_softc *sc, int off, int val) { device_t dev; dev = sc->bge_dev; pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); pci_write_config(dev, BGE_PCI_REG_DATA, val, 4); } static void bge_writemem_direct(struct bge_softc *sc, int off, int val) { CSR_WRITE_4(sc, off, val); } static void bge_writembx(struct bge_softc *sc, int off, int val) { if (sc->bge_asicrev == BGE_ASICREV_BCM5906) off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI; CSR_WRITE_4(sc, off, val); if ((sc->bge_flags & BGE_FLAG_MBOX_REORDER) != 0) CSR_READ_4(sc, off); } /* * Clear all stale 
locks and select the lock for this driver instance. */ static void bge_ape_lock_init(struct bge_softc *sc) { uint32_t bit, regbase; int i; if (sc->bge_asicrev == BGE_ASICREV_BCM5761) regbase = BGE_APE_LOCK_GRANT; else regbase = BGE_APE_PER_LOCK_GRANT; /* Clear any stale locks. */ for (i = BGE_APE_LOCK_PHY0; i <= BGE_APE_LOCK_GPIO; i++) { switch (i) { case BGE_APE_LOCK_PHY0: case BGE_APE_LOCK_PHY1: case BGE_APE_LOCK_PHY2: case BGE_APE_LOCK_PHY3: bit = BGE_APE_LOCK_GRANT_DRIVER0; break; default: if (sc->bge_func_addr == 0) bit = BGE_APE_LOCK_GRANT_DRIVER0; else bit = (1 << sc->bge_func_addr); } APE_WRITE_4(sc, regbase + 4 * i, bit); } /* Select the PHY lock based on the device's function number. */ switch (sc->bge_func_addr) { case 0: sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY0; break; case 1: sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY1; break; case 2: sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY2; break; case 3: sc->bge_phy_ape_lock = BGE_APE_LOCK_PHY3; break; default: device_printf(sc->bge_dev, "PHY lock not supported on this function\n"); } } /* * Check for APE firmware, set flags, and print version info. */ static void bge_ape_read_fw_ver(struct bge_softc *sc) { const char *fwtype; uint32_t apedata, features; /* Check for a valid APE signature in shared memory. */ apedata = APE_READ_4(sc, BGE_APE_SEG_SIG); if (apedata != BGE_APE_SEG_SIG_MAGIC) { sc->bge_mfw_flags &= ~ BGE_MFW_ON_APE; return; } /* Check if APE firmware is running. */ apedata = APE_READ_4(sc, BGE_APE_FW_STATUS); if ((apedata & BGE_APE_FW_STATUS_READY) == 0) { device_printf(sc->bge_dev, "APE signature found " "but FW status not ready! 0x%08x\n", apedata); return; } sc->bge_mfw_flags |= BGE_MFW_ON_APE; /* Fetch the APE firwmare type and version. */ apedata = APE_READ_4(sc, BGE_APE_FW_VERSION); features = APE_READ_4(sc, BGE_APE_FW_FEATURES); if ((features & BGE_APE_FW_FEATURE_NCSI) != 0) { sc->bge_mfw_flags |= BGE_MFW_TYPE_NCSI; fwtype = "NCSI"; } else if ((features & BGE_APE_FW_FEATURE_DASH) != 0) { sc->bge_mfw_flags |= BGE_MFW_TYPE_DASH; fwtype = "DASH"; } else fwtype = "UNKN"; /* Print the APE firmware version. */ device_printf(sc->bge_dev, "APE FW version: %s v%d.%d.%d.%d\n", fwtype, (apedata & BGE_APE_FW_VERSION_MAJMSK) >> BGE_APE_FW_VERSION_MAJSFT, (apedata & BGE_APE_FW_VERSION_MINMSK) >> BGE_APE_FW_VERSION_MINSFT, (apedata & BGE_APE_FW_VERSION_REVMSK) >> BGE_APE_FW_VERSION_REVSFT, (apedata & BGE_APE_FW_VERSION_BLDMSK)); } static int bge_ape_lock(struct bge_softc *sc, int locknum) { uint32_t bit, gnt, req, status; int i, off; if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) return (0); /* Lock request/grant registers have different bases. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5761) { req = BGE_APE_LOCK_REQ; gnt = BGE_APE_LOCK_GRANT; } else { req = BGE_APE_PER_LOCK_REQ; gnt = BGE_APE_PER_LOCK_GRANT; } off = 4 * locknum; switch (locknum) { case BGE_APE_LOCK_GPIO: /* Lock required when using GPIO. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5761) return (0); if (sc->bge_func_addr == 0) bit = BGE_APE_LOCK_REQ_DRIVER0; else bit = (1 << sc->bge_func_addr); break; case BGE_APE_LOCK_GRC: /* Lock required to reset the device. */ if (sc->bge_func_addr == 0) bit = BGE_APE_LOCK_REQ_DRIVER0; else bit = (1 << sc->bge_func_addr); break; case BGE_APE_LOCK_MEM: /* Lock required when accessing certain APE memory. */ if (sc->bge_func_addr == 0) bit = BGE_APE_LOCK_REQ_DRIVER0; else bit = (1 << sc->bge_func_addr); break; case BGE_APE_LOCK_PHY0: case BGE_APE_LOCK_PHY1: case BGE_APE_LOCK_PHY2: case BGE_APE_LOCK_PHY3: /* Lock required when accessing PHYs. 
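 * All PCI functions contend for the single DRIVER0 request bit for the
 * PHY locks, unlike the GPIO/GRC/MEM cases above, which use a
 * per-function bit when the function number is non-zero.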
*/ bit = BGE_APE_LOCK_REQ_DRIVER0; break; default: return (EINVAL); } /* Request a lock. */ APE_WRITE_4(sc, req + off, bit); /* Wait up to 1 second to acquire lock. */ for (i = 0; i < 20000; i++) { status = APE_READ_4(sc, gnt + off); if (status == bit) break; DELAY(50); } /* Handle any errors. */ if (status != bit) { device_printf(sc->bge_dev, "APE lock %d request failed! " "request = 0x%04x[0x%04x], status = 0x%04x[0x%04x]\n", locknum, req + off, bit & 0xFFFF, gnt + off, status & 0xFFFF); /* Revoke the lock request. */ APE_WRITE_4(sc, gnt + off, bit); return (EBUSY); } return (0); } static void bge_ape_unlock(struct bge_softc *sc, int locknum) { uint32_t bit, gnt; int off; if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) return; if (sc->bge_asicrev == BGE_ASICREV_BCM5761) gnt = BGE_APE_LOCK_GRANT; else gnt = BGE_APE_PER_LOCK_GRANT; off = 4 * locknum; switch (locknum) { case BGE_APE_LOCK_GPIO: if (sc->bge_asicrev == BGE_ASICREV_BCM5761) return; if (sc->bge_func_addr == 0) bit = BGE_APE_LOCK_GRANT_DRIVER0; else bit = (1 << sc->bge_func_addr); break; case BGE_APE_LOCK_GRC: if (sc->bge_func_addr == 0) bit = BGE_APE_LOCK_GRANT_DRIVER0; else bit = (1 << sc->bge_func_addr); break; case BGE_APE_LOCK_MEM: if (sc->bge_func_addr == 0) bit = BGE_APE_LOCK_GRANT_DRIVER0; else bit = (1 << sc->bge_func_addr); break; case BGE_APE_LOCK_PHY0: case BGE_APE_LOCK_PHY1: case BGE_APE_LOCK_PHY2: case BGE_APE_LOCK_PHY3: bit = BGE_APE_LOCK_GRANT_DRIVER0; break; default: return; } APE_WRITE_4(sc, gnt + off, bit); } /* * Send an event to the APE firmware. */ static void bge_ape_send_event(struct bge_softc *sc, uint32_t event) { uint32_t apedata; int i; /* NCSI does not support APE events. */ if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) return; /* Wait up to 1ms for APE to service previous event. */ for (i = 10; i > 0; i--) { if (bge_ape_lock(sc, BGE_APE_LOCK_MEM) != 0) break; apedata = APE_READ_4(sc, BGE_APE_EVENT_STATUS); if ((apedata & BGE_APE_EVENT_STATUS_EVENT_PENDING) == 0) { APE_WRITE_4(sc, BGE_APE_EVENT_STATUS, event | BGE_APE_EVENT_STATUS_EVENT_PENDING); bge_ape_unlock(sc, BGE_APE_LOCK_MEM); APE_WRITE_4(sc, BGE_APE_EVENT, BGE_APE_EVENT_1); break; } bge_ape_unlock(sc, BGE_APE_LOCK_MEM); DELAY(100); } if (i == 0) device_printf(sc->bge_dev, "APE event 0x%08x send timed out\n", event); } static void bge_ape_driver_state_change(struct bge_softc *sc, int kind) { uint32_t apedata, event; if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) == 0) return; switch (kind) { case BGE_RESET_START: /* If this is the first load, clear the load counter. */ apedata = APE_READ_4(sc, BGE_APE_HOST_SEG_SIG); if (apedata != BGE_APE_HOST_SEG_SIG_MAGIC) APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, 0); else { apedata = APE_READ_4(sc, BGE_APE_HOST_INIT_COUNT); APE_WRITE_4(sc, BGE_APE_HOST_INIT_COUNT, ++apedata); } APE_WRITE_4(sc, BGE_APE_HOST_SEG_SIG, BGE_APE_HOST_SEG_SIG_MAGIC); APE_WRITE_4(sc, BGE_APE_HOST_SEG_LEN, BGE_APE_HOST_SEG_LEN_MAGIC); /* Add some version info if bge(4) supports it. 
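 * The host driver ID written below encodes a major.minor version pair
 * (1.0) via BGE_APE_HOST_DRIVER_ID_MAGIC(), and the APE heartbeat
 * interval is left disabled.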
*/ APE_WRITE_4(sc, BGE_APE_HOST_DRIVER_ID, BGE_APE_HOST_DRIVER_ID_MAGIC(1, 0)); APE_WRITE_4(sc, BGE_APE_HOST_BEHAVIOR, BGE_APE_HOST_BEHAV_NO_PHYLOCK); APE_WRITE_4(sc, BGE_APE_HOST_HEARTBEAT_INT_MS, BGE_APE_HOST_HEARTBEAT_INT_DISABLE); APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE, BGE_APE_HOST_DRVR_STATE_START); event = BGE_APE_EVENT_STATUS_STATE_START; break; case BGE_RESET_SHUTDOWN: APE_WRITE_4(sc, BGE_APE_HOST_DRVR_STATE, BGE_APE_HOST_DRVR_STATE_UNLOAD); event = BGE_APE_EVENT_STATUS_STATE_UNLOAD; break; case BGE_RESET_SUSPEND: event = BGE_APE_EVENT_STATUS_STATE_SUSPEND; break; default: return; } bge_ape_send_event(sc, event | BGE_APE_EVENT_STATUS_DRIVER_EVNT | BGE_APE_EVENT_STATUS_STATE_CHNGE); } /* * Map a single buffer address. */ static void bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct bge_dmamap_arg *ctx; if (error) return; KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg)); ctx = arg; ctx->bge_busaddr = segs->ds_addr; } static uint8_t bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest) { uint32_t access, byte = 0; int i; /* Lock. */ CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1); for (i = 0; i < 8000; i++) { if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1) break; DELAY(20); } if (i == 8000) return (1); /* Enable access. */ access = CSR_READ_4(sc, BGE_NVRAM_ACCESS); CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE); CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc); CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD); for (i = 0; i < BGE_TIMEOUT * 10; i++) { DELAY(10); if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) { DELAY(10); break; } } if (i == BGE_TIMEOUT * 10) { if_printf(sc->bge_ifp, "nvram read timed out\n"); return (1); } /* Get result. */ byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA); *dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF; /* Disable access. */ CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access); /* Unlock. */ CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1); CSR_READ_4(sc, BGE_NVRAM_SWARB); return (0); } /* * Read a sequence of bytes from NVRAM. */ static int bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt) { int err = 0, i; uint8_t byte = 0; if (sc->bge_asicrev != BGE_ASICREV_BCM5906) return (1); for (i = 0; i < cnt; i++) { err = bge_nvram_getbyte(sc, off + i, &byte); if (err) break; *(dest + i) = byte; } return (err ? 1 : 0); } /* * Read a byte of data stored in the EEPROM at address 'addr.' The * BCM570x supports both the traditional bitbang interface and an * auto access interface for reading the EEPROM. We use the auto * access method. */ static uint8_t bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest) { int i; uint32_t byte = 0; /* * Enable use of auto EEPROM access so we can avoid * having to use the bitbang method. */ BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); /* Reset the EEPROM, load the clock period. */ CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); DELAY(20); /* Issue the read EEPROM command. */ CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); /* Wait for completion */ for(i = 0; i < BGE_TIMEOUT * 10; i++) { DELAY(10); if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) break; } if (i == BGE_TIMEOUT * 10) { device_printf(sc->bge_dev, "EEPROM read timed out\n"); return (1); } /* Get result. */ byte = CSR_READ_4(sc, BGE_EE_DATA); *dest = (byte >> ((addr % 4) * 8)) & 0xFF; return (0); } /* * Read a sequence of bytes from the EEPROM. 
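 * Like bge_read_nvram(), this simply loops over the single-byte
 * accessor and returns 0 on success or 1 if any byte read fails.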
*/ static int bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt) { int i, error = 0; uint8_t byte = 0; for (i = 0; i < cnt; i++) { error = bge_eeprom_getbyte(sc, off + i, &byte); if (error) break; *(dest + i) = byte; } return (error ? 1 : 0); } static int bge_miibus_readreg(device_t dev, int phy, int reg) { struct bge_softc *sc; uint32_t val; int i; sc = device_get_softc(dev); if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0) return (0); /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */ if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) { CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL); DELAY(80); } CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY | BGE_MIPHY(phy) | BGE_MIREG(reg)); /* Poll for the PHY register access to complete. */ for (i = 0; i < BGE_TIMEOUT; i++) { DELAY(10); val = CSR_READ_4(sc, BGE_MI_COMM); if ((val & BGE_MICOMM_BUSY) == 0) { DELAY(5); val = CSR_READ_4(sc, BGE_MI_COMM); break; } } if (i == BGE_TIMEOUT) { device_printf(sc->bge_dev, "PHY read timed out (phy %d, reg %d, val 0x%08x)\n", phy, reg, val); val = 0; } /* Restore the autopoll bit if necessary. */ if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) { CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode); DELAY(80); } bge_ape_unlock(sc, sc->bge_phy_ape_lock); if (val & BGE_MICOMM_READFAIL) return (0); return (val & 0xFFFF); } static int bge_miibus_writereg(device_t dev, int phy, int reg, int val) { struct bge_softc *sc; int i; sc = device_get_softc(dev); if (sc->bge_asicrev == BGE_ASICREV_BCM5906 && (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL)) return (0); if (bge_ape_lock(sc, sc->bge_phy_ape_lock) != 0) return (0); /* Clear the autopoll bit if set, otherwise may trigger PCI errors. */ if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) { CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode & ~BGE_MIMODE_AUTOPOLL); DELAY(80); } CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY | BGE_MIPHY(phy) | BGE_MIREG(reg) | val); for (i = 0; i < BGE_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) { DELAY(5); CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */ break; } } /* Restore the autopoll bit if necessary. */ if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) { CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode); DELAY(80); } bge_ape_unlock(sc, sc->bge_phy_ape_lock); if (i == BGE_TIMEOUT) device_printf(sc->bge_dev, "PHY write timed out (phy %d, reg %d, val 0x%04x)\n", phy, reg, val); return (0); } static void bge_miibus_statchg(device_t dev) { struct bge_softc *sc; struct mii_data *mii; uint32_t mac_mode, rx_mode, tx_mode; sc = device_get_softc(dev); if ((if_getdrvflags(sc->bge_ifp) & IFF_DRV_RUNNING) == 0) return; mii = device_get_softc(sc->bge_miibus); if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: sc->bge_link = 1; break; case IFM_1000_T: case IFM_1000_SX: case IFM_2500_SX: if (sc->bge_asicrev != BGE_ASICREV_BCM5906) sc->bge_link = 1; else sc->bge_link = 0; break; default: sc->bge_link = 0; break; } } else sc->bge_link = 0; if (sc->bge_link == 0) return; /* * APE firmware touches these registers to keep the MAC * connected to the outside world. Try to keep the * accesses atomic. */ /* Set the port mode (MII/GMII) to match the link speed. 
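 * GMII is selected for 1000baseT/1000baseSX media and MII for 10/100;
 * the half-duplex and TX/RX flow-control bits are then derived from
 * the resolved media word.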
*/ mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & ~(BGE_MACMODE_PORTMODE | BGE_MACMODE_HALF_DUPLEX); tx_mode = CSR_READ_4(sc, BGE_TX_MODE); rx_mode = CSR_READ_4(sc, BGE_RX_MODE); if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) mac_mode |= BGE_PORTMODE_GMII; else mac_mode |= BGE_PORTMODE_MII; /* Set MAC flow control behavior to match link flow control settings. */ tx_mode &= ~BGE_TXMODE_FLOWCTL_ENABLE; rx_mode &= ~BGE_RXMODE_FLOWCTL_ENABLE; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) tx_mode |= BGE_TXMODE_FLOWCTL_ENABLE; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) rx_mode |= BGE_RXMODE_FLOWCTL_ENABLE; } else mac_mode |= BGE_MACMODE_HALF_DUPLEX; CSR_WRITE_4(sc, BGE_MAC_MODE, mac_mode); DELAY(40); CSR_WRITE_4(sc, BGE_TX_MODE, tx_mode); CSR_WRITE_4(sc, BGE_RX_MODE, rx_mode); } /* * Intialize a standard receive ring descriptor. */ static int bge_newbuf_std(struct bge_softc *sc, int i) { struct mbuf *m; struct bge_rx_bd *r; bus_dma_segment_t segs[1]; bus_dmamap_t map; int error, nsegs; if (sc->bge_flags & BGE_FLAG_JUMBO_STD && (if_getmtu(sc->bge_ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))) { m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES); if (m == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = MJUM9BYTES; } else { m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = MCLBYTES; } if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) m_adj(m, ETHER_ALIGN); error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag, sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0); if (error != 0) { m_freem(m); return (error); } if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag, sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag, sc->bge_cdata.bge_rx_std_dmamap[i]); } map = sc->bge_cdata.bge_rx_std_dmamap[i]; sc->bge_cdata.bge_rx_std_dmamap[i] = sc->bge_cdata.bge_rx_std_sparemap; sc->bge_cdata.bge_rx_std_sparemap = map; sc->bge_cdata.bge_rx_std_chain[i] = m; sc->bge_cdata.bge_rx_std_seglen[i] = segs[0].ds_len; r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std]; r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr); r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr); r->bge_flags = BGE_RXBDFLAG_END; r->bge_len = segs[0].ds_len; r->bge_idx = i; bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag, sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD); return (0); } /* * Initialize a jumbo receive ring descriptor. This allocates * a jumbo buffer from the pool managed internally by the driver. 
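 * A 9k (MJUM9BYTES) cluster is attached to the mbuf; since it may span
 * several physical pages, the DMA load can return up to four segments,
 * all of which are described by a single extended RX buffer descriptor.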
*/ static int bge_newbuf_jumbo(struct bge_softc *sc, int i) { bus_dma_segment_t segs[BGE_NSEG_JUMBO]; bus_dmamap_t map; struct bge_extrx_bd *r; struct mbuf *m; int error, nsegs; MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) return (ENOBUFS); if (m_cljget(m, M_NOWAIT, MJUM9BYTES) == NULL) { m_freem(m); return (ENOBUFS); } m->m_len = m->m_pkthdr.len = MJUM9BYTES; if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0) m_adj(m, ETHER_ALIGN); error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_sparemap, m, segs, &nsegs, 0); if (error != 0) { m_freem(m); return (error); } if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_dmamap[i]); } map = sc->bge_cdata.bge_rx_jumbo_dmamap[i]; sc->bge_cdata.bge_rx_jumbo_dmamap[i] = sc->bge_cdata.bge_rx_jumbo_sparemap; sc->bge_cdata.bge_rx_jumbo_sparemap = map; sc->bge_cdata.bge_rx_jumbo_chain[i] = m; sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = 0; sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = 0; sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = 0; sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = 0; /* * Fill in the extended RX buffer descriptor. */ r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo]; r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END; r->bge_idx = i; r->bge_len3 = r->bge_len2 = r->bge_len1 = 0; switch (nsegs) { case 4: r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr); r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr); r->bge_len3 = segs[3].ds_len; sc->bge_cdata.bge_rx_jumbo_seglen[i][3] = segs[3].ds_len; case 3: r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr); r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr); r->bge_len2 = segs[2].ds_len; sc->bge_cdata.bge_rx_jumbo_seglen[i][2] = segs[2].ds_len; case 2: r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr); r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr); r->bge_len1 = segs[1].ds_len; sc->bge_cdata.bge_rx_jumbo_seglen[i][1] = segs[1].ds_len; case 1: r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr); r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr); r->bge_len0 = segs[0].ds_len; sc->bge_cdata.bge_rx_jumbo_seglen[i][0] = segs[0].ds_len; break; default: panic("%s: %d segments\n", __func__, nsegs); } bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD); return (0); } static int bge_init_rx_ring_std(struct bge_softc *sc) { int error, i; bzero(sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ); sc->bge_std = 0; for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { if ((error = bge_newbuf_std(sc, i)) != 0) return (error); BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); } bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE); sc->bge_std = 0; bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, BGE_STD_RX_RING_CNT - 1); return (0); } static void bge_free_rx_ring_std(struct bge_softc *sc) { int i; for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag, sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag, sc->bge_cdata.bge_rx_std_dmamap[i]); m_freem(sc->bge_cdata.bge_rx_std_chain[i]); sc->bge_cdata.bge_rx_std_chain[i] = NULL; } bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i], sizeof(struct bge_rx_bd)); } } static int bge_init_rx_ring_jumbo(struct 
bge_softc *sc) { struct bge_rcb *rcb; int error, i; bzero(sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ); sc->bge_jumbo = 0; for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { if ((error = bge_newbuf_jumbo(sc, i)) != 0) return (error); BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); } bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE); sc->bge_jumbo = 0; /* Enable the jumbo receive producer ring. */ rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb; rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD); CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, BGE_JUMBO_RX_RING_CNT - 1); return (0); } static void bge_free_rx_ring_jumbo(struct bge_softc *sc) { int i; for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_dmamap[i]); m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]); sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; } bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i], sizeof(struct bge_extrx_bd)); } } static void bge_free_tx_ring(struct bge_softc *sc) { int i; if (sc->bge_ldata.bge_tx_ring == NULL) return; for (i = 0; i < BGE_TX_RING_CNT; i++) { if (sc->bge_cdata.bge_tx_chain[i] != NULL) { bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, sc->bge_cdata.bge_tx_dmamap[i], BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, sc->bge_cdata.bge_tx_dmamap[i]); m_freem(sc->bge_cdata.bge_tx_chain[i]); sc->bge_cdata.bge_tx_chain[i] = NULL; } bzero((char *)&sc->bge_ldata.bge_tx_ring[i], sizeof(struct bge_tx_bd)); } } static int bge_init_tx_ring(struct bge_softc *sc) { sc->bge_txcnt = 0; sc->bge_tx_saved_considx = 0; bzero(sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ); bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag, sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE); /* Initialize transmit producer index for host-memory send ring. */ sc->bge_tx_prodidx = 0; bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); /* 5700 b2 errata */ if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); /* NIC-memory send ring not used; initialize to zero. */ bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); /* 5700 b2 errata */ if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); return (0); } static void bge_setpromisc(struct bge_softc *sc) { if_t ifp; BGE_LOCK_ASSERT(sc); ifp = sc->bge_ifp; /* Enable or disable promiscuous mode as needed. */ if (if_getflags(ifp) & IFF_PROMISC) BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); else BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); } static u_int bge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { uint32_t *hashes = arg; int h; h = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN) & 0x7F; hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); return (1); } static void bge_setmulti(struct bge_softc *sc) { if_t ifp; uint32_t hashes[4] = { 0, 0, 0, 0 }; int i; BGE_LOCK_ASSERT(sc); ifp = sc->bge_ifp; if (if_getflags(ifp) & IFF_ALLMULTI || if_getflags(ifp) & IFF_PROMISC) { for (i = 0; i < 4; i++) CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF); return; } /* First, zot all the existing filters. 
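 * Then rebuild the 128-bit hash filter: bge_hash_maddr() takes the low
 * 7 bits of the CRC32 of each multicast address and sets the matching
 * bit in one of the four 32-bit BGE_MAR registers.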
*/ for (i = 0; i < 4; i++) CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0); if_foreach_llmaddr(ifp, bge_hash_maddr, hashes); for (i = 0; i < 4; i++) CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); } static void bge_setvlan(struct bge_softc *sc) { if_t ifp; BGE_LOCK_ASSERT(sc); ifp = sc->bge_ifp; /* Enable or disable VLAN tag stripping as needed. */ if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG); else BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG); } static void bge_sig_pre_reset(struct bge_softc *sc, int type) { /* * Some chips don't like this so only do this if ASF is enabled */ if (sc->bge_asf_mode) bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC); if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { switch (type) { case BGE_RESET_START: bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, BGE_FW_DRV_STATE_START); break; case BGE_RESET_SHUTDOWN: bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, BGE_FW_DRV_STATE_UNLOAD); break; case BGE_RESET_SUSPEND: bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, BGE_FW_DRV_STATE_SUSPEND); break; } } if (type == BGE_RESET_START || type == BGE_RESET_SUSPEND) bge_ape_driver_state_change(sc, type); } static void bge_sig_post_reset(struct bge_softc *sc, int type) { if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { switch (type) { case BGE_RESET_START: bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, BGE_FW_DRV_STATE_START_DONE); /* START DONE */ break; case BGE_RESET_SHUTDOWN: bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, BGE_FW_DRV_STATE_UNLOAD_DONE); break; } } if (type == BGE_RESET_SHUTDOWN) bge_ape_driver_state_change(sc, type); } static void bge_sig_legacy(struct bge_softc *sc, int type) { if (sc->bge_asf_mode) { switch (type) { case BGE_RESET_START: bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, BGE_FW_DRV_STATE_START); break; case BGE_RESET_SHUTDOWN: bge_writemem_ind(sc, BGE_SRAM_FW_DRV_STATE_MB, BGE_FW_DRV_STATE_UNLOAD); break; } } } static void bge_stop_fw(struct bge_softc *sc) { int i; if (sc->bge_asf_mode) { bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_PAUSE); CSR_WRITE_4(sc, BGE_RX_CPU_EVENT, CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT); for (i = 0; i < 100; i++ ) { if (!(CSR_READ_4(sc, BGE_RX_CPU_EVENT) & BGE_RX_CPU_DRV_EVENT)) break; DELAY(10); } } } static uint32_t bge_dma_swap_options(struct bge_softc *sc) { uint32_t dma_options; dma_options = BGE_MODECTL_WORDSWAP_NONFRAME | BGE_MODECTL_BYTESWAP_DATA | BGE_MODECTL_WORDSWAP_DATA; #if BYTE_ORDER == BIG_ENDIAN dma_options |= BGE_MODECTL_BYTESWAP_NONFRAME; #endif return (dma_options); } /* * Do endian, PCI and DMA initialization. */ static int bge_chipinit(struct bge_softc *sc) { uint32_t dma_rw_ctl, misc_ctl, mode_ctl; uint16_t val; int i; /* Set endianness before we access any non-PCI registers. */ misc_ctl = BGE_INIT; if (sc->bge_flags & BGE_FLAG_TAGGED_STATUS) misc_ctl |= BGE_PCIMISCCTL_TAGGED_STATUS; pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, misc_ctl, 4); /* * Clear the MAC statistics block in the NIC's * internal memory. */ for (i = BGE_STATS_BLOCK; i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t)) BGE_MEMWIN_WRITE(sc, i, 0); for (i = BGE_STATUS_BLOCK; i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t)) BGE_MEMWIN_WRITE(sc, i, 0); if (sc->bge_chiprev == BGE_CHIPREV_5704_BX) { /* * Fix data corruption caused by non-qword write with WB. * Fix master abort in PCI mode. * Fix PCI latency timer. 
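 * On BCM5704 B-step parts this is done by setting bits 10, 12 and 13
 * in the configuration word at BGE_PCI_MSI_DATA + 2.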
*/ val = pci_read_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, 2); val |= (1 << 10) | (1 << 12) | (1 << 13); pci_write_config(sc->bge_dev, BGE_PCI_MSI_DATA + 2, val, 2); } if (sc->bge_asicrev == BGE_ASICREV_BCM57765 || sc->bge_asicrev == BGE_ASICREV_BCM57766) { /* * For the 57766 and non Ax versions of 57765, bootcode * needs to setup the PCIE Fast Training Sequence (FTS) * value to prevent transmit hangs. */ if (sc->bge_chiprev != BGE_CHIPREV_57765_AX) { CSR_WRITE_4(sc, BGE_CPMU_PADRNG_CTL, CSR_READ_4(sc, BGE_CPMU_PADRNG_CTL) | BGE_CPMU_PADRNG_CTL_RDIV2); } } /* * Set up the PCI DMA control register. */ dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) | BGE_PCIDMARWCTL_WR_CMD_SHIFT(7); if (sc->bge_flags & BGE_FLAG_PCIE) { if (sc->bge_mps >= 256) dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(7); else dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3); } else if (sc->bge_flags & BGE_FLAG_PCIX) { if (BGE_IS_5714_FAMILY(sc)) { /* 256 bytes for read and write. */ dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) | BGE_PCIDMARWCTL_WR_WAT_SHIFT(2); dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ? BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL : BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL; } else if (sc->bge_asicrev == BGE_ASICREV_BCM5703) { /* * In the BCM5703, the DMA read watermark should * be set to less than or equal to the maximum * memory read byte count of the PCI-X command * register. */ dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(4) | BGE_PCIDMARWCTL_WR_WAT_SHIFT(3); } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) { /* 1536 bytes for read, 384 bytes for write. */ dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) | BGE_PCIDMARWCTL_WR_WAT_SHIFT(3); } else { /* 384 bytes for read and write. */ dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) | BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) | 0x0F; } if (sc->bge_asicrev == BGE_ASICREV_BCM5703 || sc->bge_asicrev == BGE_ASICREV_BCM5704) { uint32_t tmp; /* Set ONE_DMA_AT_ONCE for hardware workaround. */ tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F; if (tmp == 6 || tmp == 7) dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL; /* Set PCI-X DMA write workaround. */ dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE; } } else { /* Conventional PCI bus: 256 bytes for read and write. */ dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) | BGE_PCIDMARWCTL_WR_WAT_SHIFT(7); if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && sc->bge_asicrev != BGE_ASICREV_BCM5750) dma_rw_ctl |= 0x0F; } if (sc->bge_asicrev == BGE_ASICREV_BCM5700 || sc->bge_asicrev == BGE_ASICREV_BCM5701) dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM | BGE_PCIDMARWCTL_ASRT_ALL_BE; if (sc->bge_asicrev == BGE_ASICREV_BCM5703 || sc->bge_asicrev == BGE_ASICREV_BCM5704) dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA; if (BGE_IS_5717_PLUS(sc)) { dma_rw_ctl &= ~BGE_PCIDMARWCTL_DIS_CACHE_ALIGNMENT; if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0) dma_rw_ctl &= ~BGE_PCIDMARWCTL_CRDRDR_RDMA_MRRS_MSK; /* * Enable HW workaround for controllers that misinterpret * a status tag update and leave interrupts permanently * disabled. */ if (!BGE_IS_57765_PLUS(sc) && sc->bge_asicrev != BGE_ASICREV_BCM5717 && sc->bge_asicrev != BGE_ASICREV_BCM5762) dma_rw_ctl |= BGE_PCIDMARWCTL_TAGGED_STATUS_WA; } pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4); /* * Set up general mode register. */ mode_ctl = bge_dma_swap_options(sc); if (sc->bge_asicrev == BGE_ASICREV_BCM5720 || sc->bge_asicrev == BGE_ASICREV_BCM5762) { /* Retain Host-2-BMC settings written by APE firmware. 
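 * Only the BMC-to-host RX byte/word-swap and the B2HRX/HTX2B enable
 * bits are carried over from the current BGE_MODE_CTL value on
 * BCM5720/BCM5762.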
*/ mode_ctl |= CSR_READ_4(sc, BGE_MODE_CTL) & (BGE_MODECTL_BYTESWAP_B2HRX_DATA | BGE_MODECTL_WORDSWAP_B2HRX_DATA | BGE_MODECTL_B2HRX_ENABLE | BGE_MODECTL_HTX2B_ENABLE); } mode_ctl |= BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS | BGE_MODECTL_TX_NO_PHDR_CSUM; /* * BCM5701 B5 have a bug causing data corruption when using * 64-bit DMA reads, which can be terminated early and then * completed later as 32-bit accesses, in combination with * certain bridges. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5701 && sc->bge_chipid == BGE_CHIPID_BCM5701_B5) mode_ctl |= BGE_MODECTL_FORCE_PCI32; /* * Tell the firmware the driver is running */ if (sc->bge_asf_mode & ASF_STACKUP) mode_ctl |= BGE_MODECTL_STACKUP; CSR_WRITE_4(sc, BGE_MODE_CTL, mode_ctl); /* * Disable memory write invalidate. Apparently it is not supported * properly by these devices. */ PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4); /* Set the timer prescaler (always 66 MHz). */ CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ); /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { DELAY(40); /* XXX */ /* Put PHY into ready state */ BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ); CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */ DELAY(40); } return (0); } static int bge_blockinit(struct bge_softc *sc) { struct bge_rcb *rcb; bus_size_t vrcb; bge_hostaddr taddr; uint32_t dmactl, rdmareg, val; int i, limit; /* * Initialize the memory window pointer register so that * we can access the first 32K of internal NIC RAM. This will * allow us to set up the TX send ring RCBs and the RX return * ring RCBs, plus other things which live in NIC memory. */ CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0); /* Note: the BCM5704 has a smaller mbuf space than other chips. */ if (!(BGE_IS_5705_PLUS(sc))) { /* Configure mbuf memory pool */ CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1); if (sc->bge_asicrev == BGE_ASICREV_BCM5704) CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); else CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); /* Configure DMA resource pool */ CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, BGE_DMA_DESCRIPTORS); CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); } /* Configure mbuf pool watermarks */ if (BGE_IS_5717_PLUS(sc)) { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); if (if_getmtu(sc->bge_ifp) > ETHERMTU) { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x7e); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xea); } else { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x2a); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0xa0); } } else if (!BGE_IS_5705_PLUS(sc)) { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10); } else { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); } /* Configure DMA resource watermarks */ CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); /* Enable buffer manager */ val = BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN; /* * Change the arbitration algorithm of TXMBUF read request to * round-robin instead of priority based for BCM5719. 
When * TXFIFO is almost empty, RDMA will hold its request until * TXFIFO is not almost empty. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5719) val |= BGE_BMANMODE_NO_TX_UNDERRUN; CSR_WRITE_4(sc, BGE_BMAN_MODE, val); /* Poll for buffer manager start indication */ for (i = 0; i < BGE_TIMEOUT; i++) { DELAY(10); if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) break; } if (i == BGE_TIMEOUT) { device_printf(sc->bge_dev, "buffer manager failed to start\n"); return (ENXIO); } /* Enable flow-through queues */ CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); /* Wait until queue initialization is complete */ for (i = 0; i < BGE_TIMEOUT; i++) { DELAY(10); if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) break; } if (i == BGE_TIMEOUT) { device_printf(sc->bge_dev, "flow-through queue init failed\n"); return (ENXIO); } /* * Summary of rings supported by the controller: * * Standard Receive Producer Ring * - This ring is used to feed receive buffers for "standard" * sized frames (typically 1536 bytes) to the controller. * * Jumbo Receive Producer Ring * - This ring is used to feed receive buffers for jumbo sized * frames (i.e. anything bigger than the "standard" frames) * to the controller. * * Mini Receive Producer Ring * - This ring is used to feed receive buffers for "mini" * sized frames to the controller. * - This feature required external memory for the controller * but was never used in a production system. Should always * be disabled. * * Receive Return Ring * - After the controller has placed an incoming frame into a * receive buffer that buffer is moved into a receive return * ring. The driver is then responsible to passing the * buffer up to the stack. Many versions of the controller * support multiple RR rings. * * Send Ring * - This ring is used for outgoing frames. Many versions of * the controller support multiple send rings. */ /* Initialize the standard receive producer ring control block. */ rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb; rcb->bge_hostaddr.bge_addr_lo = BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr); rcb->bge_hostaddr.bge_addr_hi = BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr); bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD); if (BGE_IS_5717_PLUS(sc)) { /* * Bits 31-16: Programmable ring size (2048, 1024, 512, .., 32) * Bits 15-2 : Maximum RX frame size * Bit 1 : 1 = Ring Disabled, 0 = Ring ENabled * Bit 0 : Reserved */ rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, BGE_MAX_FRAMELEN << 2); } else if (BGE_IS_5705_PLUS(sc)) { /* * Bits 31-16: Programmable ring size (512, 256, 128, 64, 32) * Bits 15-2 : Reserved (should be 0) * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled * Bit 0 : Reserved */ rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); } else { /* * Ring size is always XXX entries * Bits 31-16: Maximum RX frame size * Bits 15-2 : Reserved (should be 0) * Bit 1 : 1 = Ring Disabled, 0 = Ring Enabled * Bit 0 : Reserved */ rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); } if (sc->bge_asicrev == BGE_ASICREV_BCM5717 || sc->bge_asicrev == BGE_ASICREV_BCM5719 || sc->bge_asicrev == BGE_ASICREV_BCM5720) rcb->bge_nicaddr = BGE_STD_RX_RINGS_5717; else rcb->bge_nicaddr = BGE_STD_RX_RINGS; /* Write the standard receive producer ring control block. 
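 * Four registers are programmed: the high and low halves of the host
 * ring address, the maxlen/flags word and the NIC-memory ring address;
 * the producer index mailbox is then reset to zero.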
 */
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI,
	    rcb->bge_hostaddr.bge_addr_hi);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO,
	    rcb->bge_hostaddr.bge_addr_lo);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags);
	CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr);

	/* Reset the standard receive producer ring producer index. */
	bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0);

	/*
	 * Initialize the jumbo RX producer ring control
	 * block.  We set the 'ring disabled' bit in the
	 * flags field until we're actually ready to start
	 * using this ring (i.e. once we set the MTU
	 * high enough to require it).
	 */
	if (BGE_IS_JUMBO_CAPABLE(sc)) {
		rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb;
		/* Get the jumbo receive producer ring RCB parameters. */
		rcb->bge_hostaddr.bge_addr_lo =
		    BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		rcb->bge_hostaddr.bge_addr_hi =
		    BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr);
		bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map,
		    BUS_DMASYNC_PREREAD);
		rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0,
		    BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED);
		if (sc->bge_asicrev == BGE_ASICREV_BCM5717 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5719 ||
		    sc->bge_asicrev == BGE_ASICREV_BCM5720)
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS_5717;
		else
			rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS;
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI,
		    rcb->bge_hostaddr.bge_addr_hi);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO,
		    rcb->bge_hostaddr.bge_addr_lo);
		/* Program the jumbo receive producer ring RCB parameters. */
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr);
		/* Reset the jumbo receive producer ring producer index. */
		bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0);
	}

	/* Disable the mini receive producer ring RCB. */
	if (BGE_IS_5700_FAMILY(sc)) {
		rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb;
		rcb->bge_maxlen_flags =
		    BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED);
		CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS,
		    rcb->bge_maxlen_flags);
		/* Reset the mini receive producer ring producer index. */
		bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0);
	}

	/* Choose de-pipeline mode for BCM5906 A0, A1 and A2. */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5906) {
		if (sc->bge_chipid == BGE_CHIPID_BCM5906_A0 ||
		    sc->bge_chipid == BGE_CHIPID_BCM5906_A1 ||
		    sc->bge_chipid == BGE_CHIPID_BCM5906_A2)
			CSR_WRITE_4(sc, BGE_ISO_PKT_TX,
			    (CSR_READ_4(sc, BGE_ISO_PKT_TX) & ~3) | 2);
	}
	/*
	 * The BD ring replenish thresholds control how often the
	 * hardware fetches new BD's from the producer rings in host
	 * memory.  Setting the value too low on a busy system can
	 * starve the hardware and reduce the throughput.
	 *
	 * Set the BD ring replenish thresholds.  The recommended
	 * values are 1/8th the number of descriptors allocated to
	 * each ring.
	 * XXX The 5754 requires a lower threshold, so it might be a
	 * requirement of all 575x family chips.  The Linux driver sets
	 * the lower threshold for all 5705 family chips as well, but there
	 * are reports that it might not need to be so strict.
	 *
	 * XXX Linux does some extra fiddling here for the 5906 parts as
	 * well.
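 * In practice the threshold is a fixed 8 BDs on 5705 and newer parts
 * and BGE_STD_RX_RING_CNT / 8 on the older chips; the jumbo threshold
 * and the 5717+ low-watermark registers are programmed just below.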
*/ if (BGE_IS_5705_PLUS(sc)) val = 8; else val = BGE_STD_RX_RING_CNT / 8; CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val); if (BGE_IS_JUMBO_CAPABLE(sc)) CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8); if (BGE_IS_5717_PLUS(sc)) { CSR_WRITE_4(sc, BGE_STD_REPLENISH_LWM, 32); CSR_WRITE_4(sc, BGE_JMB_REPLENISH_LWM, 16); } /* * Disable all send rings by setting the 'ring disabled' bit * in the flags field of all the TX send ring control blocks, * located in NIC memory. */ if (!BGE_IS_5705_PLUS(sc)) /* 5700 to 5704 had 16 send rings. */ limit = BGE_TX_RINGS_EXTSSRAM_MAX; else if (BGE_IS_57765_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5762) limit = 2; else if (BGE_IS_5717_PLUS(sc)) limit = 4; else limit = 1; vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; for (i = 0; i < limit; i++) { RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED)); RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); vrcb += sizeof(struct bge_rcb); } /* Configure send ring RCB 0 (we use only the first ring) */ vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr); RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); if (sc->bge_asicrev == BGE_ASICREV_BCM5717 || sc->bge_asicrev == BGE_ASICREV_BCM5719 || sc->bge_asicrev == BGE_ASICREV_BCM5720) RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_SEND_RING_5717); else RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); /* * Disable all receive return rings by setting the * 'ring diabled' bit in the flags field of all the receive * return ring control blocks, located in NIC memory. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5717 || sc->bge_asicrev == BGE_ASICREV_BCM5719 || sc->bge_asicrev == BGE_ASICREV_BCM5720) { /* Should be 17, use 16 until we get an SRAM map. */ limit = 16; } else if (!BGE_IS_5705_PLUS(sc)) limit = BGE_RX_RINGS_MAX; else if (sc->bge_asicrev == BGE_ASICREV_BCM5755 || sc->bge_asicrev == BGE_ASICREV_BCM5762 || BGE_IS_57765_PLUS(sc)) limit = 4; else limit = 1; /* Disable all receive return rings. */ vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; for (i = 0; i < limit; i++) { RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0); RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0); RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, BGE_RCB_FLAG_RING_DISABLED); RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); bge_writembx(sc, BGE_MBX_RX_CONS0_LO + (i * (sizeof(uint64_t))), 0); vrcb += sizeof(struct bge_rcb); } /* * Set up receive return ring 0. Note that the NIC address * for RX return rings is 0x0. The return rings live entirely * within the host, so the nicaddr field in the RCB isn't used. 
*/ vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr); RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0)); /* Set random backoff seed for TX */ CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, (IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] + IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] + IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5]) & BGE_TX_BACKOFF_SEED_MASK); /* Set inter-packet gap */ val = 0x2620; if (sc->bge_asicrev == BGE_ASICREV_BCM5720 || sc->bge_asicrev == BGE_ASICREV_BCM5762) val |= CSR_READ_4(sc, BGE_TX_LENGTHS) & (BGE_TXLEN_JMB_FRM_LEN_MSK | BGE_TXLEN_CNT_DN_VAL_MSK); CSR_WRITE_4(sc, BGE_TX_LENGTHS, val); /* * Specify which ring to use for packets that don't match * any RX rules. */ CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); /* * Configure number of RX lists. One interrupt distribution * list, sixteen active lists, one bad frames class. */ CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); /* Inialize RX list placement stats mask. */ CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); /* Disable host coalescing until we get it set up */ CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); /* Poll to make sure it's shut down. */ for (i = 0; i < BGE_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) break; } if (i == BGE_TIMEOUT) { device_printf(sc->bge_dev, "host coalescing engine failed to idle\n"); return (ENXIO); } /* Set up host coalescing defaults */ CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); if (!(BGE_IS_5705_PLUS(sc))) { CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); } CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1); CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1); /* Set up address of statistics block */ if (!(BGE_IS_5705_PLUS(sc))) { CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr)); CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr)); CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); } /* Set up address of status block */ CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr)); CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr)); /* Set up status block size. 
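 * The size chosen here mirrors the status block allocation in
 * bge_dma_alloc(): since the driver only uses a single TX ring and a
 * single RX return ring, the minimal 32-byte block is enough, except
 * on BCM5700 parts other than the C0 stepping (the AX/BX revisions),
 * which appear to want the full-sized block no matter how many rings
 * are configured.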
*/ if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && sc->bge_chipid != BGE_CHIPID_BCM5700_C0) { val = BGE_STATBLKSZ_FULL; bzero(sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ); } else { val = BGE_STATBLKSZ_32BYTE; bzero(sc->bge_ldata.bge_status_block, 32); } bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Turn on host coalescing state machine */ CSR_WRITE_4(sc, BGE_HCC_MODE, val | BGE_HCCMODE_ENABLE); /* Turn on RX BD completion state machine and enable attentions */ CSR_WRITE_4(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN); /* Turn on RX list placement state machine */ CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); /* Turn on RX list selector state machine. */ if (!(BGE_IS_5705_PLUS(sc))) CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); /* Turn on DMA, clear stats. */ val = BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB | BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR | BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB | BGE_MACMODE_FRMHDR_DMA_ENB; if (sc->bge_flags & BGE_FLAG_TBI) val |= BGE_PORTMODE_TBI; else if (sc->bge_flags & BGE_FLAG_MII_SERDES) val |= BGE_PORTMODE_GMII; else val |= BGE_PORTMODE_MII; /* Allow APE to send/receive frames. */ if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0) val |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN; CSR_WRITE_4(sc, BGE_MAC_MODE, val); DELAY(40); /* Set misc. local control, enable interrupts on attentions */ BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN); #ifdef notdef /* Assert GPIO pins for PHY reset */ BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 | BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2); BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 | BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2); #endif /* Turn on DMA completion state machine */ if (!(BGE_IS_5705_PLUS(sc))) CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS; /* Enable host coalescing bug fix. */ if (BGE_IS_5755_PLUS(sc)) val |= BGE_WDMAMODE_STATUS_TAG_FIX; /* Request larger DMA burst size to get better performance. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5785) val |= BGE_WDMAMODE_BURST_ALL_DATA; /* Turn on write DMA state machine */ CSR_WRITE_4(sc, BGE_WDMA_MODE, val); DELAY(40); /* Turn on read DMA state machine */ val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS; if (sc->bge_asicrev == BGE_ASICREV_BCM5717) val |= BGE_RDMAMODE_MULT_DMA_RD_DIS; if (sc->bge_asicrev == BGE_ASICREV_BCM5784 || sc->bge_asicrev == BGE_ASICREV_BCM5785 || sc->bge_asicrev == BGE_ASICREV_BCM57780) val |= BGE_RDMAMODE_BD_SBD_CRPT_ATTN | BGE_RDMAMODE_MBUF_RBD_CRPT_ATTN | BGE_RDMAMODE_MBUF_SBD_CRPT_ATTN; if (sc->bge_flags & BGE_FLAG_PCIE) val |= BGE_RDMAMODE_FIFO_LONG_BURST; if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) { val |= BGE_RDMAMODE_TSO4_ENABLE; if (sc->bge_flags & BGE_FLAG_TSO3 || sc->bge_asicrev == BGE_ASICREV_BCM5785 || sc->bge_asicrev == BGE_ASICREV_BCM57780) val |= BGE_RDMAMODE_TSO6_ENABLE; } if (sc->bge_asicrev == BGE_ASICREV_BCM5720 || sc->bge_asicrev == BGE_ASICREV_BCM5762) { val |= CSR_READ_4(sc, BGE_RDMA_MODE) & BGE_RDMAMODE_H2BNC_VLAN_DET; /* * Allow multiple outstanding read requests from * non-LSO read DMA engine. 
*/ val &= ~BGE_RDMAMODE_MULT_DMA_RD_DIS; } if (sc->bge_asicrev == BGE_ASICREV_BCM5761 || sc->bge_asicrev == BGE_ASICREV_BCM5784 || sc->bge_asicrev == BGE_ASICREV_BCM5785 || sc->bge_asicrev == BGE_ASICREV_BCM57780 || BGE_IS_5717_PLUS(sc) || BGE_IS_57765_PLUS(sc)) { if (sc->bge_asicrev == BGE_ASICREV_BCM5762) rdmareg = BGE_RDMA_RSRVCTRL_REG2; else rdmareg = BGE_RDMA_RSRVCTRL; dmactl = CSR_READ_4(sc, rdmareg); /* * Adjust tx margin to prevent TX data corruption and * fix internal FIFO overflow. */ if (sc->bge_chipid == BGE_CHIPID_BCM5719_A0 || sc->bge_asicrev == BGE_ASICREV_BCM5762) { dmactl &= ~(BGE_RDMA_RSRVCTRL_FIFO_LWM_MASK | BGE_RDMA_RSRVCTRL_FIFO_HWM_MASK | BGE_RDMA_RSRVCTRL_TXMRGN_MASK); dmactl |= BGE_RDMA_RSRVCTRL_FIFO_LWM_1_5K | BGE_RDMA_RSRVCTRL_FIFO_HWM_1_5K | BGE_RDMA_RSRVCTRL_TXMRGN_320B; } /* * Enable fix for read DMA FIFO overruns. * The fix is to limit the number of RX BDs * the hardware would fetch at a fime. */ CSR_WRITE_4(sc, rdmareg, dmactl | BGE_RDMA_RSRVCTRL_FIFO_OFLW_FIX); } if (sc->bge_asicrev == BGE_ASICREV_BCM5719) { CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); } else if (sc->bge_asicrev == BGE_ASICREV_BCM5720) { /* * Allow 4KB burst length reads for non-LSO frames. * Enable 512B burst length reads for buffer descriptors. */ CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL) | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_512 | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); } else if (sc->bge_asicrev == BGE_ASICREV_BCM5762) { CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2, CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL_REG2) | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_BD_4K | BGE_RDMA_LSO_CRPTEN_CTRL_BLEN_LSO_4K); } CSR_WRITE_4(sc, BGE_RDMA_MODE, val); DELAY(40); if (sc->bge_flags & BGE_FLAG_RDMA_BUG) { for (i = 0; i < BGE_NUM_RDMA_CHANNELS / 2; i++) { val = CSR_READ_4(sc, BGE_RDMA_LENGTH + i * 4); if ((val & 0xFFFF) > BGE_FRAMELEN) break; if (((val >> 16) & 0xFFFF) > BGE_FRAMELEN) break; } if (i != BGE_NUM_RDMA_CHANNELS / 2) { val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL); if (sc->bge_asicrev == BGE_ASICREV_BCM5719) val |= BGE_RDMA_TX_LENGTH_WA_5719; else val |= BGE_RDMA_TX_LENGTH_WA_5720; CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val); } } /* Turn on RX data completion state machine */ CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); /* Turn on RX BD initiator state machine */ CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); /* Turn on RX data and RX BD initiator state machine */ CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); /* Turn on Mbuf cluster free state machine */ if (!(BGE_IS_5705_PLUS(sc))) CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); /* Turn on send BD completion state machine */ CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); /* Turn on send data completion state machine */ val = BGE_SDCMODE_ENABLE; if (sc->bge_asicrev == BGE_ASICREV_BCM5761) val |= BGE_SDCMODE_CDELAY; CSR_WRITE_4(sc, BGE_SDC_MODE, val); /* Turn on send data initiator state machine */ if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE | BGE_SDIMODE_HW_LSO_PRE_DMA); else CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); /* Turn on send BD initiator state machine */ CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); /* Turn on send BD selector state machine */ CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, 
BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER); /* ack/clear link change events */ CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | BGE_MACSTAT_LINK_CHANGED); CSR_WRITE_4(sc, BGE_MI_STS, 0); /* * Enable attention when the link has changed state for * devices that use auto polling. */ if (sc->bge_flags & BGE_FLAG_TBI) { CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); } else { if (sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) { CSR_WRITE_4(sc, BGE_MI_MODE, sc->bge_mi_mode); DELAY(80); } if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && sc->bge_chipid != BGE_CHIPID_BCM5700_B2) CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_MI_INTERRUPT); } /* * Clear any pending link state attention. * Otherwise some link state change events may be lost until attention * is cleared by bge_intr() -> bge_link_upd() sequence. * It's not necessary on newer BCM chips - perhaps enabling link * state change attentions implies clearing pending attention. */ CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | BGE_MACSTAT_LINK_CHANGED); /* Enable link state change attentions. */ BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); return (0); } static const struct bge_revision * bge_lookup_rev(uint32_t chipid) { const struct bge_revision *br; for (br = bge_revisions; br->br_name != NULL; br++) { if (br->br_chipid == chipid) return (br); } for (br = bge_majorrevs; br->br_name != NULL; br++) { if (br->br_chipid == BGE_ASICREV(chipid)) return (br); } return (NULL); } static const struct bge_vendor * bge_lookup_vendor(uint16_t vid) { const struct bge_vendor *v; for (v = bge_vendors; v->v_name != NULL; v++) if (v->v_id == vid) return (v); return (NULL); } static uint32_t bge_chipid(device_t dev) { uint32_t id; id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> BGE_PCIMISCCTL_ASICREV_SHIFT; if (BGE_ASICREV(id) == BGE_ASICREV_USE_PRODID_REG) { /* * Find the ASCI revision. Different chips use different * registers. */ switch (pci_get_device(dev)) { case BCOM_DEVICEID_BCM5717C: /* 5717 C0 seems to belong to 5720 line. */ id = BGE_CHIPID_BCM5720_A0; break; case BCOM_DEVICEID_BCM5717: case BCOM_DEVICEID_BCM5718: case BCOM_DEVICEID_BCM5719: case BCOM_DEVICEID_BCM5720: case BCOM_DEVICEID_BCM5725: case BCOM_DEVICEID_BCM5727: case BCOM_DEVICEID_BCM5762: case BCOM_DEVICEID_BCM57764: case BCOM_DEVICEID_BCM57767: case BCOM_DEVICEID_BCM57787: id = pci_read_config(dev, BGE_PCI_GEN2_PRODID_ASICREV, 4); break; case BCOM_DEVICEID_BCM57761: case BCOM_DEVICEID_BCM57762: case BCOM_DEVICEID_BCM57765: case BCOM_DEVICEID_BCM57766: case BCOM_DEVICEID_BCM57781: case BCOM_DEVICEID_BCM57782: case BCOM_DEVICEID_BCM57785: case BCOM_DEVICEID_BCM57786: case BCOM_DEVICEID_BCM57791: case BCOM_DEVICEID_BCM57795: id = pci_read_config(dev, BGE_PCI_GEN15_PRODID_ASICREV, 4); break; default: id = pci_read_config(dev, BGE_PCI_PRODID_ASICREV, 4); } } return (id); } /* * Probe for a Broadcom chip. Check the PCI vendor and device IDs * against our list and return its name if we find a match. * * Note that since the Broadcom controller contains VPD support, we * try to get the device name string from the controller itself instead * of the compiled-in string. It guarantees we'll always announce the * right product name. We fall back to the compiled-in string when * VPD is unavailable or corrupt. 
*/ static int bge_probe(device_t dev) { char buf[96]; char model[64]; const struct bge_revision *br; const char *pname; struct bge_softc *sc; const struct bge_type *t = bge_devs; const struct bge_vendor *v; uint32_t id; uint16_t did, vid; sc = device_get_softc(dev); sc->bge_dev = dev; vid = pci_get_vendor(dev); did = pci_get_device(dev); while(t->bge_vid != 0) { if ((vid == t->bge_vid) && (did == t->bge_did)) { id = bge_chipid(dev); br = bge_lookup_rev(id); if (bge_has_eaddr(sc) && pci_get_vpd_ident(dev, &pname) == 0) snprintf(model, sizeof(model), "%s", pname); else { v = bge_lookup_vendor(vid); snprintf(model, sizeof(model), "%s %s", v != NULL ? v->v_name : "Unknown", br != NULL ? br->br_name : "NetXtreme/NetLink Ethernet Controller"); } snprintf(buf, sizeof(buf), "%s, %sASIC rev. %#08x", model, br != NULL ? "" : "unknown ", id); device_set_desc_copy(dev, buf); return (BUS_PROBE_DEFAULT); } t++; } return (ENXIO); } static void bge_dma_free(struct bge_softc *sc) { int i; /* Destroy DMA maps for RX buffers. */ for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { if (sc->bge_cdata.bge_rx_std_dmamap[i]) bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag, sc->bge_cdata.bge_rx_std_dmamap[i]); } if (sc->bge_cdata.bge_rx_std_sparemap) bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag, sc->bge_cdata.bge_rx_std_sparemap); /* Destroy DMA maps for jumbo RX buffers. */ for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { if (sc->bge_cdata.bge_rx_jumbo_dmamap[i]) bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_dmamap[i]); } if (sc->bge_cdata.bge_rx_jumbo_sparemap) bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_sparemap); /* Destroy DMA maps for TX buffers. */ for (i = 0; i < BGE_TX_RING_CNT; i++) { if (sc->bge_cdata.bge_tx_dmamap[i]) bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag, sc->bge_cdata.bge_tx_dmamap[i]); } if (sc->bge_cdata.bge_rx_mtag) bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag); if (sc->bge_cdata.bge_mtag_jumbo) bus_dma_tag_destroy(sc->bge_cdata.bge_mtag_jumbo); if (sc->bge_cdata.bge_tx_mtag) bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag); /* Destroy standard RX ring. */ if (sc->bge_ldata.bge_rx_std_ring_paddr) bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_cdata.bge_rx_std_ring_map); if (sc->bge_ldata.bge_rx_std_ring) bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_ldata.bge_rx_std_ring, sc->bge_cdata.bge_rx_std_ring_map); if (sc->bge_cdata.bge_rx_std_ring_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag); /* Destroy jumbo RX ring. */ if (sc->bge_ldata.bge_rx_jumbo_ring_paddr) bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag, sc->bge_cdata.bge_rx_jumbo_ring_map); if (sc->bge_ldata.bge_rx_jumbo_ring) bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag, sc->bge_ldata.bge_rx_jumbo_ring, sc->bge_cdata.bge_rx_jumbo_ring_map); if (sc->bge_cdata.bge_rx_jumbo_ring_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag); /* Destroy RX return ring. */ if (sc->bge_ldata.bge_rx_return_ring_paddr) bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag, sc->bge_cdata.bge_rx_return_ring_map); if (sc->bge_ldata.bge_rx_return_ring) bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag, sc->bge_ldata.bge_rx_return_ring, sc->bge_cdata.bge_rx_return_ring_map); if (sc->bge_cdata.bge_rx_return_ring_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag); /* Destroy TX ring. 
*/ if (sc->bge_ldata.bge_tx_ring_paddr) bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag, sc->bge_cdata.bge_tx_ring_map); if (sc->bge_ldata.bge_tx_ring) bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag, sc->bge_ldata.bge_tx_ring, sc->bge_cdata.bge_tx_ring_map); if (sc->bge_cdata.bge_tx_ring_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag); /* Destroy status block. */ if (sc->bge_ldata.bge_status_block_paddr) bus_dmamap_unload(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map); if (sc->bge_ldata.bge_status_block) bus_dmamem_free(sc->bge_cdata.bge_status_tag, sc->bge_ldata.bge_status_block, sc->bge_cdata.bge_status_map); if (sc->bge_cdata.bge_status_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag); /* Destroy statistics block. */ if (sc->bge_ldata.bge_stats_paddr) bus_dmamap_unload(sc->bge_cdata.bge_stats_tag, sc->bge_cdata.bge_stats_map); if (sc->bge_ldata.bge_stats) bus_dmamem_free(sc->bge_cdata.bge_stats_tag, sc->bge_ldata.bge_stats, sc->bge_cdata.bge_stats_map); if (sc->bge_cdata.bge_stats_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag); if (sc->bge_cdata.bge_buffer_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_buffer_tag); /* Destroy the parent tag. */ if (sc->bge_cdata.bge_parent_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag); } static int bge_dma_ring_alloc(struct bge_softc *sc, bus_size_t alignment, bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map, bus_addr_t *paddr, const char *msg) { struct bge_dmamap_arg ctx; bus_addr_t lowaddr; bus_size_t ring_end; int error; lowaddr = BUS_SPACE_MAXADDR; again: error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, alignment, 0, lowaddr, BUS_SPACE_MAXADDR, NULL, NULL, maxsize, 1, maxsize, 0, NULL, NULL, tag); if (error != 0) { device_printf(sc->bge_dev, "could not create %s dma tag\n", msg); return (ENOMEM); } /* Allocate DMA'able memory for ring. */ error = bus_dmamem_alloc(*tag, (void **)ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map); if (error != 0) { device_printf(sc->bge_dev, "could not allocate DMA'able memory for %s\n", msg); return (ENOMEM); } /* Load the address of the ring. */ ctx.bge_busaddr = 0; error = bus_dmamap_load(*tag, *map, *ring, maxsize, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->bge_dev, "could not load DMA'able memory for %s\n", msg); return (ENOMEM); } *paddr = ctx.bge_busaddr; ring_end = *paddr + maxsize; if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0 && BGE_ADDR_HI(*paddr) != BGE_ADDR_HI(ring_end)) { /* * 4GB boundary crossed. Limit maximum allowable DMA * address space to 32bit and try again. */ bus_dmamap_unload(*tag, *map); bus_dmamem_free(*tag, *ring, *map); bus_dma_tag_destroy(*tag); if (bootverbose) device_printf(sc->bge_dev, "4GB boundary crossed, " "limit DMA address space to 32bit for %s\n", msg); *ring = NULL; *tag = NULL; *map = NULL; lowaddr = BUS_SPACE_MAXADDR_32BIT; goto again; } return (0); } static int bge_dma_alloc(struct bge_softc *sc) { bus_addr_t lowaddr; bus_size_t boundary, sbsz, rxmaxsegsz, txsegsz, txmaxsegsz; int i, error; lowaddr = BUS_SPACE_MAXADDR; if ((sc->bge_flags & BGE_FLAG_40BIT_BUG) != 0) lowaddr = BGE_DMA_MAXADDR; /* * Allocate the parent bus DMA tag appropriate for PCI. 
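 * Note that bge_dma_ring_alloc() above already guards each ring
 * against the 4GB-boundary erratum: after loading the map it
 * compares the upper 32 bits of the ring's start and end addresses
 * and, if they differ, tears the allocation down and retries with
 * the tag limited to BUS_SPACE_MAXADDR_32BIT. Condensed (the real
 * code also checks BGE_FLAG_4G_BNDRY_BUG and frees the DMA resources
 * before retrying):
 *
 *	if (BGE_ADDR_HI(paddr) != BGE_ADDR_HI(paddr + maxsize)) {
 *		lowaddr = BUS_SPACE_MAXADDR_32BIT;
 *		goto again;
 *	}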
*/ error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev), 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag); if (error != 0) { device_printf(sc->bge_dev, "could not allocate parent dma tag\n"); return (ENOMEM); } /* Create tag for standard RX ring. */ error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STD_RX_RING_SZ, &sc->bge_cdata.bge_rx_std_ring_tag, (uint8_t **)&sc->bge_ldata.bge_rx_std_ring, &sc->bge_cdata.bge_rx_std_ring_map, &sc->bge_ldata.bge_rx_std_ring_paddr, "RX ring"); if (error) return (error); /* Create tag for RX return ring. */ error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_RX_RTN_RING_SZ(sc), &sc->bge_cdata.bge_rx_return_ring_tag, (uint8_t **)&sc->bge_ldata.bge_rx_return_ring, &sc->bge_cdata.bge_rx_return_ring_map, &sc->bge_ldata.bge_rx_return_ring_paddr, "RX return ring"); if (error) return (error); /* Create tag for TX ring. */ error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_TX_RING_SZ, &sc->bge_cdata.bge_tx_ring_tag, (uint8_t **)&sc->bge_ldata.bge_tx_ring, &sc->bge_cdata.bge_tx_ring_map, &sc->bge_ldata.bge_tx_ring_paddr, "TX ring"); if (error) return (error); /* * Create tag for status block. * Because we only use single Tx/Rx/Rx return ring, use * minimum status block size except BCM5700 AX/BX which * seems to want to see full status block size regardless * of configured number of ring. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && sc->bge_chipid != BGE_CHIPID_BCM5700_C0) sbsz = BGE_STATUS_BLK_SZ; else sbsz = 32; error = bge_dma_ring_alloc(sc, PAGE_SIZE, sbsz, &sc->bge_cdata.bge_status_tag, (uint8_t **)&sc->bge_ldata.bge_status_block, &sc->bge_cdata.bge_status_map, &sc->bge_ldata.bge_status_block_paddr, "status block"); if (error) return (error); /* Create tag for statistics block. */ error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_STATS_SZ, &sc->bge_cdata.bge_stats_tag, (uint8_t **)&sc->bge_ldata.bge_stats, &sc->bge_cdata.bge_stats_map, &sc->bge_ldata.bge_stats_paddr, "statistics block"); if (error) return (error); /* Create tag for jumbo RX ring. */ if (BGE_IS_JUMBO_CAPABLE(sc)) { error = bge_dma_ring_alloc(sc, PAGE_SIZE, BGE_JUMBO_RX_RING_SZ, &sc->bge_cdata.bge_rx_jumbo_ring_tag, (uint8_t **)&sc->bge_ldata.bge_rx_jumbo_ring, &sc->bge_cdata.bge_rx_jumbo_ring_map, &sc->bge_ldata.bge_rx_jumbo_ring_paddr, "jumbo RX ring"); if (error) return (error); } /* Create parent tag for buffers. */ boundary = 0; if ((sc->bge_flags & BGE_FLAG_4G_BNDRY_BUG) != 0) { boundary = BGE_DMA_BNDRY; /* * XXX * watchdog timeout issue was observed on BCM5704 which * lives behind PCI-X bridge(e.g AMD 8131 PCI-X bridge). * Both limiting DMA address space to 32bits and flushing * mailbox write seem to address the issue. */ if (sc->bge_pcixcap != 0) lowaddr = BUS_SPACE_MAXADDR_32BIT; } error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev), 1, boundary, lowaddr, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->bge_cdata.bge_buffer_tag); if (error != 0) { device_printf(sc->bge_dev, "could not allocate buffer dma tag\n"); return (ENOMEM); } /* Create tag for Tx mbufs. 
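 * The limits used below depend on the TSO capability: with
 * BGE_FLAG_TSO or BGE_FLAG_TSO3 set, one mapping may span a full
 * 65535-byte TSO payload plus an Ethernet/VLAN header, split into at
 * most BGE_NSEG_NEW segments of up to BGE_TSOSEG_SZ bytes each;
 * without TSO, each segment is capped at MCLBYTES and the whole
 * mapping at MCLBYTES * BGE_NSEG_NEW.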
*/ if (sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) { txsegsz = BGE_TSOSEG_SZ; txmaxsegsz = 65535 + sizeof(struct ether_vlan_header); } else { txsegsz = MCLBYTES; txmaxsegsz = MCLBYTES * BGE_NSEG_NEW; } error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, txmaxsegsz, BGE_NSEG_NEW, txsegsz, 0, NULL, NULL, &sc->bge_cdata.bge_tx_mtag); if (error) { device_printf(sc->bge_dev, "could not allocate TX dma tag\n"); return (ENOMEM); } /* Create tag for Rx mbufs. */ if (sc->bge_flags & BGE_FLAG_JUMBO_STD) rxmaxsegsz = MJUM9BYTES; else rxmaxsegsz = MCLBYTES; error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, rxmaxsegsz, 1, rxmaxsegsz, 0, NULL, NULL, &sc->bge_cdata.bge_rx_mtag); if (error) { device_printf(sc->bge_dev, "could not allocate RX dma tag\n"); return (ENOMEM); } /* Create DMA maps for RX buffers. */ error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0, &sc->bge_cdata.bge_rx_std_sparemap); if (error) { device_printf(sc->bge_dev, "can't create spare DMA map for RX\n"); return (ENOMEM); } for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0, &sc->bge_cdata.bge_rx_std_dmamap[i]); if (error) { device_printf(sc->bge_dev, "can't create DMA map for RX\n"); return (ENOMEM); } } /* Create DMA maps for TX buffers. */ for (i = 0; i < BGE_TX_RING_CNT; i++) { error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0, &sc->bge_cdata.bge_tx_dmamap[i]); if (error) { device_printf(sc->bge_dev, "can't create DMA map for TX\n"); return (ENOMEM); } } /* Create tags for jumbo RX buffers. */ if (BGE_IS_JUMBO_CAPABLE(sc)) { error = bus_dma_tag_create(sc->bge_cdata.bge_buffer_tag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE, 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo); if (error) { device_printf(sc->bge_dev, "could not allocate jumbo dma tag\n"); return (ENOMEM); } /* Create DMA maps for jumbo RX buffers. */ error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo, 0, &sc->bge_cdata.bge_rx_jumbo_sparemap); if (error) { device_printf(sc->bge_dev, "can't create spare DMA map for jumbo RX\n"); return (ENOMEM); } for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo, 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]); if (error) { device_printf(sc->bge_dev, "can't create DMA map for jumbo RX\n"); return (ENOMEM); } } } return (0); } /* * Return true if this device has more than one port. */ static int bge_has_multiple_ports(struct bge_softc *sc) { device_t dev = sc->bge_dev; u_int b, d, f, fscan, s; d = pci_get_domain(dev); b = pci_get_bus(dev); s = pci_get_slot(dev); f = pci_get_function(dev); for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++) if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL) return (1); return (0); } /* * Return true if MSI can be used with this device. */ static int bge_can_use_msi(struct bge_softc *sc) { int can_use_msi = 0; if (sc->bge_msi == 0) return (0); /* Disable MSI for polling(4). */ #ifdef DEVICE_POLLING return (0); #endif switch (sc->bge_asicrev) { case BGE_ASICREV_BCM5714_A0: case BGE_ASICREV_BCM5714: /* * Apparently, MSI doesn't work when these chips are * configured in single-port mode. 
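 * That is why the 5714 cases below only report MSI as usable when
 * bge_has_multiple_ports() finds another PCI function in the same
 * slot; single-port configurations fall back to a legacy INTx
 * interrupt.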
*/ if (bge_has_multiple_ports(sc)) can_use_msi = 1; break; case BGE_ASICREV_BCM5750: if (sc->bge_chiprev != BGE_CHIPREV_5750_AX && sc->bge_chiprev != BGE_CHIPREV_5750_BX) can_use_msi = 1; break; case BGE_ASICREV_BCM5784: /* * Prevent infinite "watchdog timeout" errors * in some MacBook Pro and make it work out-of-the-box. */ if (sc->bge_chiprev == BGE_CHIPREV_5784_AX) break; /* FALLTHROUGH */ default: if (BGE_IS_575X_PLUS(sc)) can_use_msi = 1; } return (can_use_msi); } static int bge_mbox_reorder(struct bge_softc *sc) { /* Lists of PCI bridges that are known to reorder mailbox writes. */ static const struct mbox_reorder { const uint16_t vendor; const uint16_t device; const char *desc; } mbox_reorder_lists[] = { { 0x1022, 0x7450, "AMD-8131 PCI-X Bridge" }, }; devclass_t pci, pcib; device_t bus, dev; int i; pci = devclass_find("pci"); pcib = devclass_find("pcib"); dev = sc->bge_dev; bus = device_get_parent(dev); for (;;) { dev = device_get_parent(bus); bus = device_get_parent(dev); if (device_get_devclass(dev) != pcib) break; if (device_get_devclass(bus) != pci) break; for (i = 0; i < nitems(mbox_reorder_lists); i++) { if (pci_get_vendor(dev) == mbox_reorder_lists[i].vendor && pci_get_device(dev) == mbox_reorder_lists[i].device) { device_printf(sc->bge_dev, "enabling MBOX workaround for %s\n", mbox_reorder_lists[i].desc); return (1); } } } return (0); } static void bge_devinfo(struct bge_softc *sc) { uint32_t cfg, clk; device_printf(sc->bge_dev, "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; ", sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev); if (sc->bge_flags & BGE_FLAG_PCIE) printf("PCI-E\n"); else if (sc->bge_flags & BGE_FLAG_PCIX) { printf("PCI-X "); cfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK; if (cfg == BGE_MISCCFG_BOARD_ID_5704CIOBE) clk = 133; else { clk = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F; switch (clk) { case 0: clk = 33; break; case 2: clk = 50; break; case 4: clk = 66; break; case 6: clk = 100; break; case 7: clk = 133; break; } } printf("%u MHz\n", clk); } else { if (sc->bge_pcixcap != 0) printf("PCI on PCI-X "); else printf("PCI "); cfg = pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4); if (cfg & BGE_PCISTATE_PCI_BUSSPEED) clk = 66; else clk = 33; if (cfg & BGE_PCISTATE_32BIT_BUS) printf("%u MHz; 32bit\n", clk); else printf("%u MHz; 64bit\n", clk); } } static int bge_attach(device_t dev) { if_t ifp; struct bge_softc *sc; uint32_t hwcfg = 0, misccfg, pcistate; u_char eaddr[ETHER_ADDR_LEN]; int capmask, error, reg, rid, trys; sc = device_get_softc(dev); sc->bge_dev = dev; BGE_LOCK_INIT(sc, device_get_nameunit(dev)); - TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc); + NET_TASK_INIT(&sc->bge_intr_task, 0, bge_intr_task, sc); callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0); pci_enable_busmaster(dev); /* * Allocate control/status registers. */ rid = PCIR_BAR(0); sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->bge_res == NULL) { device_printf (sc->bge_dev, "couldn't map BAR0 memory\n"); error = ENXIO; goto fail; } /* Save various chip information. */ sc->bge_func_addr = pci_get_function(dev); sc->bge_chipid = bge_chipid(dev); sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid); sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid); /* Set default PHY address. */ sc->bge_phy_addr = 1; /* * PHY address mapping for various devices. 
* * | F0 Cu | F0 Sr | F1 Cu | F1 Sr | * ---------+-------+-------+-------+-------+ * BCM57XX | 1 | X | X | X | * BCM5704 | 1 | X | 1 | X | * BCM5717 | 1 | 8 | 2 | 9 | * BCM5719 | 1 | 8 | 2 | 9 | * BCM5720 | 1 | 8 | 2 | 9 | * * | F2 Cu | F2 Sr | F3 Cu | F3 Sr | * ---------+-------+-------+-------+-------+ * BCM57XX | X | X | X | X | * BCM5704 | X | X | X | X | * BCM5717 | X | X | X | X | * BCM5719 | 3 | 10 | 4 | 11 | * BCM5720 | X | X | X | X | * * Other addresses may respond but they are not * IEEE compliant PHYs and should be ignored. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5717 || sc->bge_asicrev == BGE_ASICREV_BCM5719 || sc->bge_asicrev == BGE_ASICREV_BCM5720) { if (sc->bge_chipid != BGE_CHIPID_BCM5717_A0) { if (CSR_READ_4(sc, BGE_SGDIG_STS) & BGE_SGDIGSTS_IS_SERDES) sc->bge_phy_addr = sc->bge_func_addr + 8; else sc->bge_phy_addr = sc->bge_func_addr + 1; } else { if (CSR_READ_4(sc, BGE_CPMU_PHY_STRAP) & BGE_CPMU_PHY_STRAP_IS_SERDES) sc->bge_phy_addr = sc->bge_func_addr + 8; else sc->bge_phy_addr = sc->bge_func_addr + 1; } } if (bge_has_eaddr(sc)) sc->bge_flags |= BGE_FLAG_EADDR; /* Save chipset family. */ switch (sc->bge_asicrev) { case BGE_ASICREV_BCM5762: case BGE_ASICREV_BCM57765: case BGE_ASICREV_BCM57766: sc->bge_flags |= BGE_FLAG_57765_PLUS; /* FALLTHROUGH */ case BGE_ASICREV_BCM5717: case BGE_ASICREV_BCM5719: case BGE_ASICREV_BCM5720: sc->bge_flags |= BGE_FLAG_5717_PLUS | BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS | BGE_FLAG_5705_PLUS | BGE_FLAG_JUMBO | BGE_FLAG_JUMBO_FRAME; if (sc->bge_asicrev == BGE_ASICREV_BCM5719 || sc->bge_asicrev == BGE_ASICREV_BCM5720) { /* * Enable work around for DMA engine miscalculation * of TXMBUF available space. */ sc->bge_flags |= BGE_FLAG_RDMA_BUG; if (sc->bge_asicrev == BGE_ASICREV_BCM5719 && sc->bge_chipid == BGE_CHIPID_BCM5719_A0) { /* Jumbo frame on BCM5719 A0 does not work. */ sc->bge_flags &= ~BGE_FLAG_JUMBO; } } break; case BGE_ASICREV_BCM5755: case BGE_ASICREV_BCM5761: case BGE_ASICREV_BCM5784: case BGE_ASICREV_BCM5785: case BGE_ASICREV_BCM5787: case BGE_ASICREV_BCM57780: sc->bge_flags |= BGE_FLAG_5755_PLUS | BGE_FLAG_575X_PLUS | BGE_FLAG_5705_PLUS; break; case BGE_ASICREV_BCM5700: case BGE_ASICREV_BCM5701: case BGE_ASICREV_BCM5703: case BGE_ASICREV_BCM5704: sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO; break; case BGE_ASICREV_BCM5714_A0: case BGE_ASICREV_BCM5780: case BGE_ASICREV_BCM5714: sc->bge_flags |= BGE_FLAG_5714_FAMILY | BGE_FLAG_JUMBO_STD; /* FALLTHROUGH */ case BGE_ASICREV_BCM5750: case BGE_ASICREV_BCM5752: case BGE_ASICREV_BCM5906: sc->bge_flags |= BGE_FLAG_575X_PLUS; /* FALLTHROUGH */ case BGE_ASICREV_BCM5705: sc->bge_flags |= BGE_FLAG_5705_PLUS; break; } /* Identify chips with APE processor. */ switch (sc->bge_asicrev) { case BGE_ASICREV_BCM5717: case BGE_ASICREV_BCM5719: case BGE_ASICREV_BCM5720: case BGE_ASICREV_BCM5761: case BGE_ASICREV_BCM5762: sc->bge_flags |= BGE_FLAG_APE; break; } /* Chips with APE need BAR2 access for APE registers/memory. */ if ((sc->bge_flags & BGE_FLAG_APE) != 0) { rid = PCIR_BAR(2); sc->bge_res2 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->bge_res2 == NULL) { device_printf (sc->bge_dev, "couldn't map BAR2 memory\n"); error = ENXIO; goto fail; } /* Enable APE register/memory access by host driver. 
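 * The BGE_PCISTATE_ALLOW_APE_* bits written below open the APE's
 * control space, shared memory and peripheral space to the host, and
 * bge_ape_lock_init() then prepares the hardware locks that the
 * driver later takes (for example bge_ape_lock(sc, BGE_APE_LOCK_GRC)
 * around the reset path) so that the host and the APE firmware do
 * not trample each other's register accesses.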
*/ pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4); pcistate |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR | BGE_PCISTATE_ALLOW_APE_SHMEM_WR | BGE_PCISTATE_ALLOW_APE_PSPACE_WR; pci_write_config(dev, BGE_PCI_PCISTATE, pcistate, 4); bge_ape_lock_init(sc); bge_ape_read_fw_ver(sc); } /* Add SYSCTLs, requires the chipset family to be set. */ bge_add_sysctls(sc); /* Identify the chips that use an CPMU. */ if (BGE_IS_5717_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5784 || sc->bge_asicrev == BGE_ASICREV_BCM5761 || sc->bge_asicrev == BGE_ASICREV_BCM5785 || sc->bge_asicrev == BGE_ASICREV_BCM57780) sc->bge_flags |= BGE_FLAG_CPMU_PRESENT; if ((sc->bge_flags & BGE_FLAG_CPMU_PRESENT) != 0) sc->bge_mi_mode = BGE_MIMODE_500KHZ_CONST; else sc->bge_mi_mode = BGE_MIMODE_BASE; /* Enable auto polling for BCM570[0-5]. */ if (BGE_IS_5700_FAMILY(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5705) sc->bge_mi_mode |= BGE_MIMODE_AUTOPOLL; /* * All Broadcom controllers have 4GB boundary DMA bug. * Whenever an address crosses a multiple of the 4GB boundary * (including 4GB, 8Gb, 12Gb, etc.) and makes the transition * from 0xX_FFFF_FFFF to 0x(X+1)_0000_0000 an internal DMA * state machine will lockup and cause the device to hang. */ sc->bge_flags |= BGE_FLAG_4G_BNDRY_BUG; /* BCM5755 or higher and BCM5906 have short DMA bug. */ if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906) sc->bge_flags |= BGE_FLAG_SHORT_DMA_BUG; /* * BCM5719 cannot handle DMA requests for DMA segments that * have larger than 4KB in size. However the maximum DMA * segment size created in DMA tag is 4KB for TSO, so we * wouldn't encounter the issue here. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5719) sc->bge_flags |= BGE_FLAG_4K_RDMA_BUG; misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID_MASK; if (sc->bge_asicrev == BGE_ASICREV_BCM5705) { if (misccfg == BGE_MISCCFG_BOARD_ID_5788 || misccfg == BGE_MISCCFG_BOARD_ID_5788M) sc->bge_flags |= BGE_FLAG_5788; } capmask = BMSR_DEFCAPMASK; if ((sc->bge_asicrev == BGE_ASICREV_BCM5703 && (misccfg == 0x4000 || misccfg == 0x8000)) || (sc->bge_asicrev == BGE_ASICREV_BCM5705 && pci_get_vendor(dev) == BCOM_VENDORID && (pci_get_device(dev) == BCOM_DEVICEID_BCM5901 || pci_get_device(dev) == BCOM_DEVICEID_BCM5901A2 || pci_get_device(dev) == BCOM_DEVICEID_BCM5705F)) || (pci_get_vendor(dev) == BCOM_VENDORID && (pci_get_device(dev) == BCOM_DEVICEID_BCM5751F || pci_get_device(dev) == BCOM_DEVICEID_BCM5753F || pci_get_device(dev) == BCOM_DEVICEID_BCM5787F)) || pci_get_device(dev) == BCOM_DEVICEID_BCM57790 || pci_get_device(dev) == BCOM_DEVICEID_BCM57791 || pci_get_device(dev) == BCOM_DEVICEID_BCM57795 || sc->bge_asicrev == BGE_ASICREV_BCM5906) { /* These chips are 10/100 only. */ capmask &= ~BMSR_EXTSTAT; sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED; } /* * Some controllers seem to require a special firmware to use * TSO. But the firmware is not available to FreeBSD and Linux * claims that the TSO performed by the firmware is slower than * hardware based TSO. Moreover the firmware based TSO has one * known bug which can't handle TSO if Ethernet header + IP/TCP * header is greater than 80 bytes. A workaround for the TSO * bug exist but it seems it's too expensive than not using * TSO at all. Some hardwares also have the TSO bug so limit * the TSO to the controllers that are not affected TSO issues * (e.g. 5755 or higher). */ if (BGE_IS_5717_PLUS(sc)) { /* BCM5717 requires different TSO configuration. 
*/ sc->bge_flags |= BGE_FLAG_TSO3; if (sc->bge_asicrev == BGE_ASICREV_BCM5719 && sc->bge_chipid == BGE_CHIPID_BCM5719_A0) { /* TSO on BCM5719 A0 does not work. */ sc->bge_flags &= ~BGE_FLAG_TSO3; } } else if (BGE_IS_5755_PLUS(sc)) { /* * BCM5754 and BCM5787 share the same ASIC id, so an * explicit device id check is required. * For unknown reasons, TSO does not work on the BCM5755M. */ if (pci_get_device(dev) != BCOM_DEVICEID_BCM5754 && pci_get_device(dev) != BCOM_DEVICEID_BCM5754M && pci_get_device(dev) != BCOM_DEVICEID_BCM5755M) sc->bge_flags |= BGE_FLAG_TSO; } /* * Check if this is a PCI-X or PCI Express device. */ if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) { /* * Found a PCI Express capabilities register, so this * must be a PCI Express device. */ sc->bge_flags |= BGE_FLAG_PCIE; sc->bge_expcap = reg; /* Extract supported maximum payload size. */ sc->bge_mps = pci_read_config(dev, sc->bge_expcap + PCIER_DEVICE_CAP, 2); sc->bge_mps = 128 << (sc->bge_mps & PCIEM_CAP_MAX_PAYLOAD); if (sc->bge_asicrev == BGE_ASICREV_BCM5719 || sc->bge_asicrev == BGE_ASICREV_BCM5720) sc->bge_expmrq = 2048; else sc->bge_expmrq = 4096; pci_set_max_read_req(dev, sc->bge_expmrq); } else { /* * Check if the device is in PCI-X Mode. * (This bit is not valid on PCI Express controllers.) */ if (pci_find_cap(dev, PCIY_PCIX, &reg) == 0) sc->bge_pcixcap = reg; if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) & BGE_PCISTATE_PCI_BUSMODE) == 0) sc->bge_flags |= BGE_FLAG_PCIX; } /* * The 40bit DMA bug applies to the 5714/5715 controllers and is * not actually a MAC controller bug but an issue with the embedded * PCIe to PCI-X bridge in the device. Use 40bit DMA workaround. */ if (BGE_IS_5714_FAMILY(sc) && (sc->bge_flags & BGE_FLAG_PCIX)) sc->bge_flags |= BGE_FLAG_40BIT_BUG; /* * Some PCI-X bridges are known to trigger write reordering to * the mailbox registers. The typical symptom is watchdog timeouts * caused by out-of-order TX completions. Enable workaround for * PCI-X devices that live behind these bridges. * Note, PCI-X controllers can run in PCI mode so we can't use * the BGE_FLAG_PCIX flag to detect PCI-X controllers. */ if (sc->bge_pcixcap != 0 && bge_mbox_reorder(sc) != 0) sc->bge_flags |= BGE_FLAG_MBOX_REORDER; /* * Allocate the interrupt, using MSI if possible. These devices * support 8 MSI messages, but only the first one is used in * normal operation. */ rid = 0; if (pci_find_cap(sc->bge_dev, PCIY_MSI, &reg) == 0) { sc->bge_msicap = reg; reg = 1; if (bge_can_use_msi(sc) && pci_alloc_msi(dev, &reg) == 0) { rid = 1; sc->bge_flags |= BGE_FLAG_MSI; } } /* * All controllers except the BCM5700 support tagged status, but * we use tagged status only for the MSI case on BCM5717. Otherwise * MSI on BCM5717 does not work. */ #ifndef DEVICE_POLLING if (sc->bge_flags & BGE_FLAG_MSI && BGE_IS_5717_PLUS(sc)) sc->bge_flags |= BGE_FLAG_TAGGED_STATUS; #endif sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | (rid != 0 ? 0 : RF_SHAREABLE)); if (sc->bge_irq == NULL) { device_printf(sc->bge_dev, "couldn't map interrupt\n"); error = ENXIO; goto fail; } bge_devinfo(sc); sc->bge_asf_mode = 0; /* No ASF if APE present.
*/ if ((sc->bge_flags & BGE_FLAG_APE) == 0) { if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == BGE_SRAM_DATA_SIG_MAGIC)) { if (bge_readmem_ind(sc, BGE_SRAM_DATA_CFG) & BGE_HWCFG_ASF) { sc->bge_asf_mode |= ASF_ENABLE; sc->bge_asf_mode |= ASF_STACKUP; if (BGE_IS_575X_PLUS(sc)) sc->bge_asf_mode |= ASF_NEW_HANDSHAKE; } } } bge_stop_fw(sc); bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN); if (bge_reset(sc)) { device_printf(sc->bge_dev, "chip reset failed\n"); error = ENXIO; goto fail; } bge_sig_legacy(sc, BGE_RESET_SHUTDOWN); bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN); if (bge_chipinit(sc)) { device_printf(sc->bge_dev, "chip initialization failed\n"); error = ENXIO; goto fail; } error = bge_get_eaddr(sc, eaddr); if (error) { device_printf(sc->bge_dev, "failed to read station address\n"); error = ENXIO; goto fail; } /* 5705 limits RX return ring to 512 entries. */ if (BGE_IS_5717_PLUS(sc)) sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; else if (BGE_IS_5705_PLUS(sc)) sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; else sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; if (bge_dma_alloc(sc)) { device_printf(sc->bge_dev, "failed to allocate DMA resources\n"); error = ENXIO; goto fail; } /* Set default tuneable values. */ sc->bge_stat_ticks = BGE_TICKS_PER_SEC; sc->bge_rx_coal_ticks = 150; sc->bge_tx_coal_ticks = 150; sc->bge_rx_max_coal_bds = 10; sc->bge_tx_max_coal_bds = 10; /* Initialize checksum features to use. */ sc->bge_csum_features = BGE_CSUM_FEATURES; if (sc->bge_forced_udpcsum != 0) sc->bge_csum_features |= CSUM_UDP; /* Set up ifnet structure */ ifp = sc->bge_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(sc->bge_dev, "failed to if_alloc()\n"); error = ENXIO; goto fail; } if_setsoftc(ifp, sc); if_initname(ifp, device_get_name(dev), device_get_unit(dev)); if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); if_setioctlfn(ifp, bge_ioctl); if_setstartfn(ifp, bge_start); if_setinitfn(ifp, bge_init); if_setgetcounterfn(ifp, bge_get_counter); if_setsendqlen(ifp, BGE_TX_RING_CNT - 1); if_setsendqready(ifp); if_sethwassist(ifp, sc->bge_csum_features); if_setcapabilities(ifp, IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU); if ((sc->bge_flags & (BGE_FLAG_TSO | BGE_FLAG_TSO3)) != 0) { if_sethwassistbits(ifp, CSUM_TSO, 0); if_setcapabilitiesbit(ifp, IFCAP_TSO4 | IFCAP_VLAN_HWTSO, 0); } #ifdef IFCAP_VLAN_HWCSUM if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWCSUM, 0); #endif if_setcapenable(ifp, if_getcapabilities(ifp)); #ifdef DEVICE_POLLING if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0); #endif /* * 5700 B0 chips do not support checksumming correctly due * to hardware bugs. */ if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) { if_setcapabilitiesbit(ifp, 0, IFCAP_HWCSUM); if_setcapenablebit(ifp, 0, IFCAP_HWCSUM); if_sethwassist(ifp, 0); } /* * Figure out what sort of media we have by checking the * hardware config word in the first 32k of NIC internal memory, * or fall back to examining the EEPROM if necessary. * Note: on some BCM5700 cards, this value appears to be unset. * If that's the case, we have to rely on identifying the NIC * by its PCI subsystem ID, as we do below for the SysKonnect * SK-9D41. 
*/ if (bge_readmem_ind(sc, BGE_SRAM_DATA_SIG) == BGE_SRAM_DATA_SIG_MAGIC) hwcfg = bge_readmem_ind(sc, BGE_SRAM_DATA_CFG); else if ((sc->bge_flags & BGE_FLAG_EADDR) && (sc->bge_asicrev != BGE_ASICREV_BCM5906)) { if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET, sizeof(hwcfg))) { device_printf(sc->bge_dev, "failed to read EEPROM\n"); error = ENXIO; goto fail; } hwcfg = ntohl(hwcfg); } /* The SysKonnect SK-9D41 is a 1000baseSX card. */ if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41 || (hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) { if (BGE_IS_5705_PLUS(sc)) { sc->bge_flags |= BGE_FLAG_MII_SERDES; sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED; } else sc->bge_flags |= BGE_FLAG_TBI; } /* Set various PHY bug flags. */ if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 || sc->bge_chipid == BGE_CHIPID_BCM5701_B0) sc->bge_phy_flags |= BGE_PHY_CRC_BUG; if (sc->bge_chiprev == BGE_CHIPREV_5703_AX || sc->bge_chiprev == BGE_CHIPREV_5704_AX) sc->bge_phy_flags |= BGE_PHY_ADC_BUG; if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0) sc->bge_phy_flags |= BGE_PHY_5704_A0_BUG; if (pci_get_subvendor(dev) == DELL_VENDORID) sc->bge_phy_flags |= BGE_PHY_NO_3LED; if ((BGE_IS_5705_PLUS(sc)) && sc->bge_asicrev != BGE_ASICREV_BCM5906 && sc->bge_asicrev != BGE_ASICREV_BCM5785 && sc->bge_asicrev != BGE_ASICREV_BCM57780 && !BGE_IS_5717_PLUS(sc)) { if (sc->bge_asicrev == BGE_ASICREV_BCM5755 || sc->bge_asicrev == BGE_ASICREV_BCM5761 || sc->bge_asicrev == BGE_ASICREV_BCM5784 || sc->bge_asicrev == BGE_ASICREV_BCM5787) { if (pci_get_device(dev) != BCOM_DEVICEID_BCM5722 && pci_get_device(dev) != BCOM_DEVICEID_BCM5756) sc->bge_phy_flags |= BGE_PHY_JITTER_BUG; if (pci_get_device(dev) == BCOM_DEVICEID_BCM5755M) sc->bge_phy_flags |= BGE_PHY_ADJUST_TRIM; } else sc->bge_phy_flags |= BGE_PHY_BER_BUG; } /* * Don't enable Ethernet@WireSpeed for the 5700 or the * 5705 A0 and A1 chips. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5700 || (sc->bge_asicrev == BGE_ASICREV_BCM5705 && (sc->bge_chipid != BGE_CHIPID_BCM5705_A0 && sc->bge_chipid != BGE_CHIPID_BCM5705_A1))) sc->bge_phy_flags |= BGE_PHY_NO_WIRESPEED; if (sc->bge_flags & BGE_FLAG_TBI) { ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd, bge_ifmedia_sts); ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL); ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX, 0, NULL); ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO); sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media; } else { /* * Do transceiver setup and tell the firmware the * driver is down so we can try to get access the * probe if ASF is running. Retry a couple of times * if we get a conflict with the ASF firmware accessing * the PHY. */ trys = 0; BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); again: bge_asf_driver_up(sc); error = mii_attach(dev, &sc->bge_miibus, ifp, (ifm_change_cb_t)bge_ifmedia_upd, (ifm_stat_cb_t)bge_ifmedia_sts, capmask, sc->bge_phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE); if (error != 0) { if (trys++ < 4) { device_printf(sc->bge_dev, "Try again\n"); bge_miibus_writereg(sc->bge_dev, sc->bge_phy_addr, MII_BMCR, BMCR_RESET); goto again; } device_printf(sc->bge_dev, "attaching PHYs failed\n"); goto fail; } /* * Now tell the firmware we are going up after probing the PHY */ if (sc->bge_asf_mode & ASF_STACKUP) BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); } /* * When using the BCM5701 in PCI-X mode, data corruption has * been observed in the first few bytes of some received packets. 
* Aligning the packet buffer in memory eliminates the corruption. * Unfortunately, this misaligns the packet payloads. On platforms * which do not support unaligned accesses, we will realign the * payloads by copying the received packets. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5701 && sc->bge_flags & BGE_FLAG_PCIX) sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG; /* * Call MI attach routine. */ ether_ifattach(ifp, eaddr); /* Tell upper layer we support long frames. */ if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); /* * Hookup IRQ last. */ if (BGE_IS_5755_PLUS(sc) && sc->bge_flags & BGE_FLAG_MSI) { /* Take advantage of single-shot MSI. */ CSR_WRITE_4(sc, BGE_MSI_MODE, CSR_READ_4(sc, BGE_MSI_MODE) & ~BGE_MSIMODE_ONE_SHOT_DISABLE); sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK, taskqueue_thread_enqueue, &sc->bge_tq); if (sc->bge_tq == NULL) { device_printf(dev, "could not create taskqueue.\n"); ether_ifdetach(ifp); error = ENOMEM; goto fail; } error = taskqueue_start_threads(&sc->bge_tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->bge_dev)); if (error != 0) { device_printf(dev, "could not start threads.\n"); ether_ifdetach(ifp); goto fail; } error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE, bge_msi_intr, NULL, sc, &sc->bge_intrhand); } else error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc, &sc->bge_intrhand); if (error) { ether_ifdetach(ifp); device_printf(sc->bge_dev, "couldn't set up irq\n"); goto fail; } /* Attach driver debugnet methods. */ DEBUGNET_SET(ifp, bge); fail: if (error) bge_detach(dev); return (error); } static int bge_detach(device_t dev) { struct bge_softc *sc; if_t ifp; sc = device_get_softc(dev); ifp = sc->bge_ifp; #ifdef DEVICE_POLLING if (if_getcapenable(ifp) & IFCAP_POLLING) ether_poll_deregister(ifp); #endif if (device_is_attached(dev)) { ether_ifdetach(ifp); BGE_LOCK(sc); bge_stop(sc); BGE_UNLOCK(sc); callout_drain(&sc->bge_stat_ch); } if (sc->bge_tq) taskqueue_drain(sc->bge_tq, &sc->bge_intr_task); if (sc->bge_flags & BGE_FLAG_TBI) ifmedia_removeall(&sc->bge_ifmedia); else if (sc->bge_miibus != NULL) { bus_generic_detach(dev); device_delete_child(dev, sc->bge_miibus); } bge_release_resources(sc); return (0); } static void bge_release_resources(struct bge_softc *sc) { device_t dev; dev = sc->bge_dev; if (sc->bge_tq != NULL) taskqueue_free(sc->bge_tq); if (sc->bge_intrhand != NULL) bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand); if (sc->bge_irq != NULL) { bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->bge_irq), sc->bge_irq); pci_release_msi(dev); } if (sc->bge_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->bge_res), sc->bge_res); if (sc->bge_res2 != NULL) bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->bge_res2), sc->bge_res2); if (sc->bge_ifp != NULL) if_free(sc->bge_ifp); bge_dma_free(sc); if (mtx_initialized(&sc->bge_mtx)) /* XXX */ BGE_LOCK_DESTROY(sc); } static int bge_reset(struct bge_softc *sc) { device_t dev; uint32_t cachesize, command, mac_mode, mac_mode_mask, reset, val; void (*write_op)(struct bge_softc *, int, int); uint16_t devctl; int i; dev = sc->bge_dev; mac_mode_mask = BGE_MACMODE_HALF_DUPLEX | BGE_MACMODE_PORTMODE; if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0) mac_mode_mask |= BGE_MACMODE_APE_RX_EN | BGE_MACMODE_APE_TX_EN; mac_mode = CSR_READ_4(sc, BGE_MAC_MODE) & mac_mode_mask; if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) && (sc->bge_asicrev != BGE_ASICREV_BCM5906)) { if (sc->bge_flags & BGE_FLAG_PCIE) write_op = 
bge_writemem_direct; else write_op = bge_writemem_ind; } else write_op = bge_writereg_ind; if (sc->bge_asicrev != BGE_ASICREV_BCM5700 && sc->bge_asicrev != BGE_ASICREV_BCM5701) { CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1); for (i = 0; i < 8000; i++) { if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1) break; DELAY(20); } if (i == 8000) { if (bootverbose) device_printf(dev, "NVRAM lock timedout!\n"); } } /* Take APE lock when performing reset. */ bge_ape_lock(sc, BGE_APE_LOCK_GRC); /* Save some important PCI state. */ cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4); command = pci_read_config(dev, BGE_PCI_CMD, 4); pci_write_config(dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4); /* Disable fastboot on controllers that support it. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5752 || BGE_IS_5755_PLUS(sc)) { if (bootverbose) device_printf(dev, "Disabling fastboot\n"); CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0); } /* * Write the magic number to SRAM at offset 0xB50. * When firmware finishes its initialization it will * write ~BGE_SRAM_FW_MB_MAGIC to the same location. */ bge_writemem_ind(sc, BGE_SRAM_FW_MB, BGE_SRAM_FW_MB_MAGIC); reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ; /* XXX: Broadcom Linux driver. */ if (sc->bge_flags & BGE_FLAG_PCIE) { if (sc->bge_asicrev != BGE_ASICREV_BCM5785 && (sc->bge_flags & BGE_FLAG_5717_PLUS) == 0) { if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */ CSR_WRITE_4(sc, 0x7E2C, 0x20); } if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { /* Prevent PCIE link training during global reset */ CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29); reset |= 1 << 29; } } if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { val = CSR_READ_4(sc, BGE_VCPU_STATUS); CSR_WRITE_4(sc, BGE_VCPU_STATUS, val | BGE_VCPU_STATUS_DRV_RESET); val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL); CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL, val & ~BGE_VCPU_EXT_CTRL_HALT_CPU); } /* * Set GPHY Power Down Override to leave GPHY * powered up in D0 uninitialized. */ if (BGE_IS_5705_PLUS(sc) && (sc->bge_flags & BGE_FLAG_CPMU_PRESENT) == 0) reset |= BGE_MISCCFG_GPHY_PD_OVERRIDE; /* Issue global reset */ write_op(sc, BGE_MISC_CFG, reset); if (sc->bge_flags & BGE_FLAG_PCIE) DELAY(100 * 1000); else DELAY(1000); /* XXX: Broadcom Linux driver. */ if (sc->bge_flags & BGE_FLAG_PCIE) { if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { DELAY(500000); /* wait for link training to complete */ val = pci_read_config(dev, 0xC4, 4); pci_write_config(dev, 0xC4, val | (1 << 15), 4); } devctl = pci_read_config(dev, sc->bge_expcap + PCIER_DEVICE_CTL, 2); /* Clear enable no snoop and disable relaxed ordering. */ devctl &= ~(PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE); pci_write_config(dev, sc->bge_expcap + PCIER_DEVICE_CTL, devctl, 2); pci_set_max_read_req(dev, sc->bge_expmrq); /* Clear error status. */ pci_write_config(dev, sc->bge_expcap + PCIER_DEVICE_STA, PCIEM_STA_CORRECTABLE_ERROR | PCIEM_STA_NON_FATAL_ERROR | PCIEM_STA_FATAL_ERROR | PCIEM_STA_UNSUPPORTED_REQ, 2); } /* Reset some of the PCI state that got zapped by reset. 
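 * Once this state is restored, the code below completes the firmware
 * handshake started before the reset: having written
 * BGE_SRAM_FW_MB_MAGIC into the mailbox at offset 0xB50, the driver
 * (on parts other than the BCM5906, which instead waits for its VCPU
 * to report init done) polls until the bootcode stores the one's
 * complement of that value, ~BGE_SRAM_FW_MB_MAGIC, or gives up after
 * BGE_TIMEOUT iterations, which is expected when no EEPROM holding
 * the station address is fitted.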
*/ pci_write_config(dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4); val = BGE_PCISTATE_ROM_ENABLE | BGE_PCISTATE_ROM_RETRY_ENABLE; if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0 && (sc->bge_flags & BGE_FLAG_PCIX) != 0) val |= BGE_PCISTATE_RETRY_SAME_DMA; if ((sc->bge_mfw_flags & BGE_MFW_ON_APE) != 0) val |= BGE_PCISTATE_ALLOW_APE_CTLSPC_WR | BGE_PCISTATE_ALLOW_APE_SHMEM_WR | BGE_PCISTATE_ALLOW_APE_PSPACE_WR; pci_write_config(dev, BGE_PCI_PCISTATE, val, 4); pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4); pci_write_config(dev, BGE_PCI_CMD, command, 4); /* * Disable PCI-X relaxed ordering to ensure status block update * comes first then packet buffer DMA. Otherwise driver may * read stale status block. */ if (sc->bge_flags & BGE_FLAG_PCIX) { devctl = pci_read_config(dev, sc->bge_pcixcap + PCIXR_COMMAND, 2); devctl &= ~PCIXM_COMMAND_ERO; if (sc->bge_asicrev == BGE_ASICREV_BCM5703) { devctl &= ~PCIXM_COMMAND_MAX_READ; devctl |= PCIXM_COMMAND_MAX_READ_2048; } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) { devctl &= ~(PCIXM_COMMAND_MAX_SPLITS | PCIXM_COMMAND_MAX_READ); devctl |= PCIXM_COMMAND_MAX_READ_2048; } pci_write_config(dev, sc->bge_pcixcap + PCIXR_COMMAND, devctl, 2); } /* Re-enable MSI, if necessary, and enable the memory arbiter. */ if (BGE_IS_5714_FAMILY(sc)) { /* This chip disables MSI on reset. */ if (sc->bge_flags & BGE_FLAG_MSI) { val = pci_read_config(dev, sc->bge_msicap + PCIR_MSI_CTRL, 2); pci_write_config(dev, sc->bge_msicap + PCIR_MSI_CTRL, val | PCIM_MSICTRL_MSI_ENABLE, 2); val = CSR_READ_4(sc, BGE_MSI_MODE); CSR_WRITE_4(sc, BGE_MSI_MODE, val | BGE_MSIMODE_ENABLE); } val = CSR_READ_4(sc, BGE_MARB_MODE); CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val); } else CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); /* Fix up byte swapping. */ CSR_WRITE_4(sc, BGE_MODE_CTL, bge_dma_swap_options(sc)); val = CSR_READ_4(sc, BGE_MAC_MODE); val = (val & ~mac_mode_mask) | mac_mode; CSR_WRITE_4(sc, BGE_MAC_MODE, val); DELAY(40); bge_ape_unlock(sc, BGE_APE_LOCK_GRC); if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { for (i = 0; i < BGE_TIMEOUT; i++) { val = CSR_READ_4(sc, BGE_VCPU_STATUS); if (val & BGE_VCPU_STATUS_INIT_DONE) break; DELAY(100); } if (i == BGE_TIMEOUT) { device_printf(dev, "reset timed out\n"); return (1); } } else { /* * Poll until we see the 1's complement of the magic number. * This indicates that the firmware initialization is complete. * We expect this to fail if no chip containing the Ethernet * address is fitted though. */ for (i = 0; i < BGE_TIMEOUT; i++) { DELAY(10); val = bge_readmem_ind(sc, BGE_SRAM_FW_MB); if (val == ~BGE_SRAM_FW_MB_MAGIC) break; } if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT) device_printf(dev, "firmware handshake timed out, found 0x%08x\n", val); /* BCM57765 A0 needs additional time before accessing. */ if (sc->bge_chipid == BGE_CHIPID_BCM57765_A0) DELAY(10 * 1000); /* XXX */ } /* * The 5704 in TBI mode apparently needs some special * adjustment to insure the SERDES drive level is set * to 1.2V. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_flags & BGE_FLAG_TBI) { val = CSR_READ_4(sc, BGE_SERDES_CFG); val = (val & ~0xFFF) | 0x880; CSR_WRITE_4(sc, BGE_SERDES_CFG, val); } /* XXX: Broadcom Linux driver. */ if (sc->bge_flags & BGE_FLAG_PCIE && !BGE_IS_5717_PLUS(sc) && sc->bge_chipid != BGE_CHIPID_BCM5750_A0 && sc->bge_asicrev != BGE_ASICREV_BCM5785) { /* Enable Data FIFO protection. 
*/ val = CSR_READ_4(sc, 0x7C00); CSR_WRITE_4(sc, 0x7C00, val | (1 << 25)); } if (sc->bge_asicrev == BGE_ASICREV_BCM5720) BGE_CLRBIT(sc, BGE_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN); return (0); } static __inline void bge_rxreuse_std(struct bge_softc *sc, int i) { struct bge_rx_bd *r; r = &sc->bge_ldata.bge_rx_std_ring[sc->bge_std]; r->bge_flags = BGE_RXBDFLAG_END; r->bge_len = sc->bge_cdata.bge_rx_std_seglen[i]; r->bge_idx = i; BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); } static __inline void bge_rxreuse_jumbo(struct bge_softc *sc, int i) { struct bge_extrx_bd *r; r = &sc->bge_ldata.bge_rx_jumbo_ring[sc->bge_jumbo]; r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END; r->bge_len0 = sc->bge_cdata.bge_rx_jumbo_seglen[i][0]; r->bge_len1 = sc->bge_cdata.bge_rx_jumbo_seglen[i][1]; r->bge_len2 = sc->bge_cdata.bge_rx_jumbo_seglen[i][2]; r->bge_len3 = sc->bge_cdata.bge_rx_jumbo_seglen[i][3]; r->bge_idx = i; BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); } /* * Frame reception handling. This is called if there's a frame * on the receive return list. * * Note: we have to be able to handle two possibilities here: * 1) the frame is from the jumbo receive ring * 2) the frame is from the standard receive ring */ static int bge_rxeof(struct bge_softc *sc, uint16_t rx_prod, int holdlck) { if_t ifp; int rx_npkts = 0, stdcnt = 0, jumbocnt = 0; uint16_t rx_cons; rx_cons = sc->bge_rx_saved_considx; /* Nothing to do. */ if (rx_cons == rx_prod) return (rx_npkts); ifp = sc->bge_ifp; bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag, sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD); bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTWRITE); if (BGE_IS_JUMBO_CAPABLE(sc) && if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN)) bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTWRITE); while (rx_cons != rx_prod) { struct bge_rx_bd *cur_rx; uint32_t rxidx; struct mbuf *m = NULL; uint16_t vlan_tag = 0; int have_tag = 0; #ifdef DEVICE_POLLING if (if_getcapenable(ifp) & IFCAP_POLLING) { if (sc->rxcycles <= 0) break; sc->rxcycles--; } #endif cur_rx = &sc->bge_ldata.bge_rx_return_ring[rx_cons]; rxidx = cur_rx->bge_idx; BGE_INC(rx_cons, sc->bge_return_ring_cnt); if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING && cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { have_tag = 1; vlan_tag = cur_rx->bge_vlan_tag; } if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { jumbocnt++; m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { bge_rxreuse_jumbo(sc, rxidx); continue; } if (bge_newbuf_jumbo(sc, rxidx) != 0) { bge_rxreuse_jumbo(sc, rxidx); if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); continue; } BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); } else { stdcnt++; m = sc->bge_cdata.bge_rx_std_chain[rxidx]; if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { bge_rxreuse_std(sc, rxidx); continue; } if (bge_newbuf_std(sc, rxidx) != 0) { bge_rxreuse_std(sc, rxidx); if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); continue; } BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); } if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); #ifndef __NO_STRICT_ALIGNMENT /* * For architectures with strict alignment we must make sure * the payload is aligned. 
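 * ETHER_ALIGN is two bytes, so copying the frame forward by that
 * amount puts the 14-byte Ethernet header at offset 2 and the IP
 * header at offset 16, restoring 32-bit alignment of the payload at
 * the cost of one bcopy() per received packet on boards that need
 * the BGE_FLAG_RX_ALIGNBUG workaround.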
*/ if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) { bcopy(m->m_data, m->m_data + ETHER_ALIGN, cur_rx->bge_len); m->m_data += ETHER_ALIGN; } #endif m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; m->m_pkthdr.rcvif = ifp; if (if_getcapenable(ifp) & IFCAP_RXCSUM) bge_rxcsum(sc, cur_rx, m); /* * If we received a packet with a vlan tag, * attach that information to the packet. */ if (have_tag) { m->m_pkthdr.ether_vtag = vlan_tag; m->m_flags |= M_VLANTAG; } if (holdlck != 0) { BGE_UNLOCK(sc); if_input(ifp, m); BGE_LOCK(sc); } else if_input(ifp, m); rx_npkts++; if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) return (rx_npkts); } bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag, sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREREAD); if (stdcnt > 0) bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE); if (jumbocnt > 0) bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE); sc->bge_rx_saved_considx = rx_cons; bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); if (stdcnt) bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, (sc->bge_std + BGE_STD_RX_RING_CNT - 1) % BGE_STD_RX_RING_CNT); if (jumbocnt) bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, (sc->bge_jumbo + BGE_JUMBO_RX_RING_CNT - 1) % BGE_JUMBO_RX_RING_CNT); #ifdef notyet /* * This register wraps very quickly under heavy packet drops. * If you need correct statistics, you can enable this check. */ if (BGE_IS_5705_PLUS(sc)) if_incierrors(ifp, CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS)); #endif return (rx_npkts); } static void bge_rxcsum(struct bge_softc *sc, struct bge_rx_bd *cur_rx, struct mbuf *m) { if (BGE_IS_5717_PLUS(sc)) { if ((cur_rx->bge_flags & BGE_RXBDFLAG_IPV6) == 0) { if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) { m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if ((cur_rx->bge_error_flag & BGE_RXERRFLAG_IP_CSUM_NOK) == 0) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; } if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) { m->m_pkthdr.csum_data = cur_rx->bge_tcp_udp_csum; m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; } } } else { if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) { m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; } if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM && m->m_pkthdr.len >= ETHER_MIN_NOPAD) { m->m_pkthdr.csum_data = cur_rx->bge_tcp_udp_csum; m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; } } } static void bge_txeof(struct bge_softc *sc, uint16_t tx_cons) { struct bge_tx_bd *cur_tx; if_t ifp; BGE_LOCK_ASSERT(sc); /* Nothing to do. */ if (sc->bge_tx_saved_considx == tx_cons) return; ifp = sc->bge_ifp; bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag, sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTWRITE); /* * Go through our tx ring and free mbufs for those * frames that have been sent. 
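 * Every descriptor from bge_tx_saved_considx up to tx_cons has completed,
 * so its DMA map is unloaded, its mbuf freed and bge_txcnt decremented.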
*/ while (sc->bge_tx_saved_considx != tx_cons) { uint32_t idx; idx = sc->bge_tx_saved_considx; cur_tx = &sc->bge_ldata.bge_tx_ring[idx]; if (cur_tx->bge_flags & BGE_TXBDFLAG_END) if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); if (sc->bge_cdata.bge_tx_chain[idx] != NULL) { bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, sc->bge_cdata.bge_tx_dmamap[idx], BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, sc->bge_cdata.bge_tx_dmamap[idx]); m_freem(sc->bge_cdata.bge_tx_chain[idx]); sc->bge_cdata.bge_tx_chain[idx] = NULL; } sc->bge_txcnt--; BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); } if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); if (sc->bge_txcnt == 0) sc->bge_timer = 0; } #ifdef DEVICE_POLLING static int bge_poll(if_t ifp, enum poll_cmd cmd, int count) { struct bge_softc *sc = if_getsoftc(ifp); uint16_t rx_prod, tx_cons; uint32_t statusword; int rx_npkts = 0; BGE_LOCK(sc); if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { BGE_UNLOCK(sc); return (rx_npkts); } bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); /* Fetch updates from the status block. */ rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx; tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx; statusword = sc->bge_ldata.bge_status_block->bge_status; /* Clear the status so the next pass only sees the changes. */ sc->bge_ldata.bge_status_block->bge_status = 0; bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Note link event. It will be processed by POLL_AND_CHECK_STATUS. */ if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED) sc->bge_link_evt++; if (cmd == POLL_AND_CHECK_STATUS) if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 && sc->bge_chipid != BGE_CHIPID_BCM5700_B2) || sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI)) bge_link_upd(sc); sc->rxcycles = count; rx_npkts = bge_rxeof(sc, rx_prod, 1); if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { BGE_UNLOCK(sc); return (rx_npkts); } bge_txeof(sc, tx_cons); if (!if_sendq_empty(ifp)) bge_start_locked(ifp); BGE_UNLOCK(sc); return (rx_npkts); } #endif /* DEVICE_POLLING */ static int bge_msi_intr(void *arg) { struct bge_softc *sc; sc = (struct bge_softc *)arg; /* * This interrupt is not shared and controller already * disabled further interrupt. */ taskqueue_enqueue(sc->bge_tq, &sc->bge_intr_task); return (FILTER_HANDLED); } static void bge_intr_task(void *arg, int pending) { - struct epoch_tracker et; struct bge_softc *sc; if_t ifp; uint32_t status, status_tag; uint16_t rx_prod, tx_cons; sc = (struct bge_softc *)arg; ifp = sc->bge_ifp; BGE_LOCK(sc); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) { BGE_UNLOCK(sc); return; } /* Get updated status block. */ bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); /* Save producer/consumer indices. */ rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx; tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx; status = sc->bge_ldata.bge_status_block->bge_status; status_tag = sc->bge_ldata.bge_status_block->bge_status_tag << 24; /* Dirty the status flag. 
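 * Zeroing bge_status lets the next status block DMA from the chip be
 * recognized as a fresh update.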
*/ sc->bge_ldata.bge_status_block->bge_status = 0; bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); if ((sc->bge_flags & BGE_FLAG_TAGGED_STATUS) == 0) status_tag = 0; if ((status & BGE_STATFLAG_LINKSTATE_CHANGED) != 0) bge_link_upd(sc); /* Let controller work. */ bge_writembx(sc, BGE_MBX_IRQ0_LO, status_tag); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING && sc->bge_rx_saved_considx != rx_prod) { /* Check RX return ring producer/consumer. */ BGE_UNLOCK(sc); - NET_EPOCH_ENTER(et); bge_rxeof(sc, rx_prod, 0); - NET_EPOCH_EXIT(et); BGE_LOCK(sc); } if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { /* Check TX ring producer/consumer. */ bge_txeof(sc, tx_cons); if (!if_sendq_empty(ifp)) bge_start_locked(ifp); } BGE_UNLOCK(sc); } static void bge_intr(void *xsc) { struct bge_softc *sc; if_t ifp; uint32_t statusword; uint16_t rx_prod, tx_cons; sc = xsc; BGE_LOCK(sc); ifp = sc->bge_ifp; #ifdef DEVICE_POLLING if (if_getcapenable(ifp) & IFCAP_POLLING) { BGE_UNLOCK(sc); return; } #endif /* * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't * disable interrupts by writing nonzero like we used to, since with * our current organization this just gives complications and * pessimizations for re-enabling interrupts. We used to have races * instead of the necessary complications. Disabling interrupts * would just reduce the chance of a status update while we are * running (by switching to the interrupt-mode coalescence * parameters), but this chance is already very low so it is more * efficient to get another interrupt than prevent it. * * We do the ack first to ensure another interrupt if there is a * status update after the ack. We don't check for the status * changing later because it is more efficient to get another * interrupt than prevent it, not quite as above (not checking is * a smaller optimization than not toggling the interrupt enable, * since checking doesn't involve PCI accesses and toggling require * the status check). So toggling would probably be a pessimization * even with MSI. It would only be needed for using a task queue. */ bge_writembx(sc, BGE_MBX_IRQ0_LO, 0); /* * Do the mandatory PCI flush as well as get the link status. */ statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED; /* Make sure the descriptor ring indexes are coherent. */ bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx; tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx; sc->bge_ldata.bge_status_block->bge_status = 0; bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 && sc->bge_chipid != BGE_CHIPID_BCM5700_B2) || statusword || sc->bge_link_evt) bge_link_upd(sc); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { /* Check RX return ring producer/consumer. */ bge_rxeof(sc, rx_prod, 1); } if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { /* Check TX ring producer/consumer. */ bge_txeof(sc, tx_cons); } if (if_getdrvflags(ifp) & IFF_DRV_RUNNING && !if_sendq_empty(ifp)) bge_start_locked(ifp); BGE_UNLOCK(sc); } static void bge_asf_driver_up(struct bge_softc *sc) { if (sc->bge_asf_mode & ASF_STACKUP) { /* Send ASF heartbeat aprox. 
every 2s */ if (sc->bge_asf_count) sc->bge_asf_count --; else { sc->bge_asf_count = 2; bge_writemem_ind(sc, BGE_SRAM_FW_CMD_MB, BGE_FW_CMD_DRV_ALIVE); bge_writemem_ind(sc, BGE_SRAM_FW_CMD_LEN_MB, 4); bge_writemem_ind(sc, BGE_SRAM_FW_CMD_DATA_MB, BGE_FW_HB_TIMEOUT_SEC); CSR_WRITE_4(sc, BGE_RX_CPU_EVENT, CSR_READ_4(sc, BGE_RX_CPU_EVENT) | BGE_RX_CPU_DRV_EVENT); } } } static void bge_tick(void *xsc) { struct bge_softc *sc = xsc; struct mii_data *mii = NULL; BGE_LOCK_ASSERT(sc); /* Synchronize with possible callout reset/stop. */ if (callout_pending(&sc->bge_stat_ch) || !callout_active(&sc->bge_stat_ch)) return; if (BGE_IS_5705_PLUS(sc)) bge_stats_update_regs(sc); else bge_stats_update(sc); /* XXX Add APE heartbeat check here? */ if ((sc->bge_flags & BGE_FLAG_TBI) == 0) { mii = device_get_softc(sc->bge_miibus); /* * Do not touch PHY if we have link up. This could break * IPMI/ASF mode or produce extra input errors * (extra errors was reported for bcm5701 & bcm5704). */ if (!sc->bge_link) mii_tick(mii); } else { /* * Since in TBI mode auto-polling can't be used we should poll * link status manually. Here we register pending link event * and trigger interrupt. */ #ifdef DEVICE_POLLING /* In polling mode we poll link state in bge_poll(). */ if (!(if_getcapenable(sc->bge_ifp) & IFCAP_POLLING)) #endif { sc->bge_link_evt++; if (sc->bge_asicrev == BGE_ASICREV_BCM5700 || sc->bge_flags & BGE_FLAG_5788) BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); else BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); } } bge_asf_driver_up(sc); bge_watchdog(sc); callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc); } static void bge_stats_update_regs(struct bge_softc *sc) { if_t ifp; struct bge_mac_stats *stats; uint32_t val; ifp = sc->bge_ifp; stats = &sc->bge_mac_stats; stats->ifHCOutOctets += CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS); stats->etherStatsCollisions += CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS); stats->outXonSent += CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT); stats->outXoffSent += CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT); stats->dot3StatsInternalMacTransmitErrors += CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS); stats->dot3StatsSingleCollisionFrames += CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL); stats->dot3StatsMultipleCollisionFrames += CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL); stats->dot3StatsDeferredTransmissions += CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED); stats->dot3StatsExcessiveCollisions += CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL); stats->dot3StatsLateCollisions += CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL); stats->ifHCOutUcastPkts += CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST); stats->ifHCOutMulticastPkts += CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST); stats->ifHCOutBroadcastPkts += CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST); stats->ifHCInOctets += CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS); stats->etherStatsFragments += CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS); stats->ifHCInUcastPkts += CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST); stats->ifHCInMulticastPkts += CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST); stats->ifHCInBroadcastPkts += CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST); stats->dot3StatsFCSErrors += CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS); stats->dot3StatsAlignmentErrors += CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS); stats->xonPauseFramesReceived += CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD); stats->xoffPauseFramesReceived += CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD); stats->macControlFramesReceived += CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD); stats->xoffStateEntered += CSR_READ_4(sc, 
BGE_RX_MAC_STATS_XOFF_ENTERED); stats->dot3StatsFramesTooLong += CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG); stats->etherStatsJabbers += CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS); stats->etherStatsUndersizePkts += CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE); stats->FramesDroppedDueToFilters += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP); stats->DmaWriteQueueFull += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL); stats->DmaWriteHighPriQueueFull += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL); stats->NoMoreRxBDs += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS); /* * XXX * Unlike other controllers, BGE_RXLP_LOCSTAT_IFIN_DROPS * counter of BCM5717, BCM5718, BCM5719 A0 and BCM5720 A0 * includes number of unwanted multicast frames. This comes * from silicon bug and known workaround to get rough(not * exact) counter is to enable interrupt on MBUF low water * attention. This can be accomplished by setting * BGE_HCCMODE_ATTN bit of BGE_HCC_MODE, * BGE_BMANMODE_LOMBUF_ATTN bit of BGE_BMAN_MODE and * BGE_MODECTL_FLOWCTL_ATTN_INTR bit of BGE_MODE_CTL. * However that change would generate more interrupts and * there are still possibilities of losing multiple frames * during BGE_MODECTL_FLOWCTL_ATTN_INTR interrupt handling. * Given that the workaround still would not get correct * counter I don't think it's worth to implement it. So * ignore reading the counter on controllers that have the * silicon bug. */ if (sc->bge_asicrev != BGE_ASICREV_BCM5717 && sc->bge_chipid != BGE_CHIPID_BCM5719_A0 && sc->bge_chipid != BGE_CHIPID_BCM5720_A0) stats->InputDiscards += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS); stats->InputErrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS); stats->RecvThresholdHit += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT); if (sc->bge_flags & BGE_FLAG_RDMA_BUG) { /* * If controller transmitted more than BGE_NUM_RDMA_CHANNELS * frames, it's safe to disable workaround for DMA engine's * miscalculation of TXMBUF space. 
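 * Once the combined TX packet count passes BGE_NUM_RDMA_CHANNELS, the
 * code below clears the length-workaround bit in BGE_RDMA_LSO_CRPTEN_CTRL
 * and drops BGE_FLAG_RDMA_BUG so this check is not repeated.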
*/ if (stats->ifHCOutUcastPkts + stats->ifHCOutMulticastPkts + stats->ifHCOutBroadcastPkts > BGE_NUM_RDMA_CHANNELS) { val = CSR_READ_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL); if (sc->bge_asicrev == BGE_ASICREV_BCM5719) val &= ~BGE_RDMA_TX_LENGTH_WA_5719; else val &= ~BGE_RDMA_TX_LENGTH_WA_5720; CSR_WRITE_4(sc, BGE_RDMA_LSO_CRPTEN_CTRL, val); sc->bge_flags &= ~BGE_FLAG_RDMA_BUG; } } } static void bge_stats_clear_regs(struct bge_softc *sc) { CSR_READ_4(sc, BGE_TX_MAC_STATS_OCTETS); CSR_READ_4(sc, BGE_TX_MAC_STATS_COLLS); CSR_READ_4(sc, BGE_TX_MAC_STATS_XON_SENT); CSR_READ_4(sc, BGE_TX_MAC_STATS_XOFF_SENT); CSR_READ_4(sc, BGE_TX_MAC_STATS_ERRORS); CSR_READ_4(sc, BGE_TX_MAC_STATS_SINGLE_COLL); CSR_READ_4(sc, BGE_TX_MAC_STATS_MULTI_COLL); CSR_READ_4(sc, BGE_TX_MAC_STATS_DEFERRED); CSR_READ_4(sc, BGE_TX_MAC_STATS_EXCESS_COLL); CSR_READ_4(sc, BGE_TX_MAC_STATS_LATE_COLL); CSR_READ_4(sc, BGE_TX_MAC_STATS_UCAST); CSR_READ_4(sc, BGE_TX_MAC_STATS_MCAST); CSR_READ_4(sc, BGE_TX_MAC_STATS_BCAST); CSR_READ_4(sc, BGE_RX_MAC_STATS_OCTESTS); CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAGMENTS); CSR_READ_4(sc, BGE_RX_MAC_STATS_UCAST); CSR_READ_4(sc, BGE_RX_MAC_STATS_MCAST); CSR_READ_4(sc, BGE_RX_MAC_STATS_BCAST); CSR_READ_4(sc, BGE_RX_MAC_STATS_FCS_ERRORS); CSR_READ_4(sc, BGE_RX_MAC_STATS_ALGIN_ERRORS); CSR_READ_4(sc, BGE_RX_MAC_STATS_XON_RCVD); CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_RCVD); CSR_READ_4(sc, BGE_RX_MAC_STATS_CTRL_RCVD); CSR_READ_4(sc, BGE_RX_MAC_STATS_XOFF_ENTERED); CSR_READ_4(sc, BGE_RX_MAC_STATS_FRAME_TOO_LONG); CSR_READ_4(sc, BGE_RX_MAC_STATS_JABBERS); CSR_READ_4(sc, BGE_RX_MAC_STATS_UNDERSIZE); CSR_READ_4(sc, BGE_RXLP_LOCSTAT_FILTDROP); CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_WRQ_FULL); CSR_READ_4(sc, BGE_RXLP_LOCSTAT_DMA_HPWRQ_FULL); CSR_READ_4(sc, BGE_RXLP_LOCSTAT_OUT_OF_BDS); CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS); CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_ERRORS); CSR_READ_4(sc, BGE_RXLP_LOCSTAT_RXTHRESH_HIT); } static void bge_stats_update(struct bge_softc *sc) { if_t ifp; bus_size_t stats; uint32_t cnt; /* current register value */ ifp = sc->bge_ifp; stats = BGE_MEMWIN_START + BGE_STATS_BLOCK; #define READ_STAT(sc, stats, stat) \ CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat)) cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo); if_inc_counter(ifp, IFCOUNTER_COLLISIONS, cnt - sc->bge_tx_collisions); sc->bge_tx_collisions = cnt; cnt = READ_STAT(sc, stats, nicNoMoreRxBDs.bge_addr_lo); if_inc_counter(ifp, IFCOUNTER_IERRORS, cnt - sc->bge_rx_nobds); sc->bge_rx_nobds = cnt; cnt = READ_STAT(sc, stats, ifInErrors.bge_addr_lo); if_inc_counter(ifp, IFCOUNTER_IERRORS, cnt - sc->bge_rx_inerrs); sc->bge_rx_inerrs = cnt; cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo); if_inc_counter(ifp, IFCOUNTER_IERRORS, cnt - sc->bge_rx_discards); sc->bge_rx_discards = cnt; cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo); if_inc_counter(ifp, IFCOUNTER_OERRORS, cnt - sc->bge_tx_discards); sc->bge_tx_discards = cnt; #undef READ_STAT } /* * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason. * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD, * but when such padded frames employ the bge IP/TCP checksum offload, * the hardware checksum assist gives incorrect results (possibly * from incorporating its own padding into the UDP/TCP checksum; who knows). * If we pad such runts with zeros, the onboard checksum comes out correct. 
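 * As a purely illustrative example: a 42-byte UDP frame would receive
 * ETHER_MIN_NOPAD - 42 zero bytes of padding, written either into the
 * trailing space of the last mbuf or into a freshly appended one.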
*/ static __inline int bge_cksum_pad(struct mbuf *m) { int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len; struct mbuf *last; /* If there's only the packet-header and we can pad there, use it. */ if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) && M_TRAILINGSPACE(m) >= padlen) { last = m; } else { /* * Walk packet chain to find last mbuf. We will either * pad there, or append a new mbuf and pad it. */ for (last = m; last->m_next != NULL; last = last->m_next); if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) { /* Allocate new empty mbuf, pad it. Compact later. */ struct mbuf *n; MGET(n, M_NOWAIT, MT_DATA); if (n == NULL) return (ENOBUFS); n->m_len = 0; last->m_next = n; last = n; } } /* Now zero the pad area, to avoid the bge cksum-assist bug. */ memset(mtod(last, caddr_t) + last->m_len, 0, padlen); last->m_len += padlen; m->m_pkthdr.len += padlen; return (0); } static struct mbuf * bge_check_short_dma(struct mbuf *m) { struct mbuf *n; int found; /* * If device receive two back-to-back send BDs with less than * or equal to 8 total bytes then the device may hang. The two * back-to-back send BDs must in the same frame for this failure * to occur. Scan mbuf chains and see whether two back-to-back * send BDs are there. If this is the case, allocate new mbuf * and copy the frame to workaround the silicon bug. */ for (n = m, found = 0; n != NULL; n = n->m_next) { if (n->m_len < 8) { found++; if (found > 1) break; continue; } found = 0; } if (found > 1) { n = m_defrag(m, M_NOWAIT); if (n == NULL) m_freem(m); } else n = m; return (n); } static struct mbuf * bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss, uint16_t *flags) { struct ip *ip; struct tcphdr *tcp; struct mbuf *n; uint16_t hlen; uint32_t poff; if (M_WRITABLE(m) == 0) { /* Get a writable copy. */ n = m_dup(m, M_NOWAIT); m_freem(m); if (n == NULL) return (NULL); m = n; } m = m_pullup(m, sizeof(struct ether_header) + sizeof(struct ip)); if (m == NULL) return (NULL); ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header)); poff = sizeof(struct ether_header) + (ip->ip_hl << 2); m = m_pullup(m, poff + sizeof(struct tcphdr)); if (m == NULL) return (NULL); tcp = (struct tcphdr *)(mtod(m, char *) + poff); m = m_pullup(m, poff + (tcp->th_off << 2)); if (m == NULL) return (NULL); /* * It seems controller doesn't modify IP length and TCP pseudo * checksum. These checksum computed by upper stack should be 0. */ *mss = m->m_pkthdr.tso_segsz; ip = (struct ip *)(mtod(m, char *) + sizeof(struct ether_header)); ip->ip_sum = 0; ip->ip_len = htons(*mss + (ip->ip_hl << 2) + (tcp->th_off << 2)); /* Clear pseudo checksum computed by TCP stack. */ tcp = (struct tcphdr *)(mtod(m, char *) + poff); tcp->th_sum = 0; /* * Broadcom controllers uses different descriptor format for * TSO depending on ASIC revision. Due to TSO-capable firmware * license issue and lower performance of firmware based TSO * we only support hardware based TSO. */ /* Calculate header length, incl. TCP/IP options, in 32 bit units. */ hlen = ((ip->ip_hl << 2) + (tcp->th_off << 2)) >> 2; if (sc->bge_flags & BGE_FLAG_TSO3) { /* * For BCM5717 and newer controllers, hardware based TSO * uses the 14 lower bits of the bge_mss field to store the * MSS and the upper 2 bits to store the lowest 2 bits of * the IP/TCP header length. The upper 6 bits of the header * length are stored in the bge_flags[14:10,4] field. Jumbo * frames are supported. 
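 * Illustrative example (hypothetical header sizes): with a 20-byte IP
 * header and a 40-byte TCP header, hlen is 15 words, so the code below
 * ORs (15 & 0x3) << 14 = 0xC000 into *mss and
 * ((15 & 0xF8) << 7) | ((15 & 0x4) << 2) = 0x0410 into *flags.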
*/ *mss |= ((hlen & 0x3) << 14); *flags |= ((hlen & 0xF8) << 7) | ((hlen & 0x4) << 2); } else { /* * For BCM5755 and newer controllers, hardware based TSO uses * the lower 11 bits to store the MSS and the upper 5 bits to * store the IP/TCP header length. Jumbo frames are not * supported. */ *mss |= (hlen << 11); } return (m); } /* * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data * pointers to descriptors. */ static int bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx) { bus_dma_segment_t segs[BGE_NSEG_NEW]; bus_dmamap_t map; struct bge_tx_bd *d; struct mbuf *m = *m_head; uint32_t idx = *txidx; uint16_t csum_flags, mss, vlan_tag; int nsegs, i, error; csum_flags = 0; mss = 0; vlan_tag = 0; if ((sc->bge_flags & BGE_FLAG_SHORT_DMA_BUG) != 0 && m->m_next != NULL) { *m_head = bge_check_short_dma(m); if (*m_head == NULL) return (ENOBUFS); m = *m_head; } if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { *m_head = m = bge_setup_tso(sc, m, &mss, &csum_flags); if (*m_head == NULL) return (ENOBUFS); csum_flags |= BGE_TXBDFLAG_CPU_PRE_DMA | BGE_TXBDFLAG_CPU_POST_DMA; } else if ((m->m_pkthdr.csum_flags & sc->bge_csum_features) != 0) { if (m->m_pkthdr.csum_flags & CSUM_IP) csum_flags |= BGE_TXBDFLAG_IP_CSUM; if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) { csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; if (m->m_pkthdr.len < ETHER_MIN_NOPAD && (error = bge_cksum_pad(m)) != 0) { m_freem(m); *m_head = NULL; return (error); } } } if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) { if (sc->bge_flags & BGE_FLAG_JUMBO_FRAME && m->m_pkthdr.len > ETHER_MAX_LEN) csum_flags |= BGE_TXBDFLAG_JUMBO_FRAME; if (sc->bge_forced_collapse > 0 && (sc->bge_flags & BGE_FLAG_PCIE) != 0 && m->m_next != NULL) { /* * Forcedly collapse mbuf chains to overcome hardware * limitation which only support a single outstanding * DMA read operation. */ if (sc->bge_forced_collapse == 1) m = m_defrag(m, M_NOWAIT); else m = m_collapse(m, M_NOWAIT, sc->bge_forced_collapse); if (m == NULL) m = *m_head; *m_head = m; } } map = sc->bge_cdata.bge_tx_dmamap[idx]; error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (error == EFBIG) { m = m_collapse(m, M_NOWAIT, BGE_NSEG_NEW); if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } *m_head = m; error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (error) { m_freem(m); *m_head = NULL; return (error); } } else if (error != 0) return (error); /* Check if we have enough free send BDs. */ if (sc->bge_txcnt + nsegs >= BGE_TX_RING_CNT) { bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map); return (ENOBUFS); } bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE); if (m->m_flags & M_VLANTAG) { csum_flags |= BGE_TXBDFLAG_VLAN_TAG; vlan_tag = m->m_pkthdr.ether_vtag; } if (sc->bge_asicrev == BGE_ASICREV_BCM5762 && (m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { /* * 5725 family of devices corrupts TSO packets when TSO DMA * buffers cross into regions which are within MSS bytes of * a 4GB boundary. If we encounter the condition, drop the * packet. 
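 * The descriptor loop below detects this case by checking whether
 * bge_addr_lo + ds_len + mss wraps the low 32 bits; if it does, the walk
 * stops early, the DMA map is unloaded and the frame is dropped with EIO.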
*/ for (i = 0; ; i++) { d = &sc->bge_ldata.bge_tx_ring[idx]; d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr); d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr); d->bge_len = segs[i].ds_len; if (d->bge_addr.bge_addr_lo + segs[i].ds_len + mss < d->bge_addr.bge_addr_lo) break; d->bge_flags = csum_flags; d->bge_vlan_tag = vlan_tag; d->bge_mss = mss; if (i == nsegs - 1) break; BGE_INC(idx, BGE_TX_RING_CNT); } if (i != nsegs - 1) { bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map); m_freem(*m_head); *m_head = NULL; return (EIO); } } else { for (i = 0; ; i++) { d = &sc->bge_ldata.bge_tx_ring[idx]; d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr); d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr); d->bge_len = segs[i].ds_len; d->bge_flags = csum_flags; d->bge_vlan_tag = vlan_tag; d->bge_mss = mss; if (i == nsegs - 1) break; BGE_INC(idx, BGE_TX_RING_CNT); } } /* Mark the last segment as end of packet... */ d->bge_flags |= BGE_TXBDFLAG_END; /* * Insure that the map for this transmission * is placed at the array index of the last descriptor * in this chain. */ sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx]; sc->bge_cdata.bge_tx_dmamap[idx] = map; sc->bge_cdata.bge_tx_chain[idx] = m; sc->bge_txcnt += nsegs; BGE_INC(idx, BGE_TX_RING_CNT); *txidx = idx; return (0); } /* * Main transmit routine. To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit descriptors. */ static void bge_start_locked(if_t ifp) { struct bge_softc *sc; struct mbuf *m_head; uint32_t prodidx; int count; sc = if_getsoftc(ifp); BGE_LOCK_ASSERT(sc); if (!sc->bge_link || (if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; prodidx = sc->bge_tx_prodidx; for (count = 0; !if_sendq_empty(ifp);) { if (sc->bge_txcnt > BGE_TX_RING_CNT - 16) { if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); break; } m_head = if_dequeue(ifp); if (m_head == NULL) break; /* * Pack the data into the transmit ring. If we * don't have room, set the OACTIVE flag and wait * for the NIC to drain the ring. */ if (bge_encap(sc, &m_head, &prodidx)) { if (m_head == NULL) break; if_sendq_prepend(ifp, m_head); if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); break; } ++count; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ if_bpfmtap(ifp, m_head); } if (count > 0) bge_start_tx(sc, prodidx); } static void bge_start_tx(struct bge_softc *sc, uint32_t prodidx) { bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag, sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_PREWRITE); /* Transmit. */ bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); /* 5700 b2 errata */ if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); sc->bge_tx_prodidx = prodidx; /* Set a timeout in case the chip goes out to lunch. */ sc->bge_timer = BGE_TX_TIMEOUT; } /* * Main transmit routine. To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit descriptors. */ static void bge_start(if_t ifp) { struct bge_softc *sc; sc = if_getsoftc(ifp); BGE_LOCK(sc); bge_start_locked(ifp); BGE_UNLOCK(sc); } static void bge_init_locked(struct bge_softc *sc) { if_t ifp; uint16_t *m; uint32_t mode; BGE_LOCK_ASSERT(sc); ifp = sc->bge_ifp; if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) return; /* Cancel pending I/O and flush buffers. 
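 * bge_stop() below quiesces the chip; the firmware is then notified, the
 * chip reset, and the rings and state machines rebuilt via bge_chipinit()
 * and bge_blockinit().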
*/ bge_stop(sc); bge_stop_fw(sc); bge_sig_pre_reset(sc, BGE_RESET_START); bge_reset(sc); bge_sig_legacy(sc, BGE_RESET_START); bge_sig_post_reset(sc, BGE_RESET_START); bge_chipinit(sc); /* * Init the various state machines, ring * control blocks and firmware. */ if (bge_blockinit(sc)) { device_printf(sc->bge_dev, "initialization failure\n"); return; } ifp = sc->bge_ifp; /* Specify MTU. */ CSR_WRITE_4(sc, BGE_RX_MTU, if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN + (if_getcapenable(ifp) & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0)); /* Load our MAC address. */ m = (uint16_t *)IF_LLADDR(sc->bge_ifp); CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); /* Program promiscuous mode. */ bge_setpromisc(sc); /* Program multicast filter. */ bge_setmulti(sc); /* Program VLAN tag stripping. */ bge_setvlan(sc); /* Override UDP checksum offloading. */ if (sc->bge_forced_udpcsum == 0) sc->bge_csum_features &= ~CSUM_UDP; else sc->bge_csum_features |= CSUM_UDP; if (if_getcapabilities(ifp) & IFCAP_TXCSUM && if_getcapenable(ifp) & IFCAP_TXCSUM) { if_sethwassistbits(ifp, 0, (BGE_CSUM_FEATURES | CSUM_UDP)); if_sethwassistbits(ifp, sc->bge_csum_features, 0); } /* Init RX ring. */ if (bge_init_rx_ring_std(sc) != 0) { device_printf(sc->bge_dev, "no memory for std Rx buffers.\n"); bge_stop(sc); return; } /* * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's * memory to insure that the chip has in fact read the first * entry of the ring. */ if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) { uint32_t v, i; for (i = 0; i < 10; i++) { DELAY(20); v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8); if (v == (MCLBYTES - ETHER_ALIGN)) break; } if (i == 10) device_printf (sc->bge_dev, "5705 A0 chip failed to load RX ring\n"); } /* Init jumbo RX ring. */ if (BGE_IS_JUMBO_CAPABLE(sc) && if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN)) { if (bge_init_rx_ring_jumbo(sc) != 0) { device_printf(sc->bge_dev, "no memory for jumbo Rx buffers.\n"); bge_stop(sc); return; } } /* Init our RX return ring index. */ sc->bge_rx_saved_considx = 0; /* Init our RX/TX stat counters. */ sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0; /* Init TX ring. */ bge_init_tx_ring(sc); /* Enable TX MAC state machine lockup fix. */ mode = CSR_READ_4(sc, BGE_TX_MODE); if (BGE_IS_5755_PLUS(sc) || sc->bge_asicrev == BGE_ASICREV_BCM5906) mode |= BGE_TXMODE_MBUF_LOCKUP_FIX; if (sc->bge_asicrev == BGE_ASICREV_BCM5720 || sc->bge_asicrev == BGE_ASICREV_BCM5762) { mode &= ~(BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE); mode |= CSR_READ_4(sc, BGE_TX_MODE) & (BGE_TXMODE_JMB_FRM_LEN | BGE_TXMODE_CNT_DN_MODE); } /* Turn on transmitter. */ CSR_WRITE_4(sc, BGE_TX_MODE, mode | BGE_TXMODE_ENABLE); DELAY(100); /* Turn on receiver. */ mode = CSR_READ_4(sc, BGE_RX_MODE); if (BGE_IS_5755_PLUS(sc)) mode |= BGE_RXMODE_IPV6_ENABLE; if (sc->bge_asicrev == BGE_ASICREV_BCM5762) mode |= BGE_RXMODE_IPV4_FRAG_FIX; CSR_WRITE_4(sc,BGE_RX_MODE, mode | BGE_RXMODE_ENABLE); DELAY(10); /* * Set the number of good frames to receive after RX MBUF * Low Watermark has been reached. After the RX MAC receives * this number of frames, it will drop subsequent incoming * frames until the MBUF High Watermark is reached. */ if (BGE_IS_57765_PLUS(sc)) CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 1); else CSR_WRITE_4(sc, BGE_MAX_RX_FRAME_LOWAT, 2); /* Clear MAC statistics. */ if (BGE_IS_5705_PLUS(sc)) bge_stats_clear_regs(sc); /* Tell firmware we're alive. 
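 * Setting BGE_MODECTL_STACKUP signals the ASF/IPMI firmware that the host
 * driver is up; bge_stop() clears it again unless ASF keep-alive is needed.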
*/ BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); #ifdef DEVICE_POLLING /* Disable interrupts if we are polling. */ if (if_getcapenable(ifp) & IFCAP_POLLING) { BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); } else #endif /* Enable host interrupts. */ { BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA); BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); bge_writembx(sc, BGE_MBX_IRQ0_LO, 0); } if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); bge_ifmedia_upd_locked(ifp); callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc); } static void bge_init(void *xsc) { struct bge_softc *sc = xsc; BGE_LOCK(sc); bge_init_locked(sc); BGE_UNLOCK(sc); } /* * Set media options. */ static int bge_ifmedia_upd(if_t ifp) { struct bge_softc *sc = if_getsoftc(ifp); int res; BGE_LOCK(sc); res = bge_ifmedia_upd_locked(ifp); BGE_UNLOCK(sc); return (res); } static int bge_ifmedia_upd_locked(if_t ifp) { struct bge_softc *sc = if_getsoftc(ifp); struct mii_data *mii; struct mii_softc *miisc; struct ifmedia *ifm; BGE_LOCK_ASSERT(sc); ifm = &sc->bge_ifmedia; /* If this is a 1000baseX NIC, enable the TBI port. */ if (sc->bge_flags & BGE_FLAG_TBI) { if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); switch(IFM_SUBTYPE(ifm->ifm_media)) { case IFM_AUTO: /* * The BCM5704 ASIC appears to have a special * mechanism for programming the autoneg * advertisement registers in TBI mode. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5704) { uint32_t sgdig; sgdig = CSR_READ_4(sc, BGE_SGDIG_STS); if (sgdig & BGE_SGDIGSTS_DONE) { CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0); sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG); sgdig |= BGE_SGDIGCFG_AUTO | BGE_SGDIGCFG_PAUSE_CAP | BGE_SGDIGCFG_ASYM_PAUSE; CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig | BGE_SGDIGCFG_SEND); DELAY(5); CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig); } } break; case IFM_1000_SX: if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); } else { BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); } DELAY(40); break; default: return (EINVAL); } return (0); } sc->bge_link_evt++; mii = device_get_softc(sc->bge_miibus); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) PHY_RESET(miisc); mii_mediachg(mii); /* * Force an interrupt so that we will call bge_link_upd * if needed and clear any pending link state attention. * Without this we are not getting any further interrupts * for link state changes and thus will not UP the link and * not be able to send in bge_start_locked. The only * way to get things working was to receive a packet and * get an RX intr. * bge_tick should help for fiber cards and we might not * need to do this here if BGE_FLAG_TBI is set but as * we poll for fiber anyway it should not harm. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5700 || sc->bge_flags & BGE_FLAG_5788) BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); else BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); return (0); } /* * Report current media status. 
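 * TBI (fiber) interfaces report link and duplex straight from the MAC
 * status/mode registers below; copper interfaces defer to mii_pollstat().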
*/ static void bge_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) { struct bge_softc *sc = if_getsoftc(ifp); struct mii_data *mii; BGE_LOCK(sc); if ((if_getflags(ifp) & IFF_UP) == 0) { BGE_UNLOCK(sc); return; } if (sc->bge_flags & BGE_FLAG_TBI) { ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_TBI_PCS_SYNCHED) ifmr->ifm_status |= IFM_ACTIVE; else { ifmr->ifm_active |= IFM_NONE; BGE_UNLOCK(sc); return; } ifmr->ifm_active |= IFM_1000_SX; if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) ifmr->ifm_active |= IFM_HDX; else ifmr->ifm_active |= IFM_FDX; BGE_UNLOCK(sc); return; } mii = device_get_softc(sc->bge_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; BGE_UNLOCK(sc); } static int bge_ioctl(if_t ifp, u_long command, caddr_t data) { struct bge_softc *sc = if_getsoftc(ifp); struct ifreq *ifr = (struct ifreq *) data; struct mii_data *mii; int flags, mask, error = 0; switch (command) { case SIOCSIFMTU: if (BGE_IS_JUMBO_CAPABLE(sc) || (sc->bge_flags & BGE_FLAG_JUMBO_STD)) { if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > BGE_JUMBO_MTU) { error = EINVAL; break; } } else if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU) { error = EINVAL; break; } BGE_LOCK(sc); if (if_getmtu(ifp) != ifr->ifr_mtu) { if_setmtu(ifp, ifr->ifr_mtu); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); bge_init_locked(sc); } } BGE_UNLOCK(sc); break; case SIOCSIFFLAGS: BGE_LOCK(sc); if (if_getflags(ifp) & IFF_UP) { /* * If only the state of the PROMISC flag changed, * then just use the 'set promisc mode' command * instead of reinitializing the entire NIC. Doing * a full re-init means reloading the firmware and * waiting for it to start up, which may take a * second or two. Similarly for ALLMULTI. 
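 * The XOR against the saved bge_if_flags below isolates exactly which
 * flag bits changed, so only the affected filter is reprogrammed.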
*/ if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { flags = if_getflags(ifp) ^ sc->bge_if_flags; if (flags & IFF_PROMISC) bge_setpromisc(sc); if (flags & IFF_ALLMULTI) bge_setmulti(sc); } else bge_init_locked(sc); } else { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { bge_stop(sc); } } sc->bge_if_flags = if_getflags(ifp); BGE_UNLOCK(sc); error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { BGE_LOCK(sc); bge_setmulti(sc); BGE_UNLOCK(sc); error = 0; } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: if (sc->bge_flags & BGE_FLAG_TBI) { error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia, command); } else { mii = device_get_softc(sc->bge_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); } break; case SIOCSIFCAP: mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { if (ifr->ifr_reqcap & IFCAP_POLLING) { error = ether_poll_register(bge_poll, ifp); if (error) return (error); BGE_LOCK(sc); BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); if_setcapenablebit(ifp, IFCAP_POLLING, 0); BGE_UNLOCK(sc); } else { error = ether_poll_deregister(ifp); /* Enable interrupt even in error case */ BGE_LOCK(sc); BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); bge_writembx(sc, BGE_MBX_IRQ0_LO, 0); if_setcapenablebit(ifp, 0, IFCAP_POLLING); BGE_UNLOCK(sc); } } #endif if ((mask & IFCAP_TXCSUM) != 0 && (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) { if_togglecapenable(ifp, IFCAP_TXCSUM); if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0) if_sethwassistbits(ifp, sc->bge_csum_features, 0); else if_sethwassistbits(ifp, 0, sc->bge_csum_features); } if ((mask & IFCAP_RXCSUM) != 0 && (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0) if_togglecapenable(ifp, IFCAP_RXCSUM); if ((mask & IFCAP_TSO4) != 0 && (if_getcapabilities(ifp) & IFCAP_TSO4) != 0) { if_togglecapenable(ifp, IFCAP_TSO4); if ((if_getcapenable(ifp) & IFCAP_TSO4) != 0) if_sethwassistbits(ifp, CSUM_TSO, 0); else if_sethwassistbits(ifp, 0, CSUM_TSO); } if (mask & IFCAP_VLAN_MTU) { if_togglecapenable(ifp, IFCAP_VLAN_MTU); if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); bge_init(sc); } if ((mask & IFCAP_VLAN_HWTSO) != 0 && (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0) if_togglecapenable(ifp, IFCAP_VLAN_HWTSO); if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) { if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING); if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0) if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWTSO); BGE_LOCK(sc); bge_setvlan(sc); BGE_UNLOCK(sc); } #ifdef VLAN_CAPABILITIES if_vlancap(ifp); #endif break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static void bge_watchdog(struct bge_softc *sc) { if_t ifp; uint32_t status; BGE_LOCK_ASSERT(sc); if (sc->bge_timer == 0 || --sc->bge_timer) return; /* If pause frames are active then don't reset the hardware. */ if ((CSR_READ_4(sc, BGE_RX_MODE) & BGE_RXMODE_FLOWCTL_ENABLE) != 0) { status = CSR_READ_4(sc, BGE_RX_STS); if ((status & BGE_RXSTAT_REMOTE_XOFFED) != 0) { /* * If link partner has us in XOFF state then wait for * the condition to clear. */ CSR_WRITE_4(sc, BGE_RX_STS, status); sc->bge_timer = BGE_TX_TIMEOUT; return; } else if ((status & BGE_RXSTAT_RCVD_XOFF) != 0 && (status & BGE_RXSTAT_RCVD_XON) != 0) { /* * If link partner has us in XOFF state then wait for * the condition to clear. 
*/ CSR_WRITE_4(sc, BGE_RX_STS, status); sc->bge_timer = BGE_TX_TIMEOUT; return; } /* * Any other condition is unexpected and the controller * should be reset. */ } ifp = sc->bge_ifp; if_printf(ifp, "watchdog timeout -- resetting\n"); if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); bge_init_locked(sc); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } static void bge_stop_block(struct bge_softc *sc, bus_size_t reg, uint32_t bit) { int i; BGE_CLRBIT(sc, reg, bit); for (i = 0; i < BGE_TIMEOUT; i++) { if ((CSR_READ_4(sc, reg) & bit) == 0) return; DELAY(100); } } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void bge_stop(struct bge_softc *sc) { if_t ifp; BGE_LOCK_ASSERT(sc); ifp = sc->bge_ifp; callout_stop(&sc->bge_stat_ch); /* Disable host interrupts. */ BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); /* * Tell firmware we're shutting down. */ bge_stop_fw(sc); bge_sig_pre_reset(sc, BGE_RESET_SHUTDOWN); /* * Disable all of the receiver blocks. */ bge_stop_block(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); bge_stop_block(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); bge_stop_block(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); if (BGE_IS_5700_FAMILY(sc)) bge_stop_block(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); bge_stop_block(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); bge_stop_block(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); bge_stop_block(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); /* * Disable all of the transmit blocks. */ bge_stop_block(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); bge_stop_block(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); bge_stop_block(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); bge_stop_block(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); bge_stop_block(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); if (BGE_IS_5700_FAMILY(sc)) bge_stop_block(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); bge_stop_block(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); /* * Shut down all of the memory managers and related * state machines. */ bge_stop_block(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); bge_stop_block(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); if (BGE_IS_5700_FAMILY(sc)) bge_stop_block(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); if (!(BGE_IS_5705_PLUS(sc))) { BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); } /* Update MAC statistics. */ if (BGE_IS_5705_PLUS(sc)) bge_stats_update_regs(sc); bge_reset(sc); bge_sig_legacy(sc, BGE_RESET_SHUTDOWN); bge_sig_post_reset(sc, BGE_RESET_SHUTDOWN); /* * Keep the ASF firmware running if up. */ if (sc->bge_asf_mode & ASF_STACKUP) BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); else BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); /* Free the RX lists. */ bge_free_rx_ring_std(sc); /* Free jumbo RX list. */ if (BGE_IS_JUMBO_CAPABLE(sc)) bge_free_rx_ring_jumbo(sc); /* Free TX buffers. */ bge_free_tx_ring(sc); sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; /* Clear MAC's link state (PHY may still have link UP). */ if (bootverbose && sc->bge_link) if_printf(sc->bge_ifp, "link DOWN\n"); sc->bge_link = 0; if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)); } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. 
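 * bge_shutdown() and bge_suspend() below simply call bge_stop() under the
 * driver lock; bge_resume() reinitializes and restarts TX if the
 * interface was marked up.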
*/ static int bge_shutdown(device_t dev) { struct bge_softc *sc; sc = device_get_softc(dev); BGE_LOCK(sc); bge_stop(sc); BGE_UNLOCK(sc); return (0); } static int bge_suspend(device_t dev) { struct bge_softc *sc; sc = device_get_softc(dev); BGE_LOCK(sc); bge_stop(sc); BGE_UNLOCK(sc); return (0); } static int bge_resume(device_t dev) { struct bge_softc *sc; if_t ifp; sc = device_get_softc(dev); BGE_LOCK(sc); ifp = sc->bge_ifp; if (if_getflags(ifp) & IFF_UP) { bge_init_locked(sc); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) bge_start_locked(ifp); } BGE_UNLOCK(sc); return (0); } static void bge_link_upd(struct bge_softc *sc) { struct mii_data *mii; uint32_t link, status; BGE_LOCK_ASSERT(sc); /* Clear 'pending link event' flag. */ sc->bge_link_evt = 0; /* * Process link state changes. * Grrr. The link status word in the status block does * not work correctly on the BCM5700 rev AX and BX chips, * according to all available information. Hence, we have * to enable MII interrupts in order to properly obtain * async link changes. Unfortunately, this also means that * we have to read the MAC status register to detect link * changes, thereby adding an additional register access to * the interrupt handler. * * XXX: perhaps link state detection procedure used for * BGE_CHIPID_BCM5700_B2 can be used for others BCM5700 revisions. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && sc->bge_chipid != BGE_CHIPID_BCM5700_B2) { status = CSR_READ_4(sc, BGE_MAC_STS); if (status & BGE_MACSTAT_MI_INTERRUPT) { mii = device_get_softc(sc->bge_miibus); mii_pollstat(mii); if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { sc->bge_link++; if (bootverbose) if_printf(sc->bge_ifp, "link UP\n"); } else if (sc->bge_link && (!(mii->mii_media_status & IFM_ACTIVE) || IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) { sc->bge_link = 0; if (bootverbose) if_printf(sc->bge_ifp, "link DOWN\n"); } /* Clear the interrupt. */ CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_MI_INTERRUPT); bge_miibus_readreg(sc->bge_dev, sc->bge_phy_addr, BRGPHY_MII_ISR); bge_miibus_writereg(sc->bge_dev, sc->bge_phy_addr, BRGPHY_MII_IMR, BRGPHY_INTRS); } return; } if (sc->bge_flags & BGE_FLAG_TBI) { status = CSR_READ_4(sc, BGE_MAC_STS); if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) { if (!sc->bge_link) { sc->bge_link++; if (sc->bge_asicrev == BGE_ASICREV_BCM5704) { BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_TBI_SEND_CFGS); DELAY(40); } CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); if (bootverbose) if_printf(sc->bge_ifp, "link UP\n"); if_link_state_change(sc->bge_ifp, LINK_STATE_UP); } } else if (sc->bge_link) { sc->bge_link = 0; if (bootverbose) if_printf(sc->bge_ifp, "link DOWN\n"); if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN); } } else if ((sc->bge_mi_mode & BGE_MIMODE_AUTOPOLL) != 0) { /* * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED bit * in status word always set. Workaround this bug by reading * PHY link status directly. */ link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 
1 : 0; if (link != sc->bge_link || sc->bge_asicrev == BGE_ASICREV_BCM5700) { mii = device_get_softc(sc->bge_miibus); mii_pollstat(mii); if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { sc->bge_link++; if (bootverbose) if_printf(sc->bge_ifp, "link UP\n"); } else if (sc->bge_link && (!(mii->mii_media_status & IFM_ACTIVE) || IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) { sc->bge_link = 0; if (bootverbose) if_printf(sc->bge_ifp, "link DOWN\n"); } } } else { /* * For controllers that call mii_tick, we have to poll * link status. */ mii = device_get_softc(sc->bge_miibus); mii_pollstat(mii); bge_miibus_statchg(sc->bge_dev); } /* Disable MAC attention when link is up. */ CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | BGE_MACSTAT_LINK_CHANGED); } static void bge_add_sysctls(struct bge_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; int unit; ctx = device_get_sysctl_ctx(sc->bge_dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev)); #ifdef BGE_REGISTER_DEBUG SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info", CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I", "Debug Information"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read", CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I", "MAC Register Read"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ape_read", CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_ape_read, "I", "APE Register Read"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read", CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I", "Memory Read"); #endif unit = device_get_unit(sc->bge_dev); /* * A common design characteristic for many Broadcom client controllers * is that they only support a single outstanding DMA read operation * on the PCIe bus. This means that it will take twice as long to fetch * a TX frame that is split into header and payload buffers as it does * to fetch a single, contiguous TX frame (2 reads vs. 1 read). For * these controllers, coalescing buffers to reduce the number of memory * reads is effective way to get maximum performance(about 940Mbps). * Without collapsing TX buffers the maximum TCP bulk transfer * performance is about 850Mbps. However forcing coalescing mbufs * consumes a lot of CPU cycles, so leave it off by default. */ sc->bge_forced_collapse = 0; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_collapse", CTLFLAG_RWTUN, &sc->bge_forced_collapse, 0, "Number of fragmented TX buffers of a frame allowed before " "forced collapsing"); sc->bge_msi = 1; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "msi", CTLFLAG_RDTUN, &sc->bge_msi, 0, "Enable MSI"); /* * It seems all Broadcom controllers have a bug that can generate UDP * datagrams with checksum value 0 when TX UDP checksum offloading is * enabled. Generating UDP checksum value 0 is RFC 768 violation. * Even though the probability of generating such UDP datagrams is * low, I don't want to see FreeBSD boxes to inject such datagrams * into network so disable UDP checksum offloading by default. Users * still override this behavior by setting a sysctl variable, * dev.bge.0.forced_udpcsum. 
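 * (Usage note, with a hypothetical unit number: setting the loader
 * tunable or sysctl dev.bge.0.forced_udpcsum=1 and reinitializing the
 * interface turns UDP transmit checksum offloading back on.)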
*/ sc->bge_forced_udpcsum = 0; SYSCTL_ADD_INT(ctx, children, OID_AUTO, "forced_udpcsum", CTLFLAG_RWTUN, &sc->bge_forced_udpcsum, 0, "Enable UDP checksum offloading even if controller can " "generate UDP checksum value 0"); if (BGE_IS_5705_PLUS(sc)) bge_add_sysctl_stats_regs(sc, ctx, children); else bge_add_sysctl_stats(sc, ctx, children); } #define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \ SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \ sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \ desc) static void bge_add_sysctl_stats(struct bge_softc *sc, struct sysctl_ctx_list *ctx, struct sysctl_oid_list *parent) { struct sysctl_oid *tree; struct sysctl_oid_list *children, *schildren; tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD, NULL, "BGE Statistics"); schildren = children = SYSCTL_CHILDREN(tree); BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters", children, COSFramesDroppedDueToFilters, "FramesDroppedDueToFilters"); BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full", children, nicDmaWriteQueueFull, "DmaWriteQueueFull"); BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full", children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull"); BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors", children, nicNoMoreRxBDs, "NoMoreRxBDs"); BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames", children, ifInDiscards, "InputDiscards"); BGE_SYSCTL_STAT(sc, ctx, "Input Errors", children, ifInErrors, "InputErrors"); BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit", children, nicRecvThresholdHit, "RecvThresholdHit"); BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full", children, nicDmaReadQueueFull, "DmaReadQueueFull"); BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full", children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull"); BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full", children, nicSendDataCompQueueFull, "SendDataCompQueueFull"); BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index", children, nicRingSetSendProdIndex, "RingSetSendProdIndex"); BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update", children, nicRingStatusUpdate, "RingStatusUpdate"); BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts", children, nicInterrupts, "Interrupts"); BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts", children, nicAvoidedInterrupts, "AvoidedInterrupts"); BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit", children, nicSendThresholdHit, "SendThresholdHit"); tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD, NULL, "BGE RX Statistics"); children = SYSCTL_CHILDREN(tree); BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets", children, rxstats.ifHCInOctets, "ifHCInOctets"); BGE_SYSCTL_STAT(sc, ctx, "Fragments", children, rxstats.etherStatsFragments, "Fragments"); BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets", children, rxstats.ifHCInUcastPkts, "UnicastPkts"); BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets", children, rxstats.ifHCInMulticastPkts, "MulticastPkts"); BGE_SYSCTL_STAT(sc, ctx, "FCS Errors", children, rxstats.dot3StatsFCSErrors, "FCSErrors"); BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors", children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors"); BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received", children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived"); BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received", children, rxstats.xoffPauseFramesReceived, "xoffPauseFramesReceived"); BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received", children, 
rxstats.macControlFramesReceived, "ControlFramesReceived"); BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered", children, rxstats.xoffStateEntered, "xoffStateEntered"); BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long", children, rxstats.dot3StatsFramesTooLong, "FramesTooLong"); BGE_SYSCTL_STAT(sc, ctx, "Jabbers", children, rxstats.etherStatsJabbers, "Jabbers"); BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets", children, rxstats.etherStatsUndersizePkts, "UndersizePkts"); BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors", children, rxstats.inRangeLengthError, "inRangeLengthError"); BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors", children, rxstats.outRangeLengthError, "outRangeLengthError"); tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD, NULL, "BGE TX Statistics"); children = SYSCTL_CHILDREN(tree); BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets", children, txstats.ifHCOutOctets, "ifHCOutOctets"); BGE_SYSCTL_STAT(sc, ctx, "TX Collisions", children, txstats.etherStatsCollisions, "Collisions"); BGE_SYSCTL_STAT(sc, ctx, "XON Sent", children, txstats.outXonSent, "XonSent"); BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent", children, txstats.outXoffSent, "XoffSent"); BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done", children, txstats.flowControlDone, "flowControlDone"); BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors", children, txstats.dot3StatsInternalMacTransmitErrors, "InternalMacTransmitErrors"); BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames", children, txstats.dot3StatsSingleCollisionFrames, "SingleCollisionFrames"); BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames", children, txstats.dot3StatsMultipleCollisionFrames, "MultipleCollisionFrames"); BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions", children, txstats.dot3StatsDeferredTransmissions, "DeferredTransmissions"); BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions", children, txstats.dot3StatsExcessiveCollisions, "ExcessiveCollisions"); BGE_SYSCTL_STAT(sc, ctx, "Late Collisions", children, txstats.dot3StatsLateCollisions, "LateCollisions"); BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets", children, txstats.ifHCOutUcastPkts, "UnicastPkts"); BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets", children, txstats.ifHCOutMulticastPkts, "MulticastPkts"); BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets", children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts"); BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors", children, txstats.dot3StatsCarrierSenseErrors, "CarrierSenseErrors"); BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards", children, txstats.ifOutDiscards, "Discards"); BGE_SYSCTL_STAT(sc, ctx, "Outbound Errors", children, txstats.ifOutErrors, "Errors"); } #undef BGE_SYSCTL_STAT #define BGE_SYSCTL_STAT_ADD64(c, h, n, p, d) \ SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d) static void bge_add_sysctl_stats_regs(struct bge_softc *sc, struct sysctl_ctx_list *ctx, struct sysctl_oid_list *parent) { struct sysctl_oid *tree; struct sysctl_oid_list *child, *schild; struct bge_mac_stats *stats; stats = &sc->bge_mac_stats; tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD, NULL, "BGE Statistics"); schild = child = SYSCTL_CHILDREN(tree); BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesDroppedDueToFilters", &stats->FramesDroppedDueToFilters, "Frames Dropped Due to Filters"); BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteQueueFull", &stats->DmaWriteQueueFull, "NIC DMA Write Queue Full"); BGE_SYSCTL_STAT_ADD64(ctx, child, "DmaWriteHighPriQueueFull", &stats->DmaWriteHighPriQueueFull, "NIC DMA Write High Priority Queue Full"); 
BGE_SYSCTL_STAT_ADD64(ctx, child, "NoMoreRxBDs", &stats->NoMoreRxBDs, "NIC No More RX Buffer Descriptors"); BGE_SYSCTL_STAT_ADD64(ctx, child, "InputDiscards", &stats->InputDiscards, "Discarded Input Frames"); BGE_SYSCTL_STAT_ADD64(ctx, child, "InputErrors", &stats->InputErrors, "Input Errors"); BGE_SYSCTL_STAT_ADD64(ctx, child, "RecvThresholdHit", &stats->RecvThresholdHit, "NIC Recv Threshold Hit"); tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "rx", CTLFLAG_RD, NULL, "BGE RX Statistics"); child = SYSCTL_CHILDREN(tree); BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCInOctets", &stats->ifHCInOctets, "Inbound Octets"); BGE_SYSCTL_STAT_ADD64(ctx, child, "Fragments", &stats->etherStatsFragments, "Fragments"); BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts", &stats->ifHCInUcastPkts, "Inbound Unicast Packets"); BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts", &stats->ifHCInMulticastPkts, "Inbound Multicast Packets"); BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts", &stats->ifHCInBroadcastPkts, "Inbound Broadcast Packets"); BGE_SYSCTL_STAT_ADD64(ctx, child, "FCSErrors", &stats->dot3StatsFCSErrors, "FCS Errors"); BGE_SYSCTL_STAT_ADD64(ctx, child, "AlignmentErrors", &stats->dot3StatsAlignmentErrors, "Alignment Errors"); BGE_SYSCTL_STAT_ADD64(ctx, child, "xonPauseFramesReceived", &stats->xonPauseFramesReceived, "XON Pause Frames Received"); BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffPauseFramesReceived", &stats->xoffPauseFramesReceived, "XOFF Pause Frames Received"); BGE_SYSCTL_STAT_ADD64(ctx, child, "ControlFramesReceived", &stats->macControlFramesReceived, "MAC Control Frames Received"); BGE_SYSCTL_STAT_ADD64(ctx, child, "xoffStateEntered", &stats->xoffStateEntered, "XOFF State Entered"); BGE_SYSCTL_STAT_ADD64(ctx, child, "FramesTooLong", &stats->dot3StatsFramesTooLong, "Frames Too Long"); BGE_SYSCTL_STAT_ADD64(ctx, child, "Jabbers", &stats->etherStatsJabbers, "Jabbers"); BGE_SYSCTL_STAT_ADD64(ctx, child, "UndersizePkts", &stats->etherStatsUndersizePkts, "Undersized Packets"); tree = SYSCTL_ADD_NODE(ctx, schild, OID_AUTO, "tx", CTLFLAG_RD, NULL, "BGE TX Statistics"); child = SYSCTL_CHILDREN(tree); BGE_SYSCTL_STAT_ADD64(ctx, child, "ifHCOutOctets", &stats->ifHCOutOctets, "Outbound Octets"); BGE_SYSCTL_STAT_ADD64(ctx, child, "Collisions", &stats->etherStatsCollisions, "TX Collisions"); BGE_SYSCTL_STAT_ADD64(ctx, child, "XonSent", &stats->outXonSent, "XON Sent"); BGE_SYSCTL_STAT_ADD64(ctx, child, "XoffSent", &stats->outXoffSent, "XOFF Sent"); BGE_SYSCTL_STAT_ADD64(ctx, child, "InternalMacTransmitErrors", &stats->dot3StatsInternalMacTransmitErrors, "Internal MAC TX Errors"); BGE_SYSCTL_STAT_ADD64(ctx, child, "SingleCollisionFrames", &stats->dot3StatsSingleCollisionFrames, "Single Collision Frames"); BGE_SYSCTL_STAT_ADD64(ctx, child, "MultipleCollisionFrames", &stats->dot3StatsMultipleCollisionFrames, "Multiple Collision Frames"); BGE_SYSCTL_STAT_ADD64(ctx, child, "DeferredTransmissions", &stats->dot3StatsDeferredTransmissions, "Deferred Transmissions"); BGE_SYSCTL_STAT_ADD64(ctx, child, "ExcessiveCollisions", &stats->dot3StatsExcessiveCollisions, "Excessive Collisions"); BGE_SYSCTL_STAT_ADD64(ctx, child, "LateCollisions", &stats->dot3StatsLateCollisions, "Late Collisions"); BGE_SYSCTL_STAT_ADD64(ctx, child, "UnicastPkts", &stats->ifHCOutUcastPkts, "Outbound Unicast Packets"); BGE_SYSCTL_STAT_ADD64(ctx, child, "MulticastPkts", &stats->ifHCOutMulticastPkts, "Outbound Multicast Packets"); BGE_SYSCTL_STAT_ADD64(ctx, child, "BroadcastPkts", &stats->ifHCOutBroadcastPkts, "Outbound Broadcast Packets"); } #undef 
BGE_SYSCTL_STAT_ADD64 static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS) { struct bge_softc *sc; uint32_t result; int offset; sc = (struct bge_softc *)arg1; offset = arg2; result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset + offsetof(bge_hostaddr, bge_addr_lo)); return (sysctl_handle_int(oidp, &result, 0, req)); } #ifdef BGE_REGISTER_DEBUG static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS) { struct bge_softc *sc; uint16_t *sbdata; int error, result, sbsz; int i, j; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || (req->newptr == NULL)) return (error); if (result == 1) { sc = (struct bge_softc *)arg1; if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && sc->bge_chipid != BGE_CHIPID_BCM5700_C0) sbsz = BGE_STATUS_BLK_SZ; else sbsz = 32; sbdata = (uint16_t *)sc->bge_ldata.bge_status_block; printf("Status Block:\n"); BGE_LOCK(sc); bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); for (i = 0x0; i < sbsz / sizeof(uint16_t); ) { printf("%06x:", i); for (j = 0; j < 8; j++) printf(" %04x", sbdata[i++]); printf("\n"); } printf("Registers:\n"); for (i = 0x800; i < 0xA00; ) { printf("%06x:", i); for (j = 0; j < 8; j++) { printf(" %08x", CSR_READ_4(sc, i)); i += 4; } printf("\n"); } BGE_UNLOCK(sc); printf("Hardware Flags:\n"); if (BGE_IS_5717_PLUS(sc)) printf(" - 5717 Plus\n"); if (BGE_IS_5755_PLUS(sc)) printf(" - 5755 Plus\n"); if (BGE_IS_575X_PLUS(sc)) printf(" - 575X Plus\n"); if (BGE_IS_5705_PLUS(sc)) printf(" - 5705 Plus\n"); if (BGE_IS_5714_FAMILY(sc)) printf(" - 5714 Family\n"); if (BGE_IS_5700_FAMILY(sc)) printf(" - 5700 Family\n"); if (sc->bge_flags & BGE_FLAG_JUMBO) printf(" - Supports Jumbo Frames\n"); if (sc->bge_flags & BGE_FLAG_PCIX) printf(" - PCI-X Bus\n"); if (sc->bge_flags & BGE_FLAG_PCIE) printf(" - PCI Express Bus\n"); if (sc->bge_phy_flags & BGE_PHY_NO_3LED) printf(" - No 3 LEDs\n"); if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) printf(" - RX Alignment Bug\n"); } return (error); } static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS) { struct bge_softc *sc; int error; uint16_t result; uint32_t val; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || (req->newptr == NULL)) return (error); if (result < 0x8000) { sc = (struct bge_softc *)arg1; val = CSR_READ_4(sc, result); printf("reg 0x%06X = 0x%08X\n", result, val); } return (error); } static int bge_sysctl_ape_read(SYSCTL_HANDLER_ARGS) { struct bge_softc *sc; int error; uint16_t result; uint32_t val; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || (req->newptr == NULL)) return (error); if (result < 0x8000) { sc = (struct bge_softc *)arg1; val = APE_READ_4(sc, result); printf("reg 0x%06X = 0x%08X\n", result, val); } return (error); } static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS) { struct bge_softc *sc; int error; uint16_t result; uint32_t val; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || (req->newptr == NULL)) return (error); if (result < 0x8000) { sc = (struct bge_softc *)arg1; val = bge_readmem_ind(sc, result); printf("mem 0x%06X = 0x%08X\n", result, val); } return (error); } #endif static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]) { return (1); } static int bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[]) { uint32_t mac_addr; mac_addr = bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_HIGH_MB); if ((mac_addr >> 16) == 0x484b) { ether_addr[0] = (uint8_t)(mac_addr >> 8); ether_addr[1] = (uint8_t)mac_addr; mac_addr = 
bge_readmem_ind(sc, BGE_SRAM_MAC_ADDR_LOW_MB); ether_addr[2] = (uint8_t)(mac_addr >> 24); ether_addr[3] = (uint8_t)(mac_addr >> 16); ether_addr[4] = (uint8_t)(mac_addr >> 8); ether_addr[5] = (uint8_t)mac_addr; return (0); } return (1); } static int bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[]) { int mac_offset = BGE_EE_MAC_OFFSET; if (sc->bge_asicrev == BGE_ASICREV_BCM5906) mac_offset = BGE_EE_MAC_OFFSET_5906; return (bge_read_nvram(sc, ether_addr, mac_offset + 2, ETHER_ADDR_LEN)); } static int bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[]) { if (sc->bge_asicrev == BGE_ASICREV_BCM5906) return (1); return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)); } static int bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[]) { static const bge_eaddr_fcn_t bge_eaddr_funcs[] = { /* NOTE: Order is critical */ bge_get_eaddr_fw, bge_get_eaddr_mem, bge_get_eaddr_nvram, bge_get_eaddr_eeprom, NULL }; const bge_eaddr_fcn_t *func; for (func = bge_eaddr_funcs; *func != NULL; ++func) { if ((*func)(sc, eaddr) == 0) break; } return (*func == NULL ? ENXIO : 0); } static uint64_t bge_get_counter(if_t ifp, ift_counter cnt) { struct bge_softc *sc; struct bge_mac_stats *stats; sc = if_getsoftc(ifp); if (!BGE_IS_5705_PLUS(sc)) return (if_get_counter_default(ifp, cnt)); stats = &sc->bge_mac_stats; switch (cnt) { case IFCOUNTER_IERRORS: return (stats->NoMoreRxBDs + stats->InputDiscards + stats->InputErrors); case IFCOUNTER_COLLISIONS: return (stats->etherStatsCollisions); default: return (if_get_counter_default(ifp, cnt)); } } #ifdef DEBUGNET static void bge_debugnet_init(if_t ifp, int *nrxr, int *ncl, int *clsize) { struct bge_softc *sc; sc = if_getsoftc(ifp); BGE_LOCK(sc); *nrxr = sc->bge_return_ring_cnt; *ncl = DEBUGNET_MAX_IN_FLIGHT; if ((sc->bge_flags & BGE_FLAG_JUMBO_STD) != 0 && (if_getmtu(sc->bge_ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))) *clsize = MJUM9BYTES; else *clsize = MCLBYTES; BGE_UNLOCK(sc); } static void bge_debugnet_event(if_t ifp __unused, enum debugnet_ev event __unused) { } static int bge_debugnet_transmit(if_t ifp, struct mbuf *m) { struct bge_softc *sc; uint32_t prodidx; int error; sc = if_getsoftc(ifp); if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return (1); prodidx = sc->bge_tx_prodidx; error = bge_encap(sc, &m, &prodidx); if (error == 0) bge_start_tx(sc, prodidx); return (error); } static int bge_debugnet_poll(if_t ifp, int count) { struct bge_softc *sc; uint32_t rx_prod, tx_cons; sc = if_getsoftc(ifp); if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return (1); bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); rx_prod = sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx; tx_cons = sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx; bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); (void)bge_rxeof(sc, rx_prod, 0); bge_txeof(sc, tx_cons); return (0); } #endif /* DEBUGNET */ diff --git a/sys/dev/bwn/if_bwn.c b/sys/dev/bwn/if_bwn.c index 8f22de453898..6fefaf4ccd89 100644 --- a/sys/dev/bwn/if_bwn.c +++ b/sys/dev/bwn/if_bwn.c @@ -1,7750 +1,7750 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009-2010 Weongyo Jeong * Copyright (c) 2016 Landon Fuller * Copyright (c) 2017 The FreeBSD Foundation * All rights reserved. 
* * Portions of this software were developed by Landon Fuller * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); /* * The Broadcom Wireless LAN controller driver. */ #include "opt_bwn.h" #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "bhnd_nvram_map.h" #include "gpio_if.h" static SYSCTL_NODE(_hw, OID_AUTO, bwn, CTLFLAG_RD, 0, "Broadcom driver parameters"); /* * Tunable & sysctl variables. 
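 * All of the knobs below hang off the hw.bwn node created above; they are
 * plain SYSCTL_INT() entries, and hw.bwn.usedma is additionally a loader
 * tunable (TUNABLE_INT) so DMA can be disabled from the boot environment.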
*/ #ifdef BWN_DEBUG static int bwn_debug = 0; SYSCTL_INT(_hw_bwn, OID_AUTO, debug, CTLFLAG_RWTUN, &bwn_debug, 0, "Broadcom debugging printfs"); #endif static int bwn_bfp = 0; /* use "Bad Frames Preemption" */ SYSCTL_INT(_hw_bwn, OID_AUTO, bfp, CTLFLAG_RW, &bwn_bfp, 0, "uses Bad Frames Preemption"); static int bwn_bluetooth = 1; SYSCTL_INT(_hw_bwn, OID_AUTO, bluetooth, CTLFLAG_RW, &bwn_bluetooth, 0, "turns on Bluetooth Coexistence"); static int bwn_hwpctl = 0; SYSCTL_INT(_hw_bwn, OID_AUTO, hwpctl, CTLFLAG_RW, &bwn_hwpctl, 0, "uses H/W power control"); static int bwn_usedma = 1; SYSCTL_INT(_hw_bwn, OID_AUTO, usedma, CTLFLAG_RD, &bwn_usedma, 0, "uses DMA"); TUNABLE_INT("hw.bwn.usedma", &bwn_usedma); static int bwn_wme = 1; SYSCTL_INT(_hw_bwn, OID_AUTO, wme, CTLFLAG_RW, &bwn_wme, 0, "uses WME support"); static void bwn_attach_pre(struct bwn_softc *); static int bwn_attach_post(struct bwn_softc *); static int bwn_retain_bus_providers(struct bwn_softc *sc); static void bwn_release_bus_providers(struct bwn_softc *sc); static void bwn_sprom_bugfixes(device_t); static int bwn_init(struct bwn_softc *); static void bwn_parent(struct ieee80211com *); static void bwn_start(struct bwn_softc *); static int bwn_transmit(struct ieee80211com *, struct mbuf *); static int bwn_attach_core(struct bwn_mac *); static int bwn_phy_getinfo(struct bwn_mac *, int); static int bwn_chiptest(struct bwn_mac *); static int bwn_setup_channels(struct bwn_mac *, int, int); static void bwn_shm_ctlword(struct bwn_mac *, uint16_t, uint16_t); static void bwn_addchannels(struct ieee80211_channel [], int, int *, const struct bwn_channelinfo *, const uint8_t []); static int bwn_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void bwn_updateslot(struct ieee80211com *); static void bwn_update_promisc(struct ieee80211com *); static void bwn_wme_init(struct bwn_mac *); static int bwn_wme_update(struct ieee80211com *); static void bwn_wme_clear(struct bwn_softc *); static void bwn_wme_load(struct bwn_mac *); static void bwn_wme_loadparams(struct bwn_mac *, const struct wmeParams *, uint16_t); static void bwn_scan_start(struct ieee80211com *); static void bwn_scan_end(struct ieee80211com *); static void bwn_set_channel(struct ieee80211com *); static struct ieee80211vap *bwn_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void bwn_vap_delete(struct ieee80211vap *); static void bwn_stop(struct bwn_softc *); static int bwn_core_forceclk(struct bwn_mac *, bool); static int bwn_core_init(struct bwn_mac *); static void bwn_core_start(struct bwn_mac *); static void bwn_core_exit(struct bwn_mac *); static void bwn_bt_disable(struct bwn_mac *); static int bwn_chip_init(struct bwn_mac *); static void bwn_set_txretry(struct bwn_mac *, int, int); static void bwn_rate_init(struct bwn_mac *); static void bwn_set_phytxctl(struct bwn_mac *); static void bwn_spu_setdelay(struct bwn_mac *, int); static void bwn_bt_enable(struct bwn_mac *); static void bwn_set_macaddr(struct bwn_mac *); static void bwn_crypt_init(struct bwn_mac *); static void bwn_chip_exit(struct bwn_mac *); static int bwn_fw_fillinfo(struct bwn_mac *); static int bwn_fw_loaducode(struct bwn_mac *); static int bwn_gpio_init(struct bwn_mac *); static int bwn_fw_loadinitvals(struct bwn_mac *); static int bwn_phy_init(struct bwn_mac *); static void bwn_set_txantenna(struct bwn_mac *, int); static void bwn_set_opmode(struct 
bwn_mac *); static void bwn_rate_write(struct bwn_mac *, uint16_t, int); static uint8_t bwn_plcp_getcck(const uint8_t); static uint8_t bwn_plcp_getofdm(const uint8_t); static void bwn_pio_init(struct bwn_mac *); static uint16_t bwn_pio_idx2base(struct bwn_mac *, int); static void bwn_pio_set_txqueue(struct bwn_mac *, struct bwn_pio_txqueue *, int); static void bwn_pio_setupqueue_rx(struct bwn_mac *, struct bwn_pio_rxqueue *, int); static void bwn_destroy_queue_tx(struct bwn_pio_txqueue *); static uint16_t bwn_pio_read_2(struct bwn_mac *, struct bwn_pio_txqueue *, uint16_t); static void bwn_pio_cancel_tx_packets(struct bwn_pio_txqueue *); static int bwn_pio_rx(struct bwn_pio_rxqueue *); static uint8_t bwn_pio_rxeof(struct bwn_pio_rxqueue *); static void bwn_pio_handle_txeof(struct bwn_mac *, const struct bwn_txstatus *); static uint16_t bwn_pio_rx_read_2(struct bwn_pio_rxqueue *, uint16_t); static uint32_t bwn_pio_rx_read_4(struct bwn_pio_rxqueue *, uint16_t); static void bwn_pio_rx_write_2(struct bwn_pio_rxqueue *, uint16_t, uint16_t); static void bwn_pio_rx_write_4(struct bwn_pio_rxqueue *, uint16_t, uint32_t); static int bwn_pio_tx_start(struct bwn_mac *, struct ieee80211_node *, struct mbuf **); static struct bwn_pio_txqueue *bwn_pio_select(struct bwn_mac *, uint8_t); static uint32_t bwn_pio_write_multi_4(struct bwn_mac *, struct bwn_pio_txqueue *, uint32_t, const void *, int); static void bwn_pio_write_4(struct bwn_mac *, struct bwn_pio_txqueue *, uint16_t, uint32_t); static uint16_t bwn_pio_write_multi_2(struct bwn_mac *, struct bwn_pio_txqueue *, uint16_t, const void *, int); static uint16_t bwn_pio_write_mbuf_2(struct bwn_mac *, struct bwn_pio_txqueue *, uint16_t, struct mbuf *); static struct bwn_pio_txqueue *bwn_pio_parse_cookie(struct bwn_mac *, uint16_t, struct bwn_pio_txpkt **); static void bwn_dma_init(struct bwn_mac *); static void bwn_dma_rxdirectfifo(struct bwn_mac *, int, uint8_t); static uint16_t bwn_dma_base(int, int); static void bwn_dma_ringfree(struct bwn_dma_ring **); static void bwn_dma_32_getdesc(struct bwn_dma_ring *, int, struct bwn_dmadesc_generic **, struct bwn_dmadesc_meta **); static void bwn_dma_32_setdesc(struct bwn_dma_ring *, struct bwn_dmadesc_generic *, bus_addr_t, uint16_t, int, int, int); static void bwn_dma_32_start_transfer(struct bwn_dma_ring *, int); static void bwn_dma_32_suspend(struct bwn_dma_ring *); static void bwn_dma_32_resume(struct bwn_dma_ring *); static int bwn_dma_32_get_curslot(struct bwn_dma_ring *); static void bwn_dma_32_set_curslot(struct bwn_dma_ring *, int); static void bwn_dma_64_getdesc(struct bwn_dma_ring *, int, struct bwn_dmadesc_generic **, struct bwn_dmadesc_meta **); static void bwn_dma_64_setdesc(struct bwn_dma_ring *, struct bwn_dmadesc_generic *, bus_addr_t, uint16_t, int, int, int); static void bwn_dma_64_start_transfer(struct bwn_dma_ring *, int); static void bwn_dma_64_suspend(struct bwn_dma_ring *); static void bwn_dma_64_resume(struct bwn_dma_ring *); static int bwn_dma_64_get_curslot(struct bwn_dma_ring *); static void bwn_dma_64_set_curslot(struct bwn_dma_ring *, int); static int bwn_dma_allocringmemory(struct bwn_dma_ring *); static void bwn_dma_setup(struct bwn_dma_ring *); static void bwn_dma_free_ringmemory(struct bwn_dma_ring *); static void bwn_dma_cleanup(struct bwn_dma_ring *); static void bwn_dma_free_descbufs(struct bwn_dma_ring *); static int bwn_dma_tx_reset(struct bwn_mac *, uint16_t, int); static void bwn_dma_rx(struct bwn_dma_ring *); static int bwn_dma_rx_reset(struct bwn_mac *, uint16_t, int); 
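/*
 * The 32-bit and 64-bit DMA helpers declared above are never called
 * directly by the TX/RX paths; those go through the per-ring method
 * pointers (dr->getdesc, dr->setdesc, dr->start_transfer, ...).  Ring
 * setup is expected to bind one family or the other based on
 * mac->mac_dmatype, which bwn_attach_core() derives from the core's
 * I/O status flags.  An illustrative sketch (not the literal
 * bwn_dma_ringsetup() code):
 *
 *	if (mac->mac_dmatype == BHND_DMA_ADDR_64BIT) {
 *		dr->getdesc = bwn_dma_64_getdesc;
 *		dr->setdesc = bwn_dma_64_setdesc;
 *		dr->start_transfer = bwn_dma_64_start_transfer;
 *	} else {
 *		dr->getdesc = bwn_dma_32_getdesc;
 *		dr->setdesc = bwn_dma_32_setdesc;
 *		dr->start_transfer = bwn_dma_32_start_transfer;
 *	}
 */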
static void bwn_dma_free_descbuf(struct bwn_dma_ring *, struct bwn_dmadesc_meta *); static void bwn_dma_set_redzone(struct bwn_dma_ring *, struct mbuf *); static void bwn_dma_ring_addr(void *, bus_dma_segment_t *, int, int); static int bwn_dma_freeslot(struct bwn_dma_ring *); static int bwn_dma_nextslot(struct bwn_dma_ring *, int); static void bwn_dma_rxeof(struct bwn_dma_ring *, int *); static int bwn_dma_newbuf(struct bwn_dma_ring *, struct bwn_dmadesc_generic *, struct bwn_dmadesc_meta *, int); static void bwn_dma_buf_addr(void *, bus_dma_segment_t *, int, bus_size_t, int); static uint8_t bwn_dma_check_redzone(struct bwn_dma_ring *, struct mbuf *); static void bwn_ratectl_tx_complete(const struct ieee80211_node *, const struct bwn_txstatus *); static void bwn_dma_handle_txeof(struct bwn_mac *, const struct bwn_txstatus *); static int bwn_dma_tx_start(struct bwn_mac *, struct ieee80211_node *, struct mbuf **); static int bwn_dma_getslot(struct bwn_dma_ring *); static struct bwn_dma_ring *bwn_dma_select(struct bwn_mac *, uint8_t); static int bwn_dma_attach(struct bwn_mac *); static struct bwn_dma_ring *bwn_dma_ringsetup(struct bwn_mac *, int, int); static struct bwn_dma_ring *bwn_dma_parse_cookie(struct bwn_mac *, const struct bwn_txstatus *, uint16_t, int *); static void bwn_dma_free(struct bwn_mac *); static int bwn_fw_gets(struct bwn_mac *, enum bwn_fwtype); static int bwn_fw_get(struct bwn_mac *, enum bwn_fwtype, const char *, struct bwn_fwfile *); static void bwn_release_firmware(struct bwn_mac *); static void bwn_do_release_fw(struct bwn_fwfile *); static uint16_t bwn_fwcaps_read(struct bwn_mac *); static int bwn_fwinitvals_write(struct bwn_mac *, const struct bwn_fwinitvals *, size_t, size_t); static uint16_t bwn_ant2phy(int); static void bwn_mac_write_bssid(struct bwn_mac *); static void bwn_mac_setfilter(struct bwn_mac *, uint16_t, const uint8_t *); static void bwn_key_dowrite(struct bwn_mac *, uint8_t, uint8_t, const uint8_t *, size_t, const uint8_t *); static void bwn_key_macwrite(struct bwn_mac *, uint8_t, const uint8_t *); static void bwn_key_write(struct bwn_mac *, uint8_t, uint8_t, const uint8_t *); static void bwn_phy_exit(struct bwn_mac *); static void bwn_core_stop(struct bwn_mac *); static int bwn_switch_band(struct bwn_softc *, struct ieee80211_channel *); static int bwn_phy_reset(struct bwn_mac *); static int bwn_newstate(struct ieee80211vap *, enum ieee80211_state, int); static void bwn_set_pretbtt(struct bwn_mac *); static int bwn_intr(void *); static void bwn_intrtask(void *, int); static void bwn_restart(struct bwn_mac *, const char *); static void bwn_intr_ucode_debug(struct bwn_mac *); static void bwn_intr_tbtt_indication(struct bwn_mac *); static void bwn_intr_atim_end(struct bwn_mac *); static void bwn_intr_beacon(struct bwn_mac *); static void bwn_intr_pmq(struct bwn_mac *); static void bwn_intr_noise(struct bwn_mac *); static void bwn_intr_txeof(struct bwn_mac *); static void bwn_hwreset(void *, int); static void bwn_handle_fwpanic(struct bwn_mac *); static void bwn_load_beacon0(struct bwn_mac *); static void bwn_load_beacon1(struct bwn_mac *); static uint32_t bwn_jssi_read(struct bwn_mac *); static void bwn_noise_gensample(struct bwn_mac *); static void bwn_handle_txeof(struct bwn_mac *, const struct bwn_txstatus *); static void bwn_rxeof(struct bwn_mac *, struct mbuf *, const void *); static void bwn_phy_txpower_check(struct bwn_mac *, uint32_t); static int bwn_tx_start(struct bwn_softc *, struct ieee80211_node *, struct mbuf *); static int 
bwn_tx_isfull(struct bwn_softc *, struct mbuf *); static int bwn_set_txhdr(struct bwn_mac *, struct ieee80211_node *, struct mbuf *, struct bwn_txhdr *, uint16_t); static void bwn_plcp_genhdr(struct bwn_plcp4 *, const uint16_t, const uint8_t); static uint8_t bwn_antenna_sanitize(struct bwn_mac *, uint8_t); static uint8_t bwn_get_fbrate(uint8_t); static void bwn_txpwr(void *, int); static void bwn_tasks(void *); static void bwn_task_15s(struct bwn_mac *); static void bwn_task_30s(struct bwn_mac *); static void bwn_task_60s(struct bwn_mac *); static int bwn_plcp_get_ofdmrate(struct bwn_mac *, struct bwn_plcp6 *, uint8_t); static int bwn_plcp_get_cckrate(struct bwn_mac *, struct bwn_plcp6 *); static void bwn_rx_radiotap(struct bwn_mac *, struct mbuf *, const struct bwn_rxhdr4 *, struct bwn_plcp6 *, int, int, int); static void bwn_tsf_read(struct bwn_mac *, uint64_t *); static void bwn_set_slot_time(struct bwn_mac *, uint16_t); static void bwn_watchdog(void *); static void bwn_dma_stop(struct bwn_mac *); static void bwn_pio_stop(struct bwn_mac *); static void bwn_dma_ringstop(struct bwn_dma_ring **); static int bwn_led_attach(struct bwn_mac *); static void bwn_led_newstate(struct bwn_mac *, enum ieee80211_state); static void bwn_led_event(struct bwn_mac *, int); static void bwn_led_blink_start(struct bwn_mac *, int, int); static void bwn_led_blink_next(void *); static void bwn_led_blink_end(void *); static void bwn_rfswitch(void *); static void bwn_rf_turnon(struct bwn_mac *); static void bwn_rf_turnoff(struct bwn_mac *); static void bwn_sysctl_node(struct bwn_softc *); static const struct bwn_channelinfo bwn_chantable_bg = { .channels = { { 2412, 1, 30 }, { 2417, 2, 30 }, { 2422, 3, 30 }, { 2427, 4, 30 }, { 2432, 5, 30 }, { 2437, 6, 30 }, { 2442, 7, 30 }, { 2447, 8, 30 }, { 2452, 9, 30 }, { 2457, 10, 30 }, { 2462, 11, 30 }, { 2467, 12, 30 }, { 2472, 13, 30 }, { 2484, 14, 30 } }, .nchannels = 14 }; static const struct bwn_channelinfo bwn_chantable_a = { .channels = { { 5170, 34, 30 }, { 5180, 36, 30 }, { 5190, 38, 30 }, { 5200, 40, 30 }, { 5210, 42, 30 }, { 5220, 44, 30 }, { 5230, 46, 30 }, { 5240, 48, 30 }, { 5260, 52, 30 }, { 5280, 56, 30 }, { 5300, 60, 30 }, { 5320, 64, 30 }, { 5500, 100, 30 }, { 5520, 104, 30 }, { 5540, 108, 30 }, { 5560, 112, 30 }, { 5580, 116, 30 }, { 5600, 120, 30 }, { 5620, 124, 30 }, { 5640, 128, 30 }, { 5660, 132, 30 }, { 5680, 136, 30 }, { 5700, 140, 30 }, { 5745, 149, 30 }, { 5765, 153, 30 }, { 5785, 157, 30 }, { 5805, 161, 30 }, { 5825, 165, 30 }, { 5920, 184, 30 }, { 5940, 188, 30 }, { 5960, 192, 30 }, { 5980, 196, 30 }, { 6000, 200, 30 }, { 6020, 204, 30 }, { 6040, 208, 30 }, { 6060, 212, 30 }, { 6080, 216, 30 } }, .nchannels = 37 }; #if 0 static const struct bwn_channelinfo bwn_chantable_n = { .channels = { { 5160, 32, 30 }, { 5170, 34, 30 }, { 5180, 36, 30 }, { 5190, 38, 30 }, { 5200, 40, 30 }, { 5210, 42, 30 }, { 5220, 44, 30 }, { 5230, 46, 30 }, { 5240, 48, 30 }, { 5250, 50, 30 }, { 5260, 52, 30 }, { 5270, 54, 30 }, { 5280, 56, 30 }, { 5290, 58, 30 }, { 5300, 60, 30 }, { 5310, 62, 30 }, { 5320, 64, 30 }, { 5330, 66, 30 }, { 5340, 68, 30 }, { 5350, 70, 30 }, { 5360, 72, 30 }, { 5370, 74, 30 }, { 5380, 76, 30 }, { 5390, 78, 30 }, { 5400, 80, 30 }, { 5410, 82, 30 }, { 5420, 84, 30 }, { 5430, 86, 30 }, { 5440, 88, 30 }, { 5450, 90, 30 }, { 5460, 92, 30 }, { 5470, 94, 30 }, { 5480, 96, 30 }, { 5490, 98, 30 }, { 5500, 100, 30 }, { 5510, 102, 30 }, { 5520, 104, 30 }, { 5530, 106, 30 }, { 5540, 108, 30 }, { 5550, 110, 30 }, { 5560, 112, 30 }, { 5570, 114, 30 }, { 
5580, 116, 30 }, { 5590, 118, 30 }, { 5600, 120, 30 }, { 5610, 122, 30 }, { 5620, 124, 30 }, { 5630, 126, 30 }, { 5640, 128, 30 }, { 5650, 130, 30 }, { 5660, 132, 30 }, { 5670, 134, 30 }, { 5680, 136, 30 }, { 5690, 138, 30 }, { 5700, 140, 30 }, { 5710, 142, 30 }, { 5720, 144, 30 }, { 5725, 145, 30 }, { 5730, 146, 30 }, { 5735, 147, 30 }, { 5740, 148, 30 }, { 5745, 149, 30 }, { 5750, 150, 30 }, { 5755, 151, 30 }, { 5760, 152, 30 }, { 5765, 153, 30 }, { 5770, 154, 30 }, { 5775, 155, 30 }, { 5780, 156, 30 }, { 5785, 157, 30 }, { 5790, 158, 30 }, { 5795, 159, 30 }, { 5800, 160, 30 }, { 5805, 161, 30 }, { 5810, 162, 30 }, { 5815, 163, 30 }, { 5820, 164, 30 }, { 5825, 165, 30 }, { 5830, 166, 30 }, { 5840, 168, 30 }, { 5850, 170, 30 }, { 5860, 172, 30 }, { 5870, 174, 30 }, { 5880, 176, 30 }, { 5890, 178, 30 }, { 5900, 180, 30 }, { 5910, 182, 30 }, { 5920, 184, 30 }, { 5930, 186, 30 }, { 5940, 188, 30 }, { 5950, 190, 30 }, { 5960, 192, 30 }, { 5970, 194, 30 }, { 5980, 196, 30 }, { 5990, 198, 30 }, { 6000, 200, 30 }, { 6010, 202, 30 }, { 6020, 204, 30 }, { 6030, 206, 30 }, { 6040, 208, 30 }, { 6050, 210, 30 }, { 6060, 212, 30 }, { 6070, 214, 30 }, { 6080, 216, 30 }, { 6090, 218, 30 }, { 6100, 220, 30 }, { 6110, 222, 30 }, { 6120, 224, 30 }, { 6130, 226, 30 }, { 6140, 228, 30 } }, .nchannels = 110 }; #endif #define VENDOR_LED_ACT(vendor) \ { \ .vid = PCI_VENDOR_##vendor, \ .led_act = { BWN_VENDOR_LED_ACT_##vendor } \ } static const struct { uint16_t vid; uint8_t led_act[BWN_LED_MAX]; } bwn_vendor_led_act[] = { VENDOR_LED_ACT(HP_COMPAQ), VENDOR_LED_ACT(ASUSTEK) }; static const uint8_t bwn_default_led_act[BWN_LED_MAX] = { BWN_VENDOR_LED_ACT_DEFAULT }; #undef VENDOR_LED_ACT static const char *bwn_led_vars[] = { BHND_NVAR_LEDBH0, BHND_NVAR_LEDBH1, BHND_NVAR_LEDBH2, BHND_NVAR_LEDBH3 }; static const struct { int on_dur; int off_dur; } bwn_led_duration[109] = { [0] = { 400, 100 }, [2] = { 150, 75 }, [4] = { 90, 45 }, [11] = { 66, 34 }, [12] = { 53, 26 }, [18] = { 42, 21 }, [22] = { 35, 17 }, [24] = { 32, 16 }, [36] = { 21, 10 }, [48] = { 16, 8 }, [72] = { 11, 5 }, [96] = { 9, 4 }, [108] = { 7, 3 } }; static const uint16_t bwn_wme_shm_offsets[] = { [0] = BWN_WME_BESTEFFORT, [1] = BWN_WME_BACKGROUND, [2] = BWN_WME_VOICE, [3] = BWN_WME_VIDEO, }; /* Supported D11 core revisions */ #define BWN_DEV(_hwrev) {{ \ BHND_MATCH_CORE(BHND_MFGID_BCM, BHND_COREID_D11), \ BHND_MATCH_CORE_REV(_hwrev), \ }} static const struct bhnd_device bwn_devices[] = { BWN_DEV(HWREV_RANGE(5, 16)), BWN_DEV(HWREV_EQ(23)), BHND_DEVICE_END }; /* D11 quirks when bridged via a PCI host bridge core */ static const struct bhnd_device_quirk pci_bridge_quirks[] = { BHND_CORE_QUIRK (HWREV_LTE(10), BWN_QUIRK_UCODE_SLOWCLOCK_WAR), BHND_DEVICE_QUIRK_END }; /* D11 quirks when bridged via a PCMCIA host bridge core */ static const struct bhnd_device_quirk pcmcia_bridge_quirks[] = { BHND_CORE_QUIRK (HWREV_ANY, BWN_QUIRK_NODMA), BHND_DEVICE_QUIRK_END }; /* Host bridge cores for which D11 quirk flags should be applied */ static const struct bhnd_device bridge_devices[] = { BHND_DEVICE(BCM, PCI, NULL, pci_bridge_quirks), BHND_DEVICE(BCM, PCMCIA, NULL, pcmcia_bridge_quirks), BHND_DEVICE_END }; static int bwn_probe(device_t dev) { const struct bhnd_device *id; id = bhnd_device_lookup(dev, bwn_devices, sizeof(bwn_devices[0])); if (id == NULL) return (ENXIO); bhnd_set_default_core_desc(dev); return (BUS_PROBE_DEFAULT); } static int bwn_attach(device_t dev) { struct bwn_mac *mac; struct bwn_softc *sc; device_t parent, hostb; char 
chip_name[BHND_CHIPID_MAX_NAMELEN]; int error; sc = device_get_softc(dev); sc->sc_dev = dev; #ifdef BWN_DEBUG sc->sc_debug = bwn_debug; #endif mac = NULL; /* Determine the driver quirks applicable to this device, including any * quirks specific to the bus host bridge core (if any) */ sc->sc_quirks = bhnd_device_quirks(dev, bwn_devices, sizeof(bwn_devices[0])); parent = device_get_parent(dev); if ((hostb = bhnd_bus_find_hostb_device(parent)) != NULL) { sc->sc_quirks |= bhnd_device_quirks(hostb, bridge_devices, sizeof(bridge_devices[0])); } /* DMA explicitly disabled? */ if (!bwn_usedma) sc->sc_quirks |= BWN_QUIRK_NODMA; /* Fetch our chip identification and board info */ sc->sc_cid = *bhnd_get_chipid(dev); if ((error = bhnd_read_board_info(dev, &sc->sc_board_info))) { device_printf(sc->sc_dev, "couldn't read board info\n"); return (error); } /* Allocate our D11 register block and PMU state */ sc->sc_mem_rid = 0; sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_mem_rid, RF_ACTIVE); if (sc->sc_mem_res == NULL) { device_printf(sc->sc_dev, "couldn't allocate registers\n"); return (error); } if ((error = bhnd_alloc_pmu(sc->sc_dev))) { bus_release_resource(sc->sc_dev, SYS_RES_MEMORY, sc->sc_mem_rid, sc->sc_mem_res); return (error); } /* Retain references to all required bus service providers */ if ((error = bwn_retain_bus_providers(sc))) goto fail; /* Fetch mask of available antennas */ error = bhnd_nvram_getvar_uint8(sc->sc_dev, BHND_NVAR_AA2G, &sc->sc_ant2g); if (error) { device_printf(sc->sc_dev, "error determining 2GHz antenna " "availability from NVRAM: %d\n", error); goto fail; } error = bhnd_nvram_getvar_uint8(sc->sc_dev, BHND_NVAR_AA5G, &sc->sc_ant5g); if (error) { device_printf(sc->sc_dev, "error determining 5GHz antenna " "availability from NVRAM: %d\n", error); goto fail; } if ((sc->sc_flags & BWN_FLAG_ATTACHED) == 0) { bwn_attach_pre(sc); bwn_sprom_bugfixes(dev); sc->sc_flags |= BWN_FLAG_ATTACHED; } mac = malloc(sizeof(*mac), M_DEVBUF, M_WAITOK | M_ZERO); mac->mac_sc = sc; mac->mac_status = BWN_MAC_STATUS_UNINIT; if (bwn_bfp != 0) mac->mac_flags |= BWN_MAC_FLAG_BADFRAME_PREEMP; TASK_INIT(&mac->mac_hwreset, 0, bwn_hwreset, mac); - TASK_INIT(&mac->mac_intrtask, 0, bwn_intrtask, mac); + NET_TASK_INIT(&mac->mac_intrtask, 0, bwn_intrtask, mac); TASK_INIT(&mac->mac_txpower, 0, bwn_txpwr, mac); error = bwn_attach_core(mac); if (error) goto fail; error = bwn_led_attach(mac); if (error) goto fail; bhnd_format_chip_id(chip_name, sizeof(chip_name), sc->sc_cid.chip_id); device_printf(sc->sc_dev, "WLAN (%s rev %u sromrev %u) " "PHY (analog %d type %d rev %d) RADIO (manuf %#x ver %#x rev %d)\n", chip_name, bhnd_get_hwrev(sc->sc_dev), sc->sc_board_info.board_srom_rev, mac->mac_phy.analog, mac->mac_phy.type, mac->mac_phy.rev, mac->mac_phy.rf_manuf, mac->mac_phy.rf_ver, mac->mac_phy.rf_rev); if (mac->mac_flags & BWN_MAC_FLAG_DMA) device_printf(sc->sc_dev, "DMA (%d bits)\n", mac->mac_dmatype); else device_printf(sc->sc_dev, "PIO\n"); #ifdef BWN_GPL_PHY device_printf(sc->sc_dev, "Note: compiled with BWN_GPL_PHY; includes GPLv2 code\n"); #endif mac->mac_rid_irq = 0; mac->mac_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &mac->mac_rid_irq, RF_ACTIVE | RF_SHAREABLE); if (mac->mac_res_irq == NULL) { device_printf(sc->sc_dev, "couldn't allocate IRQ resource\n"); error = ENXIO; goto fail; } error = bus_setup_intr(dev, mac->mac_res_irq, INTR_TYPE_NET | INTR_MPSAFE, bwn_intr, NULL, mac, &mac->mac_intrhand); if (error != 0) { device_printf(sc->sc_dev, "couldn't setup interrupt (%d)\n", 
error); goto fail; } TAILQ_INSERT_TAIL(&sc->sc_maclist, mac, mac_list); /* * calls attach-post routine */ if ((sc->sc_flags & BWN_FLAG_ATTACHED) != 0) bwn_attach_post(sc); return (0); fail: if (mac != NULL && mac->mac_res_irq != NULL) { bus_release_resource(dev, SYS_RES_IRQ, mac->mac_rid_irq, mac->mac_res_irq); } free(mac, M_DEVBUF); bhnd_release_pmu(dev); bwn_release_bus_providers(sc); if (sc->sc_mem_res != NULL) { bus_release_resource(sc->sc_dev, SYS_RES_MEMORY, sc->sc_mem_rid, sc->sc_mem_res); } return (error); } static int bwn_retain_bus_providers(struct bwn_softc *sc) { struct chipc_caps *ccaps; sc->sc_chipc = bhnd_retain_provider(sc->sc_dev, BHND_SERVICE_CHIPC); if (sc->sc_chipc == NULL) { device_printf(sc->sc_dev, "ChipCommon device not found\n"); goto failed; } ccaps = BHND_CHIPC_GET_CAPS(sc->sc_chipc); sc->sc_gpio = bhnd_retain_provider(sc->sc_dev, BHND_SERVICE_GPIO); if (sc->sc_gpio == NULL) { device_printf(sc->sc_dev, "GPIO device not found\n"); goto failed; } if (ccaps->pmu) { sc->sc_pmu = bhnd_retain_provider(sc->sc_dev, BHND_SERVICE_PMU); if (sc->sc_pmu == NULL) { device_printf(sc->sc_dev, "PMU device not found\n"); goto failed; } } return (0); failed: bwn_release_bus_providers(sc); return (ENXIO); } static void bwn_release_bus_providers(struct bwn_softc *sc) { #define BWN_RELEASE_PROV(_sc, _prov, _service) do { \ if ((_sc)-> _prov != NULL) { \ bhnd_release_provider((_sc)->sc_dev, (_sc)-> _prov, \ (_service)); \ (_sc)-> _prov = NULL; \ } \ } while (0) BWN_RELEASE_PROV(sc, sc_chipc, BHND_SERVICE_CHIPC); BWN_RELEASE_PROV(sc, sc_gpio, BHND_SERVICE_GPIO); BWN_RELEASE_PROV(sc, sc_pmu, BHND_SERVICE_PMU); #undef BWN_RELEASE_PROV } static int bwn_attach_post(struct bwn_softc *sc) { struct ieee80211com *ic; const char *mac_varname; u_int core_unit; int error; ic = &sc->sc_ic; ic->ic_softc = sc; ic->ic_name = device_get_nameunit(sc->sc_dev); /* XXX not right but it's not used anywhere important */ ic->ic_phytype = IEEE80211_T_OFDM; ic->ic_opmode = IEEE80211_M_STA; ic->ic_caps = IEEE80211_C_STA /* station mode supported */ | IEEE80211_C_MONITOR /* monitor mode */ | IEEE80211_C_AHDEMO /* adhoc demo mode */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_WME /* WME/WMM supported */ | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ #if 0 | IEEE80211_C_BGSCAN /* capable of bg scanning */ #endif | IEEE80211_C_TXPMGT /* capable of txpow mgt */ ; ic->ic_flags_ext |= IEEE80211_FEXT_SWBMISS; /* s/w bmiss */ /* Determine the NVRAM variable containing our MAC address */ core_unit = bhnd_get_core_unit(sc->sc_dev); mac_varname = NULL; if (sc->sc_board_info.board_srom_rev <= 2) { if (core_unit == 0) { mac_varname = BHND_NVAR_IL0MACADDR; } else if (core_unit == 1) { mac_varname = BHND_NVAR_ET1MACADDR; } } else { if (core_unit == 0) { mac_varname = BHND_NVAR_MACADDR; } } if (mac_varname == NULL) { device_printf(sc->sc_dev, "missing MAC address variable for " "D11 core %u", core_unit); return (ENXIO); } /* Read the MAC address from NVRAM */ error = bhnd_nvram_getvar_array(sc->sc_dev, mac_varname, ic->ic_macaddr, sizeof(ic->ic_macaddr), BHND_NVRAM_TYPE_UINT8_ARRAY); if (error) { device_printf(sc->sc_dev, "error reading %s: %d\n", mac_varname, error); return (error); } /* call MI attach routine. 
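	 * ieee80211_ifattach() hands the ieee80211com (including the MAC
	 * address read from NVRAM above) to net80211; the default methods
	 * it installs are overridden with the bwn_* callbacks below.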
*/ ieee80211_ifattach(ic); ic->ic_headroom = sizeof(struct bwn_txhdr); /* override default methods */ ic->ic_raw_xmit = bwn_raw_xmit; ic->ic_updateslot = bwn_updateslot; ic->ic_update_promisc = bwn_update_promisc; ic->ic_wme.wme_update = bwn_wme_update; ic->ic_scan_start = bwn_scan_start; ic->ic_scan_end = bwn_scan_end; ic->ic_set_channel = bwn_set_channel; ic->ic_vap_create = bwn_vap_create; ic->ic_vap_delete = bwn_vap_delete; ic->ic_transmit = bwn_transmit; ic->ic_parent = bwn_parent; ieee80211_radiotap_attach(ic, &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), BWN_TX_RADIOTAP_PRESENT, &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), BWN_RX_RADIOTAP_PRESENT); bwn_sysctl_node(sc); if (bootverbose) ieee80211_announce(ic); return (0); } static void bwn_phy_detach(struct bwn_mac *mac) { if (mac->mac_phy.detach != NULL) mac->mac_phy.detach(mac); } static int bwn_detach(device_t dev) { struct bwn_softc *sc = device_get_softc(dev); struct bwn_mac *mac = sc->sc_curmac; struct ieee80211com *ic = &sc->sc_ic; sc->sc_flags |= BWN_FLAG_INVALID; if (device_is_attached(sc->sc_dev)) { BWN_LOCK(sc); bwn_stop(sc); BWN_UNLOCK(sc); bwn_dma_free(mac); callout_drain(&sc->sc_led_blink_ch); callout_drain(&sc->sc_rfswitch_ch); callout_drain(&sc->sc_task_ch); callout_drain(&sc->sc_watchdog_ch); bwn_phy_detach(mac); ieee80211_draintask(ic, &mac->mac_hwreset); ieee80211_draintask(ic, &mac->mac_txpower); ieee80211_ifdetach(ic); } taskqueue_drain(sc->sc_tq, &mac->mac_intrtask); taskqueue_free(sc->sc_tq); if (mac->mac_intrhand != NULL) { bus_teardown_intr(dev, mac->mac_res_irq, mac->mac_intrhand); mac->mac_intrhand = NULL; } bhnd_release_pmu(dev); bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid, sc->sc_mem_res); bus_release_resource(dev, SYS_RES_IRQ, mac->mac_rid_irq, mac->mac_res_irq); mbufq_drain(&sc->sc_snd); bwn_release_firmware(mac); BWN_LOCK_DESTROY(sc); bwn_release_bus_providers(sc); return (0); } static void bwn_attach_pre(struct bwn_softc *sc) { BWN_LOCK_INIT(sc); TAILQ_INIT(&sc->sc_maclist); callout_init_mtx(&sc->sc_rfswitch_ch, &sc->sc_mtx, 0); callout_init_mtx(&sc->sc_task_ch, &sc->sc_mtx, 0); callout_init_mtx(&sc->sc_watchdog_ch, &sc->sc_mtx, 0); mbufq_init(&sc->sc_snd, ifqmaxlen); sc->sc_tq = taskqueue_create_fast("bwn_taskq", M_NOWAIT, taskqueue_thread_enqueue, &sc->sc_tq); taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->sc_dev)); } static void bwn_sprom_bugfixes(device_t dev) { struct bwn_softc *sc = device_get_softc(dev); #define BWN_ISDEV(_device, _subvendor, _subdevice) \ ((sc->sc_board_info.board_devid == PCI_DEVID_##_device) && \ (sc->sc_board_info.board_vendor == PCI_VENDOR_##_subvendor) && \ (sc->sc_board_info.board_type == _subdevice)) /* A subset of Apple Airport Extreme (BCM4306 rev 2) devices * were programmed with a missing PACTRL boardflag */ if (sc->sc_board_info.board_vendor == PCI_VENDOR_APPLE && sc->sc_board_info.board_type == 0x4e && sc->sc_board_info.board_rev > 0x40) sc->sc_board_info.board_flags |= BHND_BFL_PACTRL; if (BWN_ISDEV(BCM4318_D11G, ASUSTEK, 0x100f) || BWN_ISDEV(BCM4306_D11G, DELL, 0x0003) || BWN_ISDEV(BCM4306_D11G, HP, 0x12f8) || BWN_ISDEV(BCM4306_D11G, LINKSYS, 0x0013) || BWN_ISDEV(BCM4306_D11G, LINKSYS, 0x0014) || BWN_ISDEV(BCM4306_D11G, LINKSYS, 0x0015) || BWN_ISDEV(BCM4306_D11G, MOTOROLA, 0x7010)) sc->sc_board_info.board_flags &= ~BHND_BFL_BTCOEX; #undef BWN_ISDEV } static void bwn_parent(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; int startall = 0; BWN_LOCK(sc); if (ic->ic_nrunning > 0) { if ((sc->sc_flags & 
BWN_FLAG_RUNNING) == 0) { bwn_init(sc); startall = 1; } else bwn_update_promisc(ic); } else if (sc->sc_flags & BWN_FLAG_RUNNING) bwn_stop(sc); BWN_UNLOCK(sc); if (startall) ieee80211_start_all(ic); } static int bwn_transmit(struct ieee80211com *ic, struct mbuf *m) { struct bwn_softc *sc = ic->ic_softc; int error; BWN_LOCK(sc); if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0) { BWN_UNLOCK(sc); return (ENXIO); } error = mbufq_enqueue(&sc->sc_snd, m); if (error) { BWN_UNLOCK(sc); return (error); } bwn_start(sc); BWN_UNLOCK(sc); return (0); } static void bwn_start(struct bwn_softc *sc) { struct bwn_mac *mac = sc->sc_curmac; struct ieee80211_frame *wh; struct ieee80211_node *ni; struct ieee80211_key *k; struct mbuf *m; BWN_ASSERT_LOCKED(sc); if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0 || mac == NULL || mac->mac_status < BWN_MAC_STATUS_STARTED) return; while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) { if (bwn_tx_isfull(sc, m)) break; ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; if (ni == NULL) { device_printf(sc->sc_dev, "unexpected NULL ni\n"); m_freem(m); counter_u64_add(sc->sc_ic.ic_oerrors, 1); continue; } wh = mtod(m, struct ieee80211_frame *); if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m); if (k == NULL) { if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); ieee80211_free_node(ni); m_freem(m); continue; } } wh = NULL; /* Catch any invalid use */ if (bwn_tx_start(sc, ni, m) != 0) { if (ni != NULL) { if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); ieee80211_free_node(ni); } continue; } sc->sc_watchdog_timer = 5; } } static int bwn_tx_isfull(struct bwn_softc *sc, struct mbuf *m) { struct bwn_dma_ring *dr; struct bwn_mac *mac = sc->sc_curmac; struct bwn_pio_txqueue *tq; int pktlen = roundup(m->m_pkthdr.len + BWN_HDRSIZE(mac), 4); BWN_ASSERT_LOCKED(sc); if (mac->mac_flags & BWN_MAC_FLAG_DMA) { dr = bwn_dma_select(mac, M_WME_GETAC(m)); if (dr->dr_stop == 1 || bwn_dma_freeslot(dr) < BWN_TX_SLOTS_PER_FRAME) { dr->dr_stop = 1; goto full; } } else { tq = bwn_pio_select(mac, M_WME_GETAC(m)); if (tq->tq_free == 0 || pktlen > tq->tq_size || pktlen > (tq->tq_size - tq->tq_used)) goto full; } return (0); full: mbufq_prepend(&sc->sc_snd, m); return (1); } static int bwn_tx_start(struct bwn_softc *sc, struct ieee80211_node *ni, struct mbuf *m) { struct bwn_mac *mac = sc->sc_curmac; int error; BWN_ASSERT_LOCKED(sc); if (m->m_pkthdr.len < IEEE80211_MIN_LEN || mac == NULL) { m_freem(m); return (ENXIO); } error = (mac->mac_flags & BWN_MAC_FLAG_DMA) ? 
bwn_dma_tx_start(mac, ni, &m) : bwn_pio_tx_start(mac, ni, &m); if (error) { m_freem(m); return (error); } return (0); } static int bwn_pio_tx_start(struct bwn_mac *mac, struct ieee80211_node *ni, struct mbuf **mp) { struct bwn_pio_txpkt *tp; struct bwn_pio_txqueue *tq; struct bwn_softc *sc = mac->mac_sc; struct bwn_txhdr txhdr; struct mbuf *m, *m_new; uint32_t ctl32; int error; uint16_t ctl16; BWN_ASSERT_LOCKED(sc); /* XXX TODO send packets after DTIM */ m = *mp; tq = bwn_pio_select(mac, M_WME_GETAC(m)); KASSERT(!TAILQ_EMPTY(&tq->tq_pktlist), ("%s: fail", __func__)); tp = TAILQ_FIRST(&tq->tq_pktlist); tp->tp_ni = ni; tp->tp_m = m; error = bwn_set_txhdr(mac, ni, m, &txhdr, BWN_PIO_COOKIE(tq, tp)); if (error) { device_printf(sc->sc_dev, "tx fail\n"); return (error); } TAILQ_REMOVE(&tq->tq_pktlist, tp, tp_list); tq->tq_used += roundup(m->m_pkthdr.len + BWN_HDRSIZE(mac), 4); tq->tq_free--; if (bhnd_get_hwrev(sc->sc_dev) >= 8) { /* * XXX please removes m_defrag(9) */ m_new = m_defrag(*mp, M_NOWAIT); if (m_new == NULL) { device_printf(sc->sc_dev, "%s: can't defrag TX buffer\n", __func__); return (ENOBUFS); } *mp = m_new; if (m_new->m_next != NULL) device_printf(sc->sc_dev, "TODO: fragmented packets for PIO\n"); tp->tp_m = m_new; /* send HEADER */ ctl32 = bwn_pio_write_multi_4(mac, tq, (BWN_PIO_READ_4(mac, tq, BWN_PIO8_TXCTL) | BWN_PIO8_TXCTL_FRAMEREADY) & ~BWN_PIO8_TXCTL_EOF, (const uint8_t *)&txhdr, BWN_HDRSIZE(mac)); /* send BODY */ ctl32 = bwn_pio_write_multi_4(mac, tq, ctl32, mtod(m_new, const void *), m_new->m_pkthdr.len); bwn_pio_write_4(mac, tq, BWN_PIO_TXCTL, ctl32 | BWN_PIO8_TXCTL_EOF); } else { ctl16 = bwn_pio_write_multi_2(mac, tq, (bwn_pio_read_2(mac, tq, BWN_PIO_TXCTL) | BWN_PIO_TXCTL_FRAMEREADY) & ~BWN_PIO_TXCTL_EOF, (const uint8_t *)&txhdr, BWN_HDRSIZE(mac)); ctl16 = bwn_pio_write_mbuf_2(mac, tq, ctl16, m); BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl16 | BWN_PIO_TXCTL_EOF); } return (0); } static struct bwn_pio_txqueue * bwn_pio_select(struct bwn_mac *mac, uint8_t prio) { if ((mac->mac_flags & BWN_MAC_FLAG_WME) == 0) return (&mac->mac_method.pio.wme[WME_AC_BE]); switch (prio) { case 0: return (&mac->mac_method.pio.wme[WME_AC_BE]); case 1: return (&mac->mac_method.pio.wme[WME_AC_BK]); case 2: return (&mac->mac_method.pio.wme[WME_AC_VI]); case 3: return (&mac->mac_method.pio.wme[WME_AC_VO]); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (NULL); } static int bwn_dma_tx_start(struct bwn_mac *mac, struct ieee80211_node *ni, struct mbuf **mp) { #define BWN_GET_TXHDRCACHE(slot) \ &(txhdr_cache[(slot / BWN_TX_SLOTS_PER_FRAME) * BWN_HDRSIZE(mac)]) struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_dma_ring *dr = bwn_dma_select(mac, M_WME_GETAC(*mp)); struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *mt; struct bwn_softc *sc = mac->mac_sc; struct mbuf *m; uint8_t *txhdr_cache = (uint8_t *)dr->dr_txhdr_cache; int error, slot, backup[2] = { dr->dr_curslot, dr->dr_usedslot }; BWN_ASSERT_LOCKED(sc); KASSERT(!dr->dr_stop, ("%s:%d: fail", __func__, __LINE__)); /* XXX send after DTIM */ m = *mp; slot = bwn_dma_getslot(dr); dr->getdesc(dr, slot, &desc, &mt); KASSERT(mt->mt_txtype == BWN_DMADESC_METATYPE_HEADER, ("%s:%d: fail", __func__, __LINE__)); error = bwn_set_txhdr(dr->dr_mac, ni, m, (struct bwn_txhdr *)BWN_GET_TXHDRCACHE(slot), BWN_DMA_COOKIE(dr, slot)); if (error) goto fail; error = bus_dmamap_load(dr->dr_txring_dtag, mt->mt_dmap, BWN_GET_TXHDRCACHE(slot), BWN_HDRSIZE(mac), bwn_dma_ring_addr, &mt->mt_paddr, BUS_DMA_NOWAIT); if (error) { device_printf(sc->sc_dev, 
"%s: can't load TX buffer (1) %d\n", __func__, error); goto fail; } bus_dmamap_sync(dr->dr_txring_dtag, mt->mt_dmap, BUS_DMASYNC_PREWRITE); dr->setdesc(dr, desc, mt->mt_paddr, BWN_HDRSIZE(mac), 1, 0, 0); bus_dmamap_sync(dr->dr_ring_dtag, dr->dr_ring_dmap, BUS_DMASYNC_PREWRITE); slot = bwn_dma_getslot(dr); dr->getdesc(dr, slot, &desc, &mt); KASSERT(mt->mt_txtype == BWN_DMADESC_METATYPE_BODY && mt->mt_islast == 1, ("%s:%d: fail", __func__, __LINE__)); mt->mt_m = m; mt->mt_ni = ni; error = bus_dmamap_load_mbuf(dma->txbuf_dtag, mt->mt_dmap, m, bwn_dma_buf_addr, &mt->mt_paddr, BUS_DMA_NOWAIT); if (error && error != EFBIG) { device_printf(sc->sc_dev, "%s: can't load TX buffer (1) %d\n", __func__, error); goto fail; } if (error) { /* error == EFBIG */ struct mbuf *m_new; m_new = m_defrag(m, M_NOWAIT); if (m_new == NULL) { device_printf(sc->sc_dev, "%s: can't defrag TX buffer\n", __func__); error = ENOBUFS; goto fail; } *mp = m = m_new; mt->mt_m = m; error = bus_dmamap_load_mbuf(dma->txbuf_dtag, mt->mt_dmap, m, bwn_dma_buf_addr, &mt->mt_paddr, BUS_DMA_NOWAIT); if (error) { device_printf(sc->sc_dev, "%s: can't load TX buffer (2) %d\n", __func__, error); goto fail; } } bus_dmamap_sync(dma->txbuf_dtag, mt->mt_dmap, BUS_DMASYNC_PREWRITE); dr->setdesc(dr, desc, mt->mt_paddr, m->m_pkthdr.len, 0, 1, 1); bus_dmamap_sync(dr->dr_ring_dtag, dr->dr_ring_dmap, BUS_DMASYNC_PREWRITE); /* XXX send after DTIM */ dr->start_transfer(dr, bwn_dma_nextslot(dr, slot)); return (0); fail: dr->dr_curslot = backup[0]; dr->dr_usedslot = backup[1]; return (error); #undef BWN_GET_TXHDRCACHE } static void bwn_watchdog(void *arg) { struct bwn_softc *sc = arg; if (sc->sc_watchdog_timer != 0 && --sc->sc_watchdog_timer == 0) { device_printf(sc->sc_dev, "device timeout\n"); counter_u64_add(sc->sc_ic.ic_oerrors, 1); } callout_schedule(&sc->sc_watchdog_ch, hz); } static int bwn_attach_core(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; int error, have_bg = 0, have_a = 0; uint16_t iost; KASSERT(bhnd_get_hwrev(sc->sc_dev) >= 5, ("unsupported revision %d", bhnd_get_hwrev(sc->sc_dev))); if ((error = bwn_core_forceclk(mac, true))) return (error); if ((error = bhnd_read_iost(sc->sc_dev, &iost))) { device_printf(sc->sc_dev, "error reading I/O status flags: " "%d\n", error); return (error); } have_a = (iost & BWN_IOST_HAVE_5GHZ) ? 1 : 0; have_bg = (iost & BWN_IOST_HAVE_2GHZ) ? 1 : 0; if (iost & BWN_IOST_DUALPHY) { have_bg = 1; have_a = 1; } #if 0 device_printf(sc->sc_dev, "%s: iost=0x%04hx, have_a=%d, have_bg=%d," " deviceid=0x%04x, siba_deviceid=0x%04x\n", __func__, iost, have_a, have_bg, sc->sc_board_info.board_devid, sc->sc_cid.chip_id); #endif /* * Guess at whether it has A-PHY or G-PHY. * This is just used for resetting the core to probe things; * we will re-guess once it's all up and working. */ error = bwn_reset_core(mac, have_bg); if (error) goto fail; /* * Determine the DMA engine type */ if (iost & BHND_IOST_DMA64) { mac->mac_dmatype = BHND_DMA_ADDR_64BIT; } else { uint32_t tmp; uint16_t base; base = bwn_dma_base(0, 0); BWN_WRITE_4(mac, base + BWN_DMA32_TXCTL, BWN_DMA32_TXADDREXT_MASK); tmp = BWN_READ_4(mac, base + BWN_DMA32_TXCTL); if (tmp & BWN_DMA32_TXADDREXT_MASK) { mac->mac_dmatype = BHND_DMA_ADDR_32BIT; } else { mac->mac_dmatype = BHND_DMA_ADDR_30BIT; } } /* * Get the PHY version. */ error = bwn_phy_getinfo(mac, have_bg); if (error) goto fail; /* * This is the whitelist of devices which we "believe" * the SPROM PHY config from. The rest are "guessed". 
*/ if (sc->sc_board_info.board_devid != PCI_DEVID_BCM4311_D11DUAL && sc->sc_board_info.board_devid != PCI_DEVID_BCM4328_D11G && sc->sc_board_info.board_devid != PCI_DEVID_BCM4318_D11DUAL && sc->sc_board_info.board_devid != PCI_DEVID_BCM4306_D11DUAL && sc->sc_board_info.board_devid != PCI_DEVID_BCM4321_D11N && sc->sc_board_info.board_devid != PCI_DEVID_BCM4322_D11N) { have_a = have_bg = 0; if (mac->mac_phy.type == BWN_PHYTYPE_A) have_a = 1; else if (mac->mac_phy.type == BWN_PHYTYPE_G || mac->mac_phy.type == BWN_PHYTYPE_N || mac->mac_phy.type == BWN_PHYTYPE_LP) have_bg = 1; else KASSERT(0 == 1, ("%s: unknown phy type (%d)", __func__, mac->mac_phy.type)); } /* * XXX The PHY-G support doesn't do 5GHz operation. */ if (mac->mac_phy.type != BWN_PHYTYPE_LP && mac->mac_phy.type != BWN_PHYTYPE_N) { device_printf(sc->sc_dev, "%s: forcing 2GHz only; no dual-band support for PHY\n", __func__); have_a = 0; have_bg = 1; } mac->mac_phy.phy_n = NULL; if (mac->mac_phy.type == BWN_PHYTYPE_G) { mac->mac_phy.attach = bwn_phy_g_attach; mac->mac_phy.detach = bwn_phy_g_detach; mac->mac_phy.prepare_hw = bwn_phy_g_prepare_hw; mac->mac_phy.init_pre = bwn_phy_g_init_pre; mac->mac_phy.init = bwn_phy_g_init; mac->mac_phy.exit = bwn_phy_g_exit; mac->mac_phy.phy_read = bwn_phy_g_read; mac->mac_phy.phy_write = bwn_phy_g_write; mac->mac_phy.rf_read = bwn_phy_g_rf_read; mac->mac_phy.rf_write = bwn_phy_g_rf_write; mac->mac_phy.use_hwpctl = bwn_phy_g_hwpctl; mac->mac_phy.rf_onoff = bwn_phy_g_rf_onoff; mac->mac_phy.switch_analog = bwn_phy_switch_analog; mac->mac_phy.switch_channel = bwn_phy_g_switch_channel; mac->mac_phy.get_default_chan = bwn_phy_g_get_default_chan; mac->mac_phy.set_antenna = bwn_phy_g_set_antenna; mac->mac_phy.set_im = bwn_phy_g_im; mac->mac_phy.recalc_txpwr = bwn_phy_g_recalc_txpwr; mac->mac_phy.set_txpwr = bwn_phy_g_set_txpwr; mac->mac_phy.task_15s = bwn_phy_g_task_15s; mac->mac_phy.task_60s = bwn_phy_g_task_60s; } else if (mac->mac_phy.type == BWN_PHYTYPE_LP) { mac->mac_phy.init_pre = bwn_phy_lp_init_pre; mac->mac_phy.init = bwn_phy_lp_init; mac->mac_phy.phy_read = bwn_phy_lp_read; mac->mac_phy.phy_write = bwn_phy_lp_write; mac->mac_phy.phy_maskset = bwn_phy_lp_maskset; mac->mac_phy.rf_read = bwn_phy_lp_rf_read; mac->mac_phy.rf_write = bwn_phy_lp_rf_write; mac->mac_phy.rf_onoff = bwn_phy_lp_rf_onoff; mac->mac_phy.switch_analog = bwn_phy_lp_switch_analog; mac->mac_phy.switch_channel = bwn_phy_lp_switch_channel; mac->mac_phy.get_default_chan = bwn_phy_lp_get_default_chan; mac->mac_phy.set_antenna = bwn_phy_lp_set_antenna; mac->mac_phy.task_60s = bwn_phy_lp_task_60s; } else if (mac->mac_phy.type == BWN_PHYTYPE_N) { mac->mac_phy.attach = bwn_phy_n_attach; mac->mac_phy.detach = bwn_phy_n_detach; mac->mac_phy.prepare_hw = bwn_phy_n_prepare_hw; mac->mac_phy.init_pre = bwn_phy_n_init_pre; mac->mac_phy.init = bwn_phy_n_init; mac->mac_phy.exit = bwn_phy_n_exit; mac->mac_phy.phy_read = bwn_phy_n_read; mac->mac_phy.phy_write = bwn_phy_n_write; mac->mac_phy.rf_read = bwn_phy_n_rf_read; mac->mac_phy.rf_write = bwn_phy_n_rf_write; mac->mac_phy.use_hwpctl = bwn_phy_n_hwpctl; mac->mac_phy.rf_onoff = bwn_phy_n_rf_onoff; mac->mac_phy.switch_analog = bwn_phy_n_switch_analog; mac->mac_phy.switch_channel = bwn_phy_n_switch_channel; mac->mac_phy.get_default_chan = bwn_phy_n_get_default_chan; mac->mac_phy.set_antenna = bwn_phy_n_set_antenna; mac->mac_phy.set_im = bwn_phy_n_im; mac->mac_phy.recalc_txpwr = bwn_phy_n_recalc_txpwr; mac->mac_phy.set_txpwr = bwn_phy_n_set_txpwr; mac->mac_phy.task_15s = bwn_phy_n_task_15s; 
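		/*
		 * As with the G- and LP-PHY blocks above, the task_15s/
		 * task_60s members wired up in these per-PHY blocks are
		 * periodic maintenance hooks; the bwn_task_15s()/
		 * bwn_task_60s() helpers declared earlier are expected to
		 * dispatch them roughly as follows (illustrative sketch,
		 * not the literal driver code):
		 *
		 *	if (mac->mac_phy.task_60s != NULL)
		 *		mac->mac_phy.task_60s(mac);
		 */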
mac->mac_phy.task_60s = bwn_phy_n_task_60s; } else { device_printf(sc->sc_dev, "unsupported PHY type (%d)\n", mac->mac_phy.type); error = ENXIO; goto fail; } mac->mac_phy.gmode = have_bg; if (mac->mac_phy.attach != NULL) { error = mac->mac_phy.attach(mac); if (error) { device_printf(sc->sc_dev, "failed\n"); goto fail; } } error = bwn_reset_core(mac, have_bg); if (error) goto fail; error = bwn_chiptest(mac); if (error) goto fail; error = bwn_setup_channels(mac, have_bg, have_a); if (error) { device_printf(sc->sc_dev, "failed to setup channels\n"); goto fail; } if (sc->sc_curmac == NULL) sc->sc_curmac = mac; error = bwn_dma_attach(mac); if (error != 0) { device_printf(sc->sc_dev, "failed to initialize DMA\n"); goto fail; } mac->mac_phy.switch_analog(mac, 0); fail: bhnd_suspend_hw(sc->sc_dev, 0); bwn_release_firmware(mac); return (error); } /* * Reset */ int bwn_reset_core(struct bwn_mac *mac, int g_mode) { struct bwn_softc *sc; uint32_t ctl; uint16_t ioctl, ioctl_mask; int error; sc = mac->mac_sc; DPRINTF(sc, BWN_DEBUG_RESET, "%s: g_mode=%d\n", __func__, g_mode); /* Reset core */ ioctl = (BWN_IOCTL_PHYCLOCK_ENABLE | BWN_IOCTL_PHYRESET); if (g_mode) ioctl |= BWN_IOCTL_SUPPORT_G; /* XXX N-PHY only; and hard-code to 20MHz for now */ if (mac->mac_phy.type == BWN_PHYTYPE_N) ioctl |= BWN_IOCTL_PHY_BANDWIDTH_20MHZ; if ((error = bhnd_reset_hw(sc->sc_dev, ioctl, ioctl))) { device_printf(sc->sc_dev, "core reset failed: %d", error); return (error); } DELAY(2000); /* Take PHY out of reset */ ioctl = BHND_IOCTL_CLK_FORCE; ioctl_mask = BHND_IOCTL_CLK_FORCE | BWN_IOCTL_PHYRESET | BWN_IOCTL_PHYCLOCK_ENABLE; if ((error = bhnd_write_ioctl(sc->sc_dev, ioctl, ioctl_mask))) { device_printf(sc->sc_dev, "failed to set core ioctl flags: " "%d\n", error); return (error); } DELAY(2000); ioctl = BWN_IOCTL_PHYCLOCK_ENABLE; if ((error = bhnd_write_ioctl(sc->sc_dev, ioctl, ioctl_mask))) { device_printf(sc->sc_dev, "failed to set core ioctl flags: " "%d\n", error); return (error); } DELAY(2000); if (mac->mac_phy.switch_analog != NULL) mac->mac_phy.switch_analog(mac, 1); ctl = BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_GMODE; if (g_mode) ctl |= BWN_MACCTL_GMODE; BWN_WRITE_4(mac, BWN_MACCTL, ctl | BWN_MACCTL_IHR_ON); return (0); } static int bwn_phy_getinfo(struct bwn_mac *mac, int gmode) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_softc *sc = mac->mac_sc; uint32_t tmp; /* PHY */ tmp = BWN_READ_2(mac, BWN_PHYVER); phy->gmode = gmode; phy->rf_on = 1; phy->analog = (tmp & BWN_PHYVER_ANALOG) >> 12; phy->type = (tmp & BWN_PHYVER_TYPE) >> 8; phy->rev = (tmp & BWN_PHYVER_VERSION); if ((phy->type == BWN_PHYTYPE_A && phy->rev >= 4) || (phy->type == BWN_PHYTYPE_B && phy->rev != 2 && phy->rev != 4 && phy->rev != 6 && phy->rev != 7) || (phy->type == BWN_PHYTYPE_G && phy->rev > 9) || (phy->type == BWN_PHYTYPE_N && phy->rev > 6) || (phy->type == BWN_PHYTYPE_LP && phy->rev > 2)) goto unsupphy; /* RADIO */ BWN_WRITE_2(mac, BWN_RFCTL, BWN_RFCTL_ID); tmp = BWN_READ_2(mac, BWN_RFDATALO); BWN_WRITE_2(mac, BWN_RFCTL, BWN_RFCTL_ID); tmp |= (uint32_t)BWN_READ_2(mac, BWN_RFDATAHI) << 16; phy->rf_rev = (tmp & 0xf0000000) >> 28; phy->rf_ver = (tmp & 0x0ffff000) >> 12; phy->rf_manuf = (tmp & 0x00000fff); /* * For now, just always do full init (ie, what bwn has traditionally * done) */ phy->phy_do_full_init = 1; if (phy->rf_manuf != 0x17f) /* 0x17f is broadcom */ goto unsupradio; if ((phy->type == BWN_PHYTYPE_A && (phy->rf_ver != 0x2060 || phy->rf_rev != 1 || phy->rf_manuf != 0x17f)) || (phy->type == BWN_PHYTYPE_B && (phy->rf_ver & 0xfff0) != 
0x2050) || (phy->type == BWN_PHYTYPE_G && phy->rf_ver != 0x2050) || (phy->type == BWN_PHYTYPE_N && phy->rf_ver != 0x2055 && phy->rf_ver != 0x2056) || (phy->type == BWN_PHYTYPE_LP && phy->rf_ver != 0x2062 && phy->rf_ver != 0x2063)) goto unsupradio; return (0); unsupphy: device_printf(sc->sc_dev, "unsupported PHY (type %#x, rev %#x, " "analog %#x)\n", phy->type, phy->rev, phy->analog); return (ENXIO); unsupradio: device_printf(sc->sc_dev, "unsupported radio (manuf %#x, ver %#x, " "rev %#x)\n", phy->rf_manuf, phy->rf_ver, phy->rf_rev); return (ENXIO); } static int bwn_chiptest(struct bwn_mac *mac) { #define TESTVAL0 0x55aaaa55 #define TESTVAL1 0xaa5555aa struct bwn_softc *sc = mac->mac_sc; uint32_t v, backup; BWN_LOCK(sc); backup = bwn_shm_read_4(mac, BWN_SHARED, 0); bwn_shm_write_4(mac, BWN_SHARED, 0, TESTVAL0); if (bwn_shm_read_4(mac, BWN_SHARED, 0) != TESTVAL0) goto error; bwn_shm_write_4(mac, BWN_SHARED, 0, TESTVAL1); if (bwn_shm_read_4(mac, BWN_SHARED, 0) != TESTVAL1) goto error; bwn_shm_write_4(mac, BWN_SHARED, 0, backup); if ((bhnd_get_hwrev(sc->sc_dev) >= 3) && (bhnd_get_hwrev(sc->sc_dev) <= 10)) { BWN_WRITE_2(mac, BWN_TSF_CFP_START, 0xaaaa); BWN_WRITE_4(mac, BWN_TSF_CFP_START, 0xccccbbbb); if (BWN_READ_2(mac, BWN_TSF_CFP_START_LOW) != 0xbbbb) goto error; if (BWN_READ_2(mac, BWN_TSF_CFP_START_HIGH) != 0xcccc) goto error; } BWN_WRITE_4(mac, BWN_TSF_CFP_START, 0); v = BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_GMODE; if (v != (BWN_MACCTL_GMODE | BWN_MACCTL_IHR_ON)) goto error; BWN_UNLOCK(sc); return (0); error: BWN_UNLOCK(sc); device_printf(sc->sc_dev, "failed to validate the chipaccess\n"); return (ENODEV); } static int bwn_setup_channels(struct bwn_mac *mac, int have_bg, int have_a) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint8_t bands[IEEE80211_MODE_BYTES]; memset(ic->ic_channels, 0, sizeof(ic->ic_channels)); ic->ic_nchans = 0; DPRINTF(sc, BWN_DEBUG_EEPROM, "%s: called; bg=%d, a=%d\n", __func__, have_bg, have_a); if (have_bg) { memset(bands, 0, sizeof(bands)); setbit(bands, IEEE80211_MODE_11B); setbit(bands, IEEE80211_MODE_11G); bwn_addchannels(ic->ic_channels, IEEE80211_CHAN_MAX, &ic->ic_nchans, &bwn_chantable_bg, bands); } if (have_a) { memset(bands, 0, sizeof(bands)); setbit(bands, IEEE80211_MODE_11A); bwn_addchannels(ic->ic_channels, IEEE80211_CHAN_MAX, &ic->ic_nchans, &bwn_chantable_a, bands); } mac->mac_phy.supports_2ghz = have_bg; mac->mac_phy.supports_5ghz = have_a; return (ic->ic_nchans == 0 ? 
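	    /* fail if no channels were added for the advertised bands */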
ENXIO : 0); } uint32_t bwn_shm_read_4(struct bwn_mac *mac, uint16_t way, uint16_t offset) { uint32_t ret; BWN_ASSERT_LOCKED(mac->mac_sc); if (way == BWN_SHARED) { KASSERT((offset & 0x0001) == 0, ("%s:%d warn", __func__, __LINE__)); if (offset & 0x0003) { bwn_shm_ctlword(mac, way, offset >> 2); ret = BWN_READ_2(mac, BWN_SHM_DATA_UNALIGNED); ret <<= 16; bwn_shm_ctlword(mac, way, (offset >> 2) + 1); ret |= BWN_READ_2(mac, BWN_SHM_DATA); goto out; } offset >>= 2; } bwn_shm_ctlword(mac, way, offset); ret = BWN_READ_4(mac, BWN_SHM_DATA); out: return (ret); } uint16_t bwn_shm_read_2(struct bwn_mac *mac, uint16_t way, uint16_t offset) { uint16_t ret; BWN_ASSERT_LOCKED(mac->mac_sc); if (way == BWN_SHARED) { KASSERT((offset & 0x0001) == 0, ("%s:%d warn", __func__, __LINE__)); if (offset & 0x0003) { bwn_shm_ctlword(mac, way, offset >> 2); ret = BWN_READ_2(mac, BWN_SHM_DATA_UNALIGNED); goto out; } offset >>= 2; } bwn_shm_ctlword(mac, way, offset); ret = BWN_READ_2(mac, BWN_SHM_DATA); out: return (ret); } static void bwn_shm_ctlword(struct bwn_mac *mac, uint16_t way, uint16_t offset) { uint32_t control; control = way; control <<= 16; control |= offset; BWN_WRITE_4(mac, BWN_SHM_CONTROL, control); } void bwn_shm_write_4(struct bwn_mac *mac, uint16_t way, uint16_t offset, uint32_t value) { BWN_ASSERT_LOCKED(mac->mac_sc); if (way == BWN_SHARED) { KASSERT((offset & 0x0001) == 0, ("%s:%d warn", __func__, __LINE__)); if (offset & 0x0003) { bwn_shm_ctlword(mac, way, offset >> 2); BWN_WRITE_2(mac, BWN_SHM_DATA_UNALIGNED, (value >> 16) & 0xffff); bwn_shm_ctlword(mac, way, (offset >> 2) + 1); BWN_WRITE_2(mac, BWN_SHM_DATA, value & 0xffff); return; } offset >>= 2; } bwn_shm_ctlword(mac, way, offset); BWN_WRITE_4(mac, BWN_SHM_DATA, value); } void bwn_shm_write_2(struct bwn_mac *mac, uint16_t way, uint16_t offset, uint16_t value) { BWN_ASSERT_LOCKED(mac->mac_sc); if (way == BWN_SHARED) { KASSERT((offset & 0x0001) == 0, ("%s:%d warn", __func__, __LINE__)); if (offset & 0x0003) { bwn_shm_ctlword(mac, way, offset >> 2); BWN_WRITE_2(mac, BWN_SHM_DATA_UNALIGNED, value); return; } offset >>= 2; } bwn_shm_ctlword(mac, way, offset); BWN_WRITE_2(mac, BWN_SHM_DATA, value); } static void bwn_addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans, const struct bwn_channelinfo *ci, const uint8_t bands[]) { int i, error; for (i = 0, error = 0; i < ci->nchannels && error == 0; i++) { const struct bwn_channel *hc = &ci->channels[i]; error = ieee80211_add_channel(chans, maxchans, nchans, hc->ieee, hc->freq, hc->maxTxPow, 0, bands); } } static int bwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac = sc->sc_curmac; int error; if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0 || mac->mac_status < BWN_MAC_STATUS_STARTED) { m_freem(m); return (ENETDOWN); } BWN_LOCK(sc); if (bwn_tx_isfull(sc, m)) { m_freem(m); BWN_UNLOCK(sc); return (ENOBUFS); } error = bwn_tx_start(sc, ni, m); if (error == 0) sc->sc_watchdog_timer = 5; BWN_UNLOCK(sc); return (error); } /* * Callback from the 802.11 layer to update the slot time * based on the current setting. We use it to notify the * firmware of ERP changes and the f/w takes care of things * like slot time and preamble. 
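 * The value handed to bwn_set_slot_time() comes from net80211's
 * IEEE80211_GET_SLOTTIME() macro (9us when short slots are in use,
 * 20us otherwise).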
*/ static void bwn_updateslot(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac; BWN_LOCK(sc); if (sc->sc_flags & BWN_FLAG_RUNNING) { mac = (struct bwn_mac *)sc->sc_curmac; bwn_set_slot_time(mac, IEEE80211_GET_SLOTTIME(ic)); } BWN_UNLOCK(sc); } /* * Callback from the 802.11 layer after a promiscuous mode change. * Note this interface does not check the operating mode as this * is an internal callback and we are expected to honor the current * state (e.g. this is used for setting the interface in promiscuous * mode when operating in hostap mode to do ACS). */ static void bwn_update_promisc(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac = sc->sc_curmac; BWN_LOCK(sc); mac = sc->sc_curmac; if (mac != NULL && mac->mac_status >= BWN_MAC_STATUS_INITED) { if (ic->ic_promisc > 0) sc->sc_filters |= BWN_MACCTL_PROMISC; else sc->sc_filters &= ~BWN_MACCTL_PROMISC; bwn_set_opmode(mac); } BWN_UNLOCK(sc); } /* * Callback from the 802.11 layer to update WME parameters. */ static int bwn_wme_update(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac = sc->sc_curmac; struct chanAccParams chp; struct wmeParams *wmep; int i; ieee80211_wme_ic_getparams(ic, &chp); BWN_LOCK(sc); mac = sc->sc_curmac; if (mac != NULL && mac->mac_status >= BWN_MAC_STATUS_INITED) { bwn_mac_suspend(mac); for (i = 0; i < N(sc->sc_wmeParams); i++) { wmep = &chp.cap_wmeParams[i]; bwn_wme_loadparams(mac, wmep, bwn_wme_shm_offsets[i]); } bwn_mac_enable(mac); } BWN_UNLOCK(sc); return (0); } static void bwn_scan_start(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac; BWN_LOCK(sc); mac = sc->sc_curmac; if (mac != NULL && mac->mac_status >= BWN_MAC_STATUS_INITED) { sc->sc_filters |= BWN_MACCTL_BEACON_PROMISC; bwn_set_opmode(mac); /* disable CFP update during scan */ bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_SKIP_CFP_UPDATE); } BWN_UNLOCK(sc); } static void bwn_scan_end(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac; BWN_LOCK(sc); mac = sc->sc_curmac; if (mac != NULL && mac->mac_status >= BWN_MAC_STATUS_INITED) { sc->sc_filters &= ~BWN_MACCTL_BEACON_PROMISC; bwn_set_opmode(mac); bwn_hf_write(mac, bwn_hf_read(mac) & ~BWN_HF_SKIP_CFP_UPDATE); } BWN_UNLOCK(sc); } static void bwn_set_channel(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac = sc->sc_curmac; struct bwn_phy *phy = &mac->mac_phy; int chan, error; BWN_LOCK(sc); error = bwn_switch_band(sc, ic->ic_curchan); if (error) goto fail; bwn_mac_suspend(mac); bwn_set_txretry(mac, BWN_RETRY_SHORT, BWN_RETRY_LONG); chan = ieee80211_chan2ieee(ic, ic->ic_curchan); if (chan != phy->chan) bwn_switch_channel(mac, chan); /* TX power level */ if (ic->ic_curchan->ic_maxpower != 0 && ic->ic_curchan->ic_maxpower != phy->txpower) { phy->txpower = ic->ic_curchan->ic_maxpower / 2; bwn_phy_txpower_check(mac, BWN_TXPWR_IGNORE_TIME | BWN_TXPWR_IGNORE_TSSI); } bwn_set_txantenna(mac, BWN_ANT_DEFAULT); if (phy->set_antenna) phy->set_antenna(mac, BWN_ANT_DEFAULT); if (sc->sc_rf_enabled != phy->rf_on) { if (sc->sc_rf_enabled) { bwn_rf_turnon(mac); if (!(mac->mac_flags & BWN_MAC_FLAG_RADIO_ON)) device_printf(sc->sc_dev, "please turn on the RF switch\n"); } else bwn_rf_turnoff(mac); } bwn_mac_enable(mac); fail: BWN_UNLOCK(sc); } static struct ieee80211vap * bwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], 
const uint8_t mac[IEEE80211_ADDR_LEN]) { struct ieee80211vap *vap; struct bwn_vap *bvp; switch (opmode) { case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: case IEEE80211_M_STA: case IEEE80211_M_WDS: case IEEE80211_M_MONITOR: case IEEE80211_M_IBSS: case IEEE80211_M_AHDEMO: break; default: return (NULL); } bvp = malloc(sizeof(struct bwn_vap), M_80211_VAP, M_WAITOK | M_ZERO); vap = &bvp->bv_vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); /* override with driver methods */ bvp->bv_newstate = vap->iv_newstate; vap->iv_newstate = bwn_newstate; /* override max aid so sta's cannot assoc when we're out of sta id's */ vap->iv_max_aid = BWN_STAID_MAX; ieee80211_ratectl_init(vap); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status, mac); return (vap); } static void bwn_vap_delete(struct ieee80211vap *vap) { struct bwn_vap *bvp = BWN_VAP(vap); ieee80211_ratectl_deinit(vap); ieee80211_vap_detach(vap); free(bvp, M_80211_VAP); } static int bwn_init(struct bwn_softc *sc) { struct bwn_mac *mac; int error; BWN_ASSERT_LOCKED(sc); DPRINTF(sc, BWN_DEBUG_RESET, "%s: called\n", __func__); bzero(sc->sc_bssid, IEEE80211_ADDR_LEN); sc->sc_flags |= BWN_FLAG_NEED_BEACON_TP; sc->sc_filters = 0; bwn_wme_clear(sc); sc->sc_beacons[0] = sc->sc_beacons[1] = 0; sc->sc_rf_enabled = 1; mac = sc->sc_curmac; if (mac->mac_status == BWN_MAC_STATUS_UNINIT) { error = bwn_core_init(mac); if (error != 0) return (error); } if (mac->mac_status == BWN_MAC_STATUS_INITED) bwn_core_start(mac); bwn_set_opmode(mac); bwn_set_pretbtt(mac); bwn_spu_setdelay(mac, 0); bwn_set_macaddr(mac); sc->sc_flags |= BWN_FLAG_RUNNING; callout_reset(&sc->sc_rfswitch_ch, hz, bwn_rfswitch, sc); callout_reset(&sc->sc_watchdog_ch, hz, bwn_watchdog, sc); return (0); } static void bwn_stop(struct bwn_softc *sc) { struct bwn_mac *mac = sc->sc_curmac; BWN_ASSERT_LOCKED(sc); DPRINTF(sc, BWN_DEBUG_RESET, "%s: called\n", __func__); if (mac->mac_status >= BWN_MAC_STATUS_INITED) { /* XXX FIXME opmode not based on VAP */ bwn_set_opmode(mac); bwn_set_macaddr(mac); } if (mac->mac_status >= BWN_MAC_STATUS_STARTED) bwn_core_stop(mac); callout_stop(&sc->sc_led_blink_ch); sc->sc_led_blinking = 0; bwn_core_exit(mac); sc->sc_rf_enabled = 0; sc->sc_flags &= ~BWN_FLAG_RUNNING; } static void bwn_wme_clear(struct bwn_softc *sc) { #define MS(_v, _f) (((_v) & _f) >> _f##_S) struct wmeParams *p; unsigned int i; KASSERT(N(bwn_wme_shm_offsets) == N(sc->sc_wmeParams), ("%s:%d: fail", __func__, __LINE__)); for (i = 0; i < N(sc->sc_wmeParams); i++) { p = &(sc->sc_wmeParams[i]); switch (bwn_wme_shm_offsets[i]) { case BWN_WME_VOICE: p->wmep_txopLimit = 0; p->wmep_aifsn = 2; /* XXX FIXME: log2(cwmin) */ p->wmep_logcwmin = MS(0x0001, WME_PARAM_LOGCWMIN); p->wmep_logcwmax = MS(0x0001, WME_PARAM_LOGCWMAX); break; case BWN_WME_VIDEO: p->wmep_txopLimit = 0; p->wmep_aifsn = 2; /* XXX FIXME: log2(cwmin) */ p->wmep_logcwmin = MS(0x0001, WME_PARAM_LOGCWMIN); p->wmep_logcwmax = MS(0x0001, WME_PARAM_LOGCWMAX); break; case BWN_WME_BESTEFFORT: p->wmep_txopLimit = 0; p->wmep_aifsn = 3; /* XXX FIXME: log2(cwmin) */ p->wmep_logcwmin = MS(0x0001, WME_PARAM_LOGCWMIN); p->wmep_logcwmax = MS(0x03ff, WME_PARAM_LOGCWMAX); break; case BWN_WME_BACKGROUND: p->wmep_txopLimit = 0; p->wmep_aifsn = 7; /* XXX FIXME: log2(cwmin) */ p->wmep_logcwmin = MS(0x0001, WME_PARAM_LOGCWMIN); p->wmep_logcwmax = MS(0x03ff, WME_PARAM_LOGCWMAX); break; default: KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } } } static int bwn_core_forceclk(struct bwn_mac *mac, bool 
force) { struct bwn_softc *sc; bhnd_clock clock; int error; sc = mac->mac_sc; /* On PMU equipped devices, we do not need to force the HT clock */ if (sc->sc_pmu != NULL) return (0); /* Issue a PMU clock request */ if (force) clock = BHND_CLOCK_HT; else clock = BHND_CLOCK_DYN; if ((error = bhnd_request_clock(sc->sc_dev, clock))) { device_printf(sc->sc_dev, "%d clock request failed: %d\n", clock, error); return (error); } return (0); } static int bwn_core_init(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint64_t hf; int error; KASSERT(mac->mac_status == BWN_MAC_STATUS_UNINIT, ("%s:%d: fail", __func__, __LINE__)); DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: called\n", __func__); if ((error = bwn_core_forceclk(mac, true))) return (error); if (bhnd_is_hw_suspended(sc->sc_dev)) { if ((error = bwn_reset_core(mac, mac->mac_phy.gmode))) goto fail0; } mac->mac_flags &= ~BWN_MAC_FLAG_DFQVALID; mac->mac_flags |= BWN_MAC_FLAG_RADIO_ON; mac->mac_phy.hwpctl = (bwn_hwpctl) ? 1 : 0; BWN_GETTIME(mac->mac_phy.nexttime); mac->mac_phy.txerrors = BWN_TXERROR_MAX; bzero(&mac->mac_stats, sizeof(mac->mac_stats)); mac->mac_stats.link_noise = -95; mac->mac_reason_intr = 0; bzero(mac->mac_reason, sizeof(mac->mac_reason)); mac->mac_intr_mask = BWN_INTR_MASKTEMPLATE; #ifdef BWN_DEBUG if (sc->sc_debug & BWN_DEBUG_XMIT) mac->mac_intr_mask &= ~BWN_INTR_PHY_TXERR; #endif mac->mac_suspended = 1; mac->mac_task_state = 0; memset(&mac->mac_noise, 0, sizeof(mac->mac_noise)); mac->mac_phy.init_pre(mac); bwn_bt_disable(mac); if (mac->mac_phy.prepare_hw) { error = mac->mac_phy.prepare_hw(mac); if (error) goto fail0; } DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: chip_init\n", __func__); error = bwn_chip_init(mac); if (error) goto fail0; bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_COREREV, bhnd_get_hwrev(sc->sc_dev)); hf = bwn_hf_read(mac); if (mac->mac_phy.type == BWN_PHYTYPE_G) { hf |= BWN_HF_GPHY_SYM_WORKAROUND; if (sc->sc_board_info.board_flags & BHND_BFL_PACTRL) hf |= BWN_HF_PAGAINBOOST_OFDM_ON; if (mac->mac_phy.rev == 1) hf |= BWN_HF_GPHY_DC_CANCELFILTER; } if (mac->mac_phy.rf_ver == 0x2050) { if (mac->mac_phy.rf_rev < 6) hf |= BWN_HF_FORCE_VCO_RECALC; if (mac->mac_phy.rf_rev == 6) hf |= BWN_HF_4318_TSSI; } if (sc->sc_board_info.board_flags & BHND_BFL_NOPLLDOWN) hf |= BWN_HF_SLOWCLOCK_REQ_OFF; if (sc->sc_quirks & BWN_QUIRK_UCODE_SLOWCLOCK_WAR) hf |= BWN_HF_PCI_SLOWCLOCK_WORKAROUND; hf &= ~BWN_HF_SKIP_CFP_UPDATE; bwn_hf_write(mac, hf); /* Tell the firmware about the MAC capabilities */ if (bhnd_get_hwrev(sc->sc_dev) >= 13) { uint32_t cap; cap = BWN_READ_4(mac, BWN_MAC_HW_CAP); DPRINTF(sc, BWN_DEBUG_RESET, "%s: hw capabilities: 0x%08x\n", __func__, cap); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_MACHW_L, cap & 0xffff); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_MACHW_H, (cap >> 16) & 0xffff); } bwn_set_txretry(mac, BWN_RETRY_SHORT, BWN_RETRY_LONG); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_SHORT_RETRY_FALLBACK, 3); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_LONG_RETRY_FALLBACK, 2); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PROBE_RESP_MAXTIME, 1); bwn_rate_init(mac); bwn_set_phytxctl(mac); bwn_shm_write_2(mac, BWN_SCRATCH, BWN_SCRATCH_CONT_MIN, (mac->mac_phy.type == BWN_PHYTYPE_B) ? 
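	    /* B-PHY uses a larger minimum contention window */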
0x1f : 0xf); bwn_shm_write_2(mac, BWN_SCRATCH, BWN_SCRATCH_CONT_MAX, 0x3ff); if (sc->sc_quirks & BWN_QUIRK_NODMA) bwn_pio_init(mac); else bwn_dma_init(mac); bwn_wme_init(mac); bwn_spu_setdelay(mac, 1); bwn_bt_enable(mac); DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: powerup\n", __func__); if (sc->sc_board_info.board_flags & BHND_BFL_NOPLLDOWN) bwn_core_forceclk(mac, true); else bwn_core_forceclk(mac, false); bwn_set_macaddr(mac); bwn_crypt_init(mac); /* XXX LED initializatin */ mac->mac_status = BWN_MAC_STATUS_INITED; DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: done\n", __func__); return (error); fail0: bhnd_suspend_hw(sc->sc_dev, 0); KASSERT(mac->mac_status == BWN_MAC_STATUS_UNINIT, ("%s:%d: fail", __func__, __LINE__)); DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: fail\n", __func__); return (error); } static void bwn_core_start(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint32_t tmp; KASSERT(mac->mac_status == BWN_MAC_STATUS_INITED, ("%s:%d: fail", __func__, __LINE__)); if (bhnd_get_hwrev(sc->sc_dev) < 5) return; while (1) { tmp = BWN_READ_4(mac, BWN_XMITSTAT_0); if (!(tmp & 0x00000001)) break; tmp = BWN_READ_4(mac, BWN_XMITSTAT_1); } bwn_mac_enable(mac); BWN_WRITE_4(mac, BWN_INTR_MASK, mac->mac_intr_mask); callout_reset(&sc->sc_task_ch, hz * 15, bwn_tasks, mac); mac->mac_status = BWN_MAC_STATUS_STARTED; } static void bwn_core_exit(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint32_t macctl; BWN_ASSERT_LOCKED(mac->mac_sc); KASSERT(mac->mac_status <= BWN_MAC_STATUS_INITED, ("%s:%d: fail", __func__, __LINE__)); if (mac->mac_status != BWN_MAC_STATUS_INITED) return; mac->mac_status = BWN_MAC_STATUS_UNINIT; macctl = BWN_READ_4(mac, BWN_MACCTL); macctl &= ~BWN_MACCTL_MCODE_RUN; macctl |= BWN_MACCTL_MCODE_JMP0; BWN_WRITE_4(mac, BWN_MACCTL, macctl); bwn_dma_stop(mac); bwn_pio_stop(mac); bwn_chip_exit(mac); mac->mac_phy.switch_analog(mac, 0); bhnd_suspend_hw(sc->sc_dev, 0); } static void bwn_bt_disable(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; (void)sc; /* XXX do nothing yet */ } static int bwn_chip_init(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct bwn_phy *phy = &mac->mac_phy; uint32_t macctl; u_int delay; int error; macctl = BWN_MACCTL_IHR_ON | BWN_MACCTL_SHM_ON | BWN_MACCTL_STA; if (phy->gmode) macctl |= BWN_MACCTL_GMODE; BWN_WRITE_4(mac, BWN_MACCTL, macctl); error = bwn_fw_fillinfo(mac); if (error) return (error); error = bwn_fw_loaducode(mac); if (error) return (error); error = bwn_gpio_init(mac); if (error) return (error); error = bwn_fw_loadinitvals(mac); if (error) return (error); phy->switch_analog(mac, 1); error = bwn_phy_init(mac); if (error) return (error); if (phy->set_im) phy->set_im(mac, BWN_IMMODE_NONE); if (phy->set_antenna) phy->set_antenna(mac, BWN_ANT_DEFAULT); bwn_set_txantenna(mac, BWN_ANT_DEFAULT); if (phy->type == BWN_PHYTYPE_B) BWN_WRITE_2(mac, 0x005e, BWN_READ_2(mac, 0x005e) | 0x0004); BWN_WRITE_4(mac, 0x0100, 0x01000000); if (bhnd_get_hwrev(sc->sc_dev) < 5) BWN_WRITE_4(mac, 0x010c, 0x01000000); BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_STA); BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_STA); bwn_shm_write_2(mac, BWN_SHARED, 0x0074, 0x0000); bwn_set_opmode(mac); if (bhnd_get_hwrev(sc->sc_dev) < 3) { BWN_WRITE_2(mac, 0x060e, 0x0000); BWN_WRITE_2(mac, 0x0610, 0x8000); BWN_WRITE_2(mac, 0x0604, 0x0000); BWN_WRITE_2(mac, 0x0606, 0x0200); } else { BWN_WRITE_4(mac, 0x0188, 0x80000000); BWN_WRITE_4(mac, 0x018c, 0x02000000); } BWN_WRITE_4(mac, BWN_INTR_REASON, 
0x00004000); BWN_WRITE_4(mac, BWN_DMA0_INTR_MASK, 0x0001dc00); BWN_WRITE_4(mac, BWN_DMA1_INTR_MASK, 0x0000dc00); BWN_WRITE_4(mac, BWN_DMA2_INTR_MASK, 0x0000dc00); BWN_WRITE_4(mac, BWN_DMA3_INTR_MASK, 0x0001dc00); BWN_WRITE_4(mac, BWN_DMA4_INTR_MASK, 0x0000dc00); BWN_WRITE_4(mac, BWN_DMA5_INTR_MASK, 0x0000dc00); bwn_mac_phy_clock_set(mac, true); /* Provide the HT clock transition latency to the MAC core */ error = bhnd_get_clock_latency(sc->sc_dev, BHND_CLOCK_HT, &delay); if (error) { device_printf(sc->sc_dev, "failed to fetch HT clock latency: " "%d\n", error); return (error); } if (delay > UINT16_MAX) { device_printf(sc->sc_dev, "invalid HT clock latency: %u\n", delay); return (ENXIO); } BWN_WRITE_2(mac, BWN_POWERUP_DELAY, delay); return (0); } /* read hostflags */ uint64_t bwn_hf_read(struct bwn_mac *mac) { uint64_t ret; ret = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_HFHI); ret <<= 16; ret |= bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_HFMI); ret <<= 16; ret |= bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_HFLO); return (ret); } void bwn_hf_write(struct bwn_mac *mac, uint64_t value) { bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_HFLO, (value & 0x00000000ffffull)); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_HFMI, (value & 0x0000ffff0000ull) >> 16); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_HFHI, (value & 0xffff00000000ULL) >> 32); } static void bwn_set_txretry(struct bwn_mac *mac, int s, int l) { bwn_shm_write_2(mac, BWN_SCRATCH, BWN_SCRATCH_SHORT_RETRY, MIN(s, 0xf)); bwn_shm_write_2(mac, BWN_SCRATCH, BWN_SCRATCH_LONG_RETRY, MIN(l, 0xf)); } static void bwn_rate_init(struct bwn_mac *mac) { switch (mac->mac_phy.type) { case BWN_PHYTYPE_A: case BWN_PHYTYPE_G: case BWN_PHYTYPE_LP: case BWN_PHYTYPE_N: bwn_rate_write(mac, BWN_OFDM_RATE_6MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_12MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_18MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_24MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_36MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_48MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_54MB, 1); if (mac->mac_phy.type == BWN_PHYTYPE_A) break; /* FALLTHROUGH */ case BWN_PHYTYPE_B: bwn_rate_write(mac, BWN_CCK_RATE_1MB, 0); bwn_rate_write(mac, BWN_CCK_RATE_2MB, 0); bwn_rate_write(mac, BWN_CCK_RATE_5MB, 0); bwn_rate_write(mac, BWN_CCK_RATE_11MB, 0); break; default: KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } } static void bwn_rate_write(struct bwn_mac *mac, uint16_t rate, int ofdm) { uint16_t offset; if (ofdm) { offset = 0x480; offset += (bwn_plcp_getofdm(rate) & 0x000f) * 2; } else { offset = 0x4c0; offset += (bwn_plcp_getcck(rate) & 0x000f) * 2; } bwn_shm_write_2(mac, BWN_SHARED, offset + 0x20, bwn_shm_read_2(mac, BWN_SHARED, offset)); } static uint8_t bwn_plcp_getcck(const uint8_t bitrate) { switch (bitrate) { case BWN_CCK_RATE_1MB: return (0x0a); case BWN_CCK_RATE_2MB: return (0x14); case BWN_CCK_RATE_5MB: return (0x37); case BWN_CCK_RATE_11MB: return (0x6e); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (0); } static uint8_t bwn_plcp_getofdm(const uint8_t bitrate) { switch (bitrate) { case BWN_OFDM_RATE_6MB: return (0xb); case BWN_OFDM_RATE_9MB: return (0xf); case BWN_OFDM_RATE_12MB: return (0xa); case BWN_OFDM_RATE_18MB: return (0xe); case BWN_OFDM_RATE_24MB: return (0x9); case BWN_OFDM_RATE_36MB: return (0xd); case BWN_OFDM_RATE_48MB: return (0x8); case BWN_OFDM_RATE_54MB: return (0xc); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (0); } static void bwn_set_phytxctl(struct bwn_mac *mac) { uint16_t ctl; ctl = (BWN_TX_PHY_ENC_CCK | 
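	    /*
	     * CCK encoding with automatic antenna 0/1 selection; this default
	     * control word is used for beacon, ACK/CTS and probe response
	     * frames via the SHM writes below.
	     */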
BWN_TX_PHY_ANT01AUTO | BWN_TX_PHY_TXPWR); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_BEACON_PHYCTL, ctl); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_ACKCTS_PHYCTL, ctl); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PROBE_RESP_PHYCTL, ctl); } static void bwn_pio_init(struct bwn_mac *mac) { struct bwn_pio *pio = &mac->mac_method.pio; BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_BIGENDIAN); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_RX_PADOFFSET, 0); bwn_pio_set_txqueue(mac, &pio->wme[WME_AC_BK], 0); bwn_pio_set_txqueue(mac, &pio->wme[WME_AC_BE], 1); bwn_pio_set_txqueue(mac, &pio->wme[WME_AC_VI], 2); bwn_pio_set_txqueue(mac, &pio->wme[WME_AC_VO], 3); bwn_pio_set_txqueue(mac, &pio->mcast, 4); bwn_pio_setupqueue_rx(mac, &pio->rx, 0); } static void bwn_pio_set_txqueue(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, int index) { struct bwn_pio_txpkt *tp; struct bwn_softc *sc = mac->mac_sc; unsigned int i; tq->tq_base = bwn_pio_idx2base(mac, index) + BWN_PIO_TXQOFFSET(mac); tq->tq_index = index; tq->tq_free = BWN_PIO_MAX_TXPACKETS; if (bhnd_get_hwrev(sc->sc_dev) >= 8) tq->tq_size = 1920; else { tq->tq_size = bwn_pio_read_2(mac, tq, BWN_PIO_TXQBUFSIZE); tq->tq_size -= 80; } TAILQ_INIT(&tq->tq_pktlist); for (i = 0; i < N(tq->tq_pkts); i++) { tp = &(tq->tq_pkts[i]); tp->tp_index = i; tp->tp_queue = tq; TAILQ_INSERT_TAIL(&tq->tq_pktlist, tp, tp_list); } } static uint16_t bwn_pio_idx2base(struct bwn_mac *mac, int index) { struct bwn_softc *sc = mac->mac_sc; static const uint16_t bases[] = { BWN_PIO_BASE0, BWN_PIO_BASE1, BWN_PIO_BASE2, BWN_PIO_BASE3, BWN_PIO_BASE4, BWN_PIO_BASE5, BWN_PIO_BASE6, BWN_PIO_BASE7, }; static const uint16_t bases_rev11[] = { BWN_PIO11_BASE0, BWN_PIO11_BASE1, BWN_PIO11_BASE2, BWN_PIO11_BASE3, BWN_PIO11_BASE4, BWN_PIO11_BASE5, }; if (bhnd_get_hwrev(sc->sc_dev) >= 11) { if (index >= N(bases_rev11)) device_printf(sc->sc_dev, "%s: warning\n", __func__); return (bases_rev11[index]); } if (index >= N(bases)) device_printf(sc->sc_dev, "%s: warning\n", __func__); return (bases[index]); } static void bwn_pio_setupqueue_rx(struct bwn_mac *mac, struct bwn_pio_rxqueue *prq, int index) { struct bwn_softc *sc = mac->mac_sc; prq->prq_mac = mac; prq->prq_rev = bhnd_get_hwrev(sc->sc_dev); prq->prq_base = bwn_pio_idx2base(mac, index) + BWN_PIO_RXQOFFSET(mac); bwn_dma_rxdirectfifo(mac, index, 1); } static void bwn_destroy_pioqueue_tx(struct bwn_pio_txqueue *tq) { if (tq == NULL) return; bwn_pio_cancel_tx_packets(tq); } static void bwn_destroy_queue_tx(struct bwn_pio_txqueue *pio) { bwn_destroy_pioqueue_tx(pio); } static uint16_t bwn_pio_read_2(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, uint16_t offset) { return (BWN_READ_2(mac, tq->tq_base + offset)); } static void bwn_dma_rxdirectfifo(struct bwn_mac *mac, int idx, uint8_t enable) { uint32_t ctl; uint16_t base; base = bwn_dma_base(mac->mac_dmatype, idx); if (mac->mac_dmatype == BHND_DMA_ADDR_64BIT) { ctl = BWN_READ_4(mac, base + BWN_DMA64_RXCTL); ctl &= ~BWN_DMA64_RXDIRECTFIFO; if (enable) ctl |= BWN_DMA64_RXDIRECTFIFO; BWN_WRITE_4(mac, base + BWN_DMA64_RXCTL, ctl); } else { ctl = BWN_READ_4(mac, base + BWN_DMA32_RXCTL); ctl &= ~BWN_DMA32_RXDIRECTFIFO; if (enable) ctl |= BWN_DMA32_RXDIRECTFIFO; BWN_WRITE_4(mac, base + BWN_DMA32_RXCTL, ctl); } } static void bwn_pio_cancel_tx_packets(struct bwn_pio_txqueue *tq) { struct bwn_pio_txpkt *tp; unsigned int i; for (i = 0; i < N(tq->tq_pkts); i++) { tp = &(tq->tq_pkts[i]); if (tp->tp_m) { m_freem(tp->tp_m); tp->tp_m = NULL; } } } static uint16_t bwn_dma_base(int type, 
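    /* controller_idx selects one of the six DMA engines (0..5) */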
int controller_idx) { static const uint16_t map64[] = { BWN_DMA64_BASE0, BWN_DMA64_BASE1, BWN_DMA64_BASE2, BWN_DMA64_BASE3, BWN_DMA64_BASE4, BWN_DMA64_BASE5, }; static const uint16_t map32[] = { BWN_DMA32_BASE0, BWN_DMA32_BASE1, BWN_DMA32_BASE2, BWN_DMA32_BASE3, BWN_DMA32_BASE4, BWN_DMA32_BASE5, }; if (type == BHND_DMA_ADDR_64BIT) { KASSERT(controller_idx >= 0 && controller_idx < N(map64), ("%s:%d: fail", __func__, __LINE__)); return (map64[controller_idx]); } KASSERT(controller_idx >= 0 && controller_idx < N(map32), ("%s:%d: fail", __func__, __LINE__)); return (map32[controller_idx]); } static void bwn_dma_init(struct bwn_mac *mac) { struct bwn_dma *dma = &mac->mac_method.dma; /* setup TX DMA channels. */ bwn_dma_setup(dma->wme[WME_AC_BK]); bwn_dma_setup(dma->wme[WME_AC_BE]); bwn_dma_setup(dma->wme[WME_AC_VI]); bwn_dma_setup(dma->wme[WME_AC_VO]); bwn_dma_setup(dma->mcast); /* setup RX DMA channel. */ bwn_dma_setup(dma->rx); } static struct bwn_dma_ring * bwn_dma_ringsetup(struct bwn_mac *mac, int controller_index, int for_tx) { struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_dma_ring *dr; struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *mt; struct bwn_softc *sc = mac->mac_sc; int error, i; dr = malloc(sizeof(*dr), M_DEVBUF, M_NOWAIT | M_ZERO); if (dr == NULL) goto out; dr->dr_numslots = BWN_RXRING_SLOTS; if (for_tx) dr->dr_numslots = BWN_TXRING_SLOTS; dr->dr_meta = malloc(dr->dr_numslots * sizeof(struct bwn_dmadesc_meta), M_DEVBUF, M_NOWAIT | M_ZERO); if (dr->dr_meta == NULL) goto fail0; dr->dr_type = mac->mac_dmatype; dr->dr_mac = mac; dr->dr_base = bwn_dma_base(dr->dr_type, controller_index); dr->dr_index = controller_index; if (dr->dr_type == BHND_DMA_ADDR_64BIT) { dr->getdesc = bwn_dma_64_getdesc; dr->setdesc = bwn_dma_64_setdesc; dr->start_transfer = bwn_dma_64_start_transfer; dr->suspend = bwn_dma_64_suspend; dr->resume = bwn_dma_64_resume; dr->get_curslot = bwn_dma_64_get_curslot; dr->set_curslot = bwn_dma_64_set_curslot; } else { dr->getdesc = bwn_dma_32_getdesc; dr->setdesc = bwn_dma_32_setdesc; dr->start_transfer = bwn_dma_32_start_transfer; dr->suspend = bwn_dma_32_suspend; dr->resume = bwn_dma_32_resume; dr->get_curslot = bwn_dma_32_get_curslot; dr->set_curslot = bwn_dma_32_set_curslot; } if (for_tx) { dr->dr_tx = 1; dr->dr_curslot = -1; } else { if (dr->dr_index == 0) { switch (mac->mac_fw.fw_hdr_format) { case BWN_FW_HDR_351: case BWN_FW_HDR_410: dr->dr_rx_bufsize = BWN_DMA0_RX_BUFFERSIZE_FW351; dr->dr_frameoffset = BWN_DMA0_RX_FRAMEOFFSET_FW351; break; case BWN_FW_HDR_598: dr->dr_rx_bufsize = BWN_DMA0_RX_BUFFERSIZE_FW598; dr->dr_frameoffset = BWN_DMA0_RX_FRAMEOFFSET_FW598; break; } } else KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } error = bwn_dma_allocringmemory(dr); if (error) goto fail2; if (for_tx) { /* * Assumption: BWN_TXRING_SLOTS can be divided by * BWN_TX_SLOTS_PER_FRAME */ KASSERT(BWN_TXRING_SLOTS % BWN_TX_SLOTS_PER_FRAME == 0, ("%s:%d: fail", __func__, __LINE__)); dr->dr_txhdr_cache = contigmalloc( (dr->dr_numslots / BWN_TX_SLOTS_PER_FRAME) * BWN_MAXTXHDRSIZE, M_DEVBUF, M_ZERO, 0, BUS_SPACE_MAXADDR, 8, 0); if (dr->dr_txhdr_cache == NULL) { device_printf(sc->sc_dev, "can't allocate TX header DMA memory\n"); goto fail1; } /* * Create TX ring DMA stuffs */ error = bus_dma_tag_create(dma->parent_dtag, BWN_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BWN_HDRSIZE(mac), 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &dr->dr_txring_dtag); if (error) { device_printf(sc->sc_dev, "can't create TX ring DMA tag: TODO frees\n"); goto 
fail2; } for (i = 0; i < dr->dr_numslots; i += 2) { dr->getdesc(dr, i, &desc, &mt); mt->mt_txtype = BWN_DMADESC_METATYPE_HEADER; mt->mt_m = NULL; mt->mt_ni = NULL; mt->mt_islast = 0; error = bus_dmamap_create(dr->dr_txring_dtag, 0, &mt->mt_dmap); if (error) { device_printf(sc->sc_dev, "can't create RX buf DMA map\n"); goto fail2; } dr->getdesc(dr, i + 1, &desc, &mt); mt->mt_txtype = BWN_DMADESC_METATYPE_BODY; mt->mt_m = NULL; mt->mt_ni = NULL; mt->mt_islast = 1; error = bus_dmamap_create(dma->txbuf_dtag, 0, &mt->mt_dmap); if (error) { device_printf(sc->sc_dev, "can't create RX buf DMA map\n"); goto fail2; } } } else { error = bus_dmamap_create(dma->rxbuf_dtag, 0, &dr->dr_spare_dmap); if (error) { device_printf(sc->sc_dev, "can't create RX buf DMA map\n"); goto out; /* XXX wrong! */ } for (i = 0; i < dr->dr_numslots; i++) { dr->getdesc(dr, i, &desc, &mt); error = bus_dmamap_create(dma->rxbuf_dtag, 0, &mt->mt_dmap); if (error) { device_printf(sc->sc_dev, "can't create RX buf DMA map\n"); goto out; /* XXX wrong! */ } error = bwn_dma_newbuf(dr, desc, mt, 1); if (error) { device_printf(sc->sc_dev, "failed to allocate RX buf\n"); goto out; /* XXX wrong! */ } } bus_dmamap_sync(dr->dr_ring_dtag, dr->dr_ring_dmap, BUS_DMASYNC_PREWRITE); dr->dr_usedslot = dr->dr_numslots; } out: return (dr); fail2: if (dr->dr_txhdr_cache != NULL) { contigfree(dr->dr_txhdr_cache, (dr->dr_numslots / BWN_TX_SLOTS_PER_FRAME) * BWN_MAXTXHDRSIZE, M_DEVBUF); } fail1: free(dr->dr_meta, M_DEVBUF); fail0: free(dr, M_DEVBUF); return (NULL); } static void bwn_dma_ringfree(struct bwn_dma_ring **dr) { if (dr == NULL) return; bwn_dma_free_descbufs(*dr); bwn_dma_free_ringmemory(*dr); if ((*dr)->dr_txhdr_cache != NULL) { contigfree((*dr)->dr_txhdr_cache, ((*dr)->dr_numslots / BWN_TX_SLOTS_PER_FRAME) * BWN_MAXTXHDRSIZE, M_DEVBUF); } free((*dr)->dr_meta, M_DEVBUF); free(*dr, M_DEVBUF); *dr = NULL; } static void bwn_dma_32_getdesc(struct bwn_dma_ring *dr, int slot, struct bwn_dmadesc_generic **gdesc, struct bwn_dmadesc_meta **meta) { struct bwn_dmadesc32 *desc; *meta = &(dr->dr_meta[slot]); desc = dr->dr_ring_descbase; desc = &(desc[slot]); *gdesc = (struct bwn_dmadesc_generic *)desc; } static void bwn_dma_32_setdesc(struct bwn_dma_ring *dr, struct bwn_dmadesc_generic *desc, bus_addr_t dmaaddr, uint16_t bufsize, int start, int end, int irq) { struct bwn_dmadesc32 *descbase; struct bwn_dma *dma; struct bhnd_dma_translation *dt; uint32_t addr, addrext, ctl; int slot; descbase = dr->dr_ring_descbase; dma = &dr->dr_mac->mac_method.dma; dt = &dma->translation; slot = (int)(&(desc->dma.dma32) - descbase); KASSERT(slot >= 0 && slot < dr->dr_numslots, ("%s:%d: fail", __func__, __LINE__)); addr = (dmaaddr & dt->addr_mask) | dt->base_addr; addrext = ((dmaaddr & dt->addrext_mask) >> dma->addrext_shift); ctl = bufsize & BWN_DMA32_DCTL_BYTECNT; if (slot == dr->dr_numslots - 1) ctl |= BWN_DMA32_DCTL_DTABLEEND; if (start) ctl |= BWN_DMA32_DCTL_FRAMESTART; if (end) ctl |= BWN_DMA32_DCTL_FRAMEEND; if (irq) ctl |= BWN_DMA32_DCTL_IRQ; ctl |= (addrext << BWN_DMA32_DCTL_ADDREXT_SHIFT) & BWN_DMA32_DCTL_ADDREXT_MASK; desc->dma.dma32.control = htole32(ctl); desc->dma.dma32.address = htole32(addr); } static void bwn_dma_32_start_transfer(struct bwn_dma_ring *dr, int slot) { BWN_DMA_WRITE(dr, BWN_DMA32_TXINDEX, (uint32_t)(slot * sizeof(struct bwn_dmadesc32))); } static void bwn_dma_32_suspend(struct bwn_dma_ring *dr) { BWN_DMA_WRITE(dr, BWN_DMA32_TXCTL, BWN_DMA_READ(dr, BWN_DMA32_TXCTL) | BWN_DMA32_TXSUSPEND); } static void bwn_dma_32_resume(struct bwn_dma_ring 
*dr) { BWN_DMA_WRITE(dr, BWN_DMA32_TXCTL, BWN_DMA_READ(dr, BWN_DMA32_TXCTL) & ~BWN_DMA32_TXSUSPEND); } static int bwn_dma_32_get_curslot(struct bwn_dma_ring *dr) { uint32_t val; val = BWN_DMA_READ(dr, BWN_DMA32_RXSTATUS); val &= BWN_DMA32_RXDPTR; return (val / sizeof(struct bwn_dmadesc32)); } static void bwn_dma_32_set_curslot(struct bwn_dma_ring *dr, int slot) { BWN_DMA_WRITE(dr, BWN_DMA32_RXINDEX, (uint32_t) (slot * sizeof(struct bwn_dmadesc32))); } static void bwn_dma_64_getdesc(struct bwn_dma_ring *dr, int slot, struct bwn_dmadesc_generic **gdesc, struct bwn_dmadesc_meta **meta) { struct bwn_dmadesc64 *desc; *meta = &(dr->dr_meta[slot]); desc = dr->dr_ring_descbase; desc = &(desc[slot]); *gdesc = (struct bwn_dmadesc_generic *)desc; } static void bwn_dma_64_setdesc(struct bwn_dma_ring *dr, struct bwn_dmadesc_generic *desc, bus_addr_t dmaaddr, uint16_t bufsize, int start, int end, int irq) { struct bwn_dmadesc64 *descbase; struct bwn_dma *dma; struct bhnd_dma_translation *dt; bhnd_addr_t addr; uint32_t addrhi, addrlo; uint32_t addrext; uint32_t ctl0, ctl1; int slot; descbase = dr->dr_ring_descbase; dma = &dr->dr_mac->mac_method.dma; dt = &dma->translation; slot = (int)(&(desc->dma.dma64) - descbase); KASSERT(slot >= 0 && slot < dr->dr_numslots, ("%s:%d: fail", __func__, __LINE__)); addr = (dmaaddr & dt->addr_mask) | dt->base_addr; addrhi = (addr >> 32); addrlo = (addr & UINT32_MAX); addrext = ((dmaaddr & dt->addrext_mask) >> dma->addrext_shift); ctl0 = 0; if (slot == dr->dr_numslots - 1) ctl0 |= BWN_DMA64_DCTL0_DTABLEEND; if (start) ctl0 |= BWN_DMA64_DCTL0_FRAMESTART; if (end) ctl0 |= BWN_DMA64_DCTL0_FRAMEEND; if (irq) ctl0 |= BWN_DMA64_DCTL0_IRQ; ctl1 = 0; ctl1 |= bufsize & BWN_DMA64_DCTL1_BYTECNT; ctl1 |= (addrext << BWN_DMA64_DCTL1_ADDREXT_SHIFT) & BWN_DMA64_DCTL1_ADDREXT_MASK; desc->dma.dma64.control0 = htole32(ctl0); desc->dma.dma64.control1 = htole32(ctl1); desc->dma.dma64.address_low = htole32(addrlo); desc->dma.dma64.address_high = htole32(addrhi); } static void bwn_dma_64_start_transfer(struct bwn_dma_ring *dr, int slot) { BWN_DMA_WRITE(dr, BWN_DMA64_TXINDEX, (uint32_t)(slot * sizeof(struct bwn_dmadesc64))); } static void bwn_dma_64_suspend(struct bwn_dma_ring *dr) { BWN_DMA_WRITE(dr, BWN_DMA64_TXCTL, BWN_DMA_READ(dr, BWN_DMA64_TXCTL) | BWN_DMA64_TXSUSPEND); } static void bwn_dma_64_resume(struct bwn_dma_ring *dr) { BWN_DMA_WRITE(dr, BWN_DMA64_TXCTL, BWN_DMA_READ(dr, BWN_DMA64_TXCTL) & ~BWN_DMA64_TXSUSPEND); } static int bwn_dma_64_get_curslot(struct bwn_dma_ring *dr) { uint32_t val; val = BWN_DMA_READ(dr, BWN_DMA64_RXSTATUS); val &= BWN_DMA64_RXSTATDPTR; return (val / sizeof(struct bwn_dmadesc64)); } static void bwn_dma_64_set_curslot(struct bwn_dma_ring *dr, int slot) { BWN_DMA_WRITE(dr, BWN_DMA64_RXINDEX, (uint32_t)(slot * sizeof(struct bwn_dmadesc64))); } static int bwn_dma_allocringmemory(struct bwn_dma_ring *dr) { struct bwn_mac *mac = dr->dr_mac; struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_softc *sc = mac->mac_sc; int error; error = bus_dma_tag_create(dma->parent_dtag, BWN_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BWN_DMA_RINGMEMSIZE, 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &dr->dr_ring_dtag); if (error) { device_printf(sc->sc_dev, "can't create TX ring DMA tag: TODO frees\n"); return (-1); } error = bus_dmamem_alloc(dr->dr_ring_dtag, &dr->dr_ring_descbase, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dr->dr_ring_dmap); if (error) { device_printf(sc->sc_dev, "can't allocate DMA mem: TODO frees\n"); return (-1); } error = 
bus_dmamap_load(dr->dr_ring_dtag, dr->dr_ring_dmap, dr->dr_ring_descbase, BWN_DMA_RINGMEMSIZE, bwn_dma_ring_addr, &dr->dr_ring_dmabase, BUS_DMA_NOWAIT); if (error) { device_printf(sc->sc_dev, "can't load DMA mem: TODO free\n"); return (-1); } return (0); } static void bwn_dma_setup(struct bwn_dma_ring *dr) { struct bwn_mac *mac; struct bwn_dma *dma; struct bhnd_dma_translation *dt; bhnd_addr_t addr, paddr; uint32_t addrhi, addrlo, addrext, value; mac = dr->dr_mac; dma = &mac->mac_method.dma; dt = &dma->translation; paddr = dr->dr_ring_dmabase; addr = (paddr & dt->addr_mask) | dt->base_addr; addrhi = (addr >> 32); addrlo = (addr & UINT32_MAX); addrext = ((paddr & dt->addrext_mask) >> dma->addrext_shift); if (dr->dr_tx) { dr->dr_curslot = -1; if (dr->dr_type == BHND_DMA_ADDR_64BIT) { value = BWN_DMA64_TXENABLE; value |= BWN_DMA64_TXPARITY_DISABLE; value |= (addrext << BWN_DMA64_TXADDREXT_SHIFT) & BWN_DMA64_TXADDREXT_MASK; BWN_DMA_WRITE(dr, BWN_DMA64_TXCTL, value); BWN_DMA_WRITE(dr, BWN_DMA64_TXRINGLO, addrlo); BWN_DMA_WRITE(dr, BWN_DMA64_TXRINGHI, addrhi); } else { value = BWN_DMA32_TXENABLE; value |= BWN_DMA32_TXPARITY_DISABLE; value |= (addrext << BWN_DMA32_TXADDREXT_SHIFT) & BWN_DMA32_TXADDREXT_MASK; BWN_DMA_WRITE(dr, BWN_DMA32_TXCTL, value); BWN_DMA_WRITE(dr, BWN_DMA32_TXRING, addrlo); } return; } /* * set for RX */ dr->dr_usedslot = dr->dr_numslots; if (dr->dr_type == BHND_DMA_ADDR_64BIT) { value = (dr->dr_frameoffset << BWN_DMA64_RXFROFF_SHIFT); value |= BWN_DMA64_RXENABLE; value |= BWN_DMA64_RXPARITY_DISABLE; value |= (addrext << BWN_DMA64_RXADDREXT_SHIFT) & BWN_DMA64_RXADDREXT_MASK; BWN_DMA_WRITE(dr, BWN_DMA64_RXCTL, value); BWN_DMA_WRITE(dr, BWN_DMA64_RXRINGLO, addrlo); BWN_DMA_WRITE(dr, BWN_DMA64_RXRINGHI, addrhi); BWN_DMA_WRITE(dr, BWN_DMA64_RXINDEX, dr->dr_numslots * sizeof(struct bwn_dmadesc64)); } else { value = (dr->dr_frameoffset << BWN_DMA32_RXFROFF_SHIFT); value |= BWN_DMA32_RXENABLE; value |= BWN_DMA32_RXPARITY_DISABLE; value |= (addrext << BWN_DMA32_RXADDREXT_SHIFT) & BWN_DMA32_RXADDREXT_MASK; BWN_DMA_WRITE(dr, BWN_DMA32_RXCTL, value); BWN_DMA_WRITE(dr, BWN_DMA32_RXRING, addrlo); BWN_DMA_WRITE(dr, BWN_DMA32_RXINDEX, dr->dr_numslots * sizeof(struct bwn_dmadesc32)); } } static void bwn_dma_free_ringmemory(struct bwn_dma_ring *dr) { bus_dmamap_unload(dr->dr_ring_dtag, dr->dr_ring_dmap); bus_dmamem_free(dr->dr_ring_dtag, dr->dr_ring_descbase, dr->dr_ring_dmap); } static void bwn_dma_cleanup(struct bwn_dma_ring *dr) { if (dr->dr_tx) { bwn_dma_tx_reset(dr->dr_mac, dr->dr_base, dr->dr_type); if (dr->dr_type == BHND_DMA_ADDR_64BIT) { BWN_DMA_WRITE(dr, BWN_DMA64_TXRINGLO, 0); BWN_DMA_WRITE(dr, BWN_DMA64_TXRINGHI, 0); } else BWN_DMA_WRITE(dr, BWN_DMA32_TXRING, 0); } else { bwn_dma_rx_reset(dr->dr_mac, dr->dr_base, dr->dr_type); if (dr->dr_type == BHND_DMA_ADDR_64BIT) { BWN_DMA_WRITE(dr, BWN_DMA64_RXRINGLO, 0); BWN_DMA_WRITE(dr, BWN_DMA64_RXRINGHI, 0); } else BWN_DMA_WRITE(dr, BWN_DMA32_RXRING, 0); } } static void bwn_dma_free_descbufs(struct bwn_dma_ring *dr) { struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *meta; struct bwn_mac *mac = dr->dr_mac; struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_softc *sc = mac->mac_sc; int i; if (!dr->dr_usedslot) return; for (i = 0; i < dr->dr_numslots; i++) { dr->getdesc(dr, i, &desc, &meta); if (meta->mt_m == NULL) { if (!dr->dr_tx) device_printf(sc->sc_dev, "%s: not TX?\n", __func__); continue; } if (dr->dr_tx) { if (meta->mt_txtype == BWN_DMADESC_METATYPE_HEADER) bus_dmamap_unload(dr->dr_txring_dtag, meta->mt_dmap); else if 
(meta->mt_txtype == BWN_DMADESC_METATYPE_BODY) bus_dmamap_unload(dma->txbuf_dtag, meta->mt_dmap); } else bus_dmamap_unload(dma->rxbuf_dtag, meta->mt_dmap); bwn_dma_free_descbuf(dr, meta); } } static int bwn_dma_tx_reset(struct bwn_mac *mac, uint16_t base, int type) { struct bwn_softc *sc = mac->mac_sc; uint32_t value; int i; uint16_t offset; for (i = 0; i < 10; i++) { offset = (type == BHND_DMA_ADDR_64BIT) ? BWN_DMA64_TXSTATUS : BWN_DMA32_TXSTATUS; value = BWN_READ_4(mac, base + offset); if (type == BHND_DMA_ADDR_64BIT) { value &= BWN_DMA64_TXSTAT; if (value == BWN_DMA64_TXSTAT_DISABLED || value == BWN_DMA64_TXSTAT_IDLEWAIT || value == BWN_DMA64_TXSTAT_STOPPED) break; } else { value &= BWN_DMA32_TXSTATE; if (value == BWN_DMA32_TXSTAT_DISABLED || value == BWN_DMA32_TXSTAT_IDLEWAIT || value == BWN_DMA32_TXSTAT_STOPPED) break; } DELAY(1000); } offset = (type == BHND_DMA_ADDR_64BIT) ? BWN_DMA64_TXCTL : BWN_DMA32_TXCTL; BWN_WRITE_4(mac, base + offset, 0); for (i = 0; i < 10; i++) { offset = (type == BHND_DMA_ADDR_64BIT) ? BWN_DMA64_TXSTATUS : BWN_DMA32_TXSTATUS; value = BWN_READ_4(mac, base + offset); if (type == BHND_DMA_ADDR_64BIT) { value &= BWN_DMA64_TXSTAT; if (value == BWN_DMA64_TXSTAT_DISABLED) { i = -1; break; } } else { value &= BWN_DMA32_TXSTATE; if (value == BWN_DMA32_TXSTAT_DISABLED) { i = -1; break; } } DELAY(1000); } if (i != -1) { device_printf(sc->sc_dev, "%s: timed out\n", __func__); return (ENODEV); } DELAY(1000); return (0); } static int bwn_dma_rx_reset(struct bwn_mac *mac, uint16_t base, int type) { struct bwn_softc *sc = mac->mac_sc; uint32_t value; int i; uint16_t offset; offset = (type == BHND_DMA_ADDR_64BIT) ? BWN_DMA64_RXCTL : BWN_DMA32_RXCTL; BWN_WRITE_4(mac, base + offset, 0); for (i = 0; i < 10; i++) { offset = (type == BHND_DMA_ADDR_64BIT) ? BWN_DMA64_RXSTATUS : BWN_DMA32_RXSTATUS; value = BWN_READ_4(mac, base + offset); if (type == BHND_DMA_ADDR_64BIT) { value &= BWN_DMA64_RXSTAT; if (value == BWN_DMA64_RXSTAT_DISABLED) { i = -1; break; } } else { value &= BWN_DMA32_RXSTATE; if (value == BWN_DMA32_RXSTAT_DISABLED) { i = -1; break; } } DELAY(1000); } if (i != -1) { device_printf(sc->sc_dev, "%s: timed out\n", __func__); return (ENODEV); } return (0); } static void bwn_dma_free_descbuf(struct bwn_dma_ring *dr, struct bwn_dmadesc_meta *meta) { if (meta->mt_m != NULL) { m_freem(meta->mt_m); meta->mt_m = NULL; } if (meta->mt_ni != NULL) { ieee80211_free_node(meta->mt_ni); meta->mt_ni = NULL; } } static void bwn_dma_set_redzone(struct bwn_dma_ring *dr, struct mbuf *m) { struct bwn_rxhdr4 *rxhdr; unsigned char *frame; rxhdr = mtod(m, struct bwn_rxhdr4 *); rxhdr->frame_len = 0; KASSERT(dr->dr_rx_bufsize >= dr->dr_frameoffset + sizeof(struct bwn_plcp6) + 2, ("%s:%d: fail", __func__, __LINE__)); frame = mtod(m, char *) + dr->dr_frameoffset; memset(frame, 0xff, sizeof(struct bwn_plcp6) + 2 /* padding */); } static uint8_t bwn_dma_check_redzone(struct bwn_dma_ring *dr, struct mbuf *m) { unsigned char *f = mtod(m, char *) + dr->dr_frameoffset; return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xff); } static void bwn_wme_init(struct bwn_mac *mac) { bwn_wme_load(mac); /* enable WME support. */ bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_EDCF); BWN_WRITE_2(mac, BWN_IFSCTL, BWN_READ_2(mac, BWN_IFSCTL) | BWN_IFSCTL_USE_EDCF); } static void bwn_spu_setdelay(struct bwn_mac *mac, int idle) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint16_t delay; /* microsec */ delay = (mac->mac_phy.type == BWN_PHYTYPE_A) ? 
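	    /* A-PHY needs a longer wakeup delay than B/G */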
3700 : 1050; if (ic->ic_opmode == IEEE80211_M_IBSS || idle) delay = 500; if ((mac->mac_phy.rf_ver == 0x2050) && (mac->mac_phy.rf_rev == 8)) delay = max(delay, (uint16_t)2400); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_SPU_WAKEUP, delay); } static void bwn_bt_enable(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint64_t hf; if (bwn_bluetooth == 0) return; if ((sc->sc_board_info.board_flags & BHND_BFL_BTCOEX) == 0) return; if (mac->mac_phy.type != BWN_PHYTYPE_B && !mac->mac_phy.gmode) return; hf = bwn_hf_read(mac); if (sc->sc_board_info.board_flags & BHND_BFL_BTC2WIRE_ALTGPIO) hf |= BWN_HF_BT_COEXISTALT; else hf |= BWN_HF_BT_COEXIST; bwn_hf_write(mac, hf); } static void bwn_set_macaddr(struct bwn_mac *mac) { bwn_mac_write_bssid(mac); bwn_mac_setfilter(mac, BWN_MACFILTER_SELF, mac->mac_sc->sc_ic.ic_macaddr); } static void bwn_clear_keys(struct bwn_mac *mac) { int i; for (i = 0; i < mac->mac_max_nr_keys; i++) { KASSERT(i >= 0 && i < mac->mac_max_nr_keys, ("%s:%d: fail", __func__, __LINE__)); bwn_key_dowrite(mac, i, BWN_SEC_ALGO_NONE, NULL, BWN_SEC_KEYSIZE, NULL); if ((i <= 3) && !BWN_SEC_NEWAPI(mac)) { bwn_key_dowrite(mac, i + 4, BWN_SEC_ALGO_NONE, NULL, BWN_SEC_KEYSIZE, NULL); } mac->mac_key[i].keyconf = NULL; } } static void bwn_crypt_init(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; mac->mac_max_nr_keys = (bhnd_get_hwrev(sc->sc_dev) >= 5) ? 58 : 20; KASSERT(mac->mac_max_nr_keys <= N(mac->mac_key), ("%s:%d: fail", __func__, __LINE__)); mac->mac_ktp = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_KEY_TABLEP); mac->mac_ktp *= 2; if (bhnd_get_hwrev(sc->sc_dev) >= 5) BWN_WRITE_2(mac, BWN_RCMTA_COUNT, mac->mac_max_nr_keys - 8); bwn_clear_keys(mac); } static void bwn_chip_exit(struct bwn_mac *mac) { bwn_phy_exit(mac); } static int bwn_fw_fillinfo(struct bwn_mac *mac) { int error; error = bwn_fw_gets(mac, BWN_FWTYPE_DEFAULT); if (error == 0) return (0); error = bwn_fw_gets(mac, BWN_FWTYPE_OPENSOURCE); if (error == 0) return (0); return (error); } /** * Request that the GPIO controller tristate all pins set in @p mask, granting * the MAC core control over the pins. * * @param mac bwn MAC state. * @param pins If the bit position for a pin number is set to one, tristate the * pin. 
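 *
 * @retval 0		success
 * @retval non-zero	if configuring the GPIO pins otherwise fails, a regular
 *			unix error code will be returned.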
*/ int bwn_gpio_control(struct bwn_mac *mac, uint32_t pins) { struct bwn_softc *sc; uint32_t flags[32]; int error; sc = mac->mac_sc; /* Determine desired pin flags */ for (size_t pin = 0; pin < nitems(flags); pin++) { uint32_t pinbit = (1 << pin); if (pins & pinbit) { /* Tristate output */ flags[pin] = GPIO_PIN_OUTPUT|GPIO_PIN_TRISTATE; } else { /* Leave unmodified */ flags[pin] = 0; } } /* Configure all pins */ error = GPIO_PIN_CONFIG_32(sc->sc_gpio, 0, nitems(flags), flags); if (error) { device_printf(sc->sc_dev, "error configuring %s pin flags: " "%d\n", device_get_nameunit(sc->sc_gpio), error); return (error); } return (0); } static int bwn_gpio_init(struct bwn_mac *mac) { struct bwn_softc *sc; uint32_t pins; sc = mac->mac_sc; pins = 0xF; BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_GPOUT_MASK); BWN_WRITE_2(mac, BWN_GPIO_MASK, BWN_READ_2(mac, BWN_GPIO_MASK) | pins); if (sc->sc_board_info.board_flags & BHND_BFL_PACTRL) { /* MAC core is responsible for toggling PAREF via gpio9 */ BWN_WRITE_2(mac, BWN_GPIO_MASK, BWN_READ_2(mac, BWN_GPIO_MASK) | BHND_GPIO_BOARD_PACTRL); pins |= BHND_GPIO_BOARD_PACTRL; } return (bwn_gpio_control(mac, pins)); } static int bwn_fw_loadinitvals(struct bwn_mac *mac) { #define GETFWOFFSET(fwp, offset) \ ((const struct bwn_fwinitvals *)((const char *)fwp.fw->data + offset)) const size_t hdr_len = sizeof(struct bwn_fwhdr); const struct bwn_fwhdr *hdr; struct bwn_fw *fw = &mac->mac_fw; int error; hdr = (const struct bwn_fwhdr *)(fw->initvals.fw->data); error = bwn_fwinitvals_write(mac, GETFWOFFSET(fw->initvals, hdr_len), be32toh(hdr->size), fw->initvals.fw->datasize - hdr_len); if (error) return (error); if (fw->initvals_band.fw) { hdr = (const struct bwn_fwhdr *)(fw->initvals_band.fw->data); error = bwn_fwinitvals_write(mac, GETFWOFFSET(fw->initvals_band, hdr_len), be32toh(hdr->size), fw->initvals_band.fw->datasize - hdr_len); } return (error); #undef GETFWOFFSET } static int bwn_phy_init(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; int error; mac->mac_phy.chan = mac->mac_phy.get_default_chan(mac); mac->mac_phy.rf_onoff(mac, 1); error = mac->mac_phy.init(mac); if (error) { device_printf(sc->sc_dev, "PHY init failed\n"); goto fail0; } error = bwn_switch_channel(mac, mac->mac_phy.get_default_chan(mac)); if (error) { device_printf(sc->sc_dev, "failed to switch default channel\n"); goto fail1; } return (0); fail1: if (mac->mac_phy.exit) mac->mac_phy.exit(mac); fail0: mac->mac_phy.rf_onoff(mac, 0); return (error); } static void bwn_set_txantenna(struct bwn_mac *mac, int antenna) { uint16_t ant; uint16_t tmp; ant = bwn_ant2phy(antenna); /* For ACK/CTS */ tmp = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_ACKCTS_PHYCTL); tmp = (tmp & ~BWN_TX_PHY_ANT) | ant; bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_ACKCTS_PHYCTL, tmp); /* For Probe Resposes */ tmp = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_PROBE_RESP_PHYCTL); tmp = (tmp & ~BWN_TX_PHY_ANT) | ant; bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PROBE_RESP_PHYCTL, tmp); } static void bwn_set_opmode(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint32_t ctl; uint16_t cfp_pretbtt; ctl = BWN_READ_4(mac, BWN_MACCTL); ctl &= ~(BWN_MACCTL_HOSTAP | BWN_MACCTL_PASS_CTL | BWN_MACCTL_PASS_BADPLCP | BWN_MACCTL_PASS_BADFCS | BWN_MACCTL_PROMISC | BWN_MACCTL_BEACON_PROMISC); ctl |= BWN_MACCTL_STA; if (ic->ic_opmode == IEEE80211_M_HOSTAP || ic->ic_opmode == IEEE80211_M_MBSS) ctl |= BWN_MACCTL_HOSTAP; else if (ic->ic_opmode == IEEE80211_M_IBSS) ctl &= 
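		    /* IBSS: run with neither STA nor HOSTAP mode set */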
~BWN_MACCTL_STA; ctl |= sc->sc_filters; if (bhnd_get_hwrev(sc->sc_dev) <= 4) ctl |= BWN_MACCTL_PROMISC; BWN_WRITE_4(mac, BWN_MACCTL, ctl); cfp_pretbtt = 2; if ((ctl & BWN_MACCTL_STA) && !(ctl & BWN_MACCTL_HOSTAP)) { if (sc->sc_cid.chip_id == BHND_CHIPID_BCM4306 && sc->sc_cid.chip_rev == 3) cfp_pretbtt = 100; else cfp_pretbtt = 50; } BWN_WRITE_2(mac, 0x612, cfp_pretbtt); } static void bwn_dma_ring_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error) { if (!error) { KASSERT(nseg == 1, ("too many segments(%d)\n", nseg)); *((bus_addr_t *)arg) = seg->ds_addr; } } void bwn_dummy_transmission(struct bwn_mac *mac, int ofdm, int paon) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_softc *sc = mac->mac_sc; unsigned int i, max_loop; uint16_t value; uint32_t buffer[5] = { 0x00000000, 0x00d40000, 0x00000000, 0x01000000, 0x00000000 }; if (ofdm) { max_loop = 0x1e; buffer[0] = 0x000201cc; } else { max_loop = 0xfa; buffer[0] = 0x000b846e; } BWN_ASSERT_LOCKED(mac->mac_sc); for (i = 0; i < 5; i++) bwn_ram_write(mac, i * 4, buffer[i]); BWN_WRITE_2(mac, 0x0568, 0x0000); BWN_WRITE_2(mac, 0x07c0, (bhnd_get_hwrev(sc->sc_dev) < 11) ? 0x0000 : 0x0100); value = (ofdm ? 0x41 : 0x40); BWN_WRITE_2(mac, 0x050c, value); if (phy->type == BWN_PHYTYPE_N || phy->type == BWN_PHYTYPE_LP || phy->type == BWN_PHYTYPE_LCN) BWN_WRITE_2(mac, 0x0514, 0x1a02); BWN_WRITE_2(mac, 0x0508, 0x0000); BWN_WRITE_2(mac, 0x050a, 0x0000); BWN_WRITE_2(mac, 0x054c, 0x0000); BWN_WRITE_2(mac, 0x056a, 0x0014); BWN_WRITE_2(mac, 0x0568, 0x0826); BWN_WRITE_2(mac, 0x0500, 0x0000); /* XXX TODO: n phy pa override? */ switch (phy->type) { case BWN_PHYTYPE_N: case BWN_PHYTYPE_LCN: BWN_WRITE_2(mac, 0x0502, 0x00d0); break; case BWN_PHYTYPE_LP: BWN_WRITE_2(mac, 0x0502, 0x0050); break; default: BWN_WRITE_2(mac, 0x0502, 0x0030); break; } /* flush */ BWN_READ_2(mac, 0x0502); if (phy->rf_ver == 0x2050 && phy->rf_rev <= 0x5) BWN_RF_WRITE(mac, 0x0051, 0x0017); for (i = 0x00; i < max_loop; i++) { value = BWN_READ_2(mac, 0x050e); if (value & 0x0080) break; DELAY(10); } for (i = 0x00; i < 0x0a; i++) { value = BWN_READ_2(mac, 0x050e); if (value & 0x0400) break; DELAY(10); } for (i = 0x00; i < 0x19; i++) { value = BWN_READ_2(mac, 0x0690); if (!(value & 0x0100)) break; DELAY(10); } if (phy->rf_ver == 0x2050 && phy->rf_rev <= 0x5) BWN_RF_WRITE(mac, 0x0051, 0x0037); } void bwn_ram_write(struct bwn_mac *mac, uint16_t offset, uint32_t val) { uint32_t macctl; KASSERT(offset % 4 == 0, ("%s:%d: fail", __func__, __LINE__)); macctl = BWN_READ_4(mac, BWN_MACCTL); if (macctl & BWN_MACCTL_BIGENDIAN) printf("TODO: need swap\n"); BWN_WRITE_4(mac, BWN_RAM_CONTROL, offset); BWN_BARRIER(mac, BWN_RAM_CONTROL, 4, BUS_SPACE_BARRIER_WRITE); BWN_WRITE_4(mac, BWN_RAM_DATA, val); } void bwn_mac_suspend(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; int i; uint32_t tmp; KASSERT(mac->mac_suspended >= 0, ("%s:%d: fail", __func__, __LINE__)); DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: suspended=%d\n", __func__, mac->mac_suspended); if (mac->mac_suspended == 0) { bwn_psctl(mac, BWN_PS_AWAKE); BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_ON); BWN_READ_4(mac, BWN_MACCTL); for (i = 35; i; i--) { tmp = BWN_READ_4(mac, BWN_INTR_REASON); if (tmp & BWN_INTR_MAC_SUSPENDED) goto out; DELAY(10); } for (i = 40; i; i--) { tmp = BWN_READ_4(mac, BWN_INTR_REASON); if (tmp & BWN_INTR_MAC_SUSPENDED) goto out; DELAY(1000); } device_printf(sc->sc_dev, "MAC suspend failed\n"); } out: mac->mac_suspended++; } void bwn_mac_enable(struct bwn_mac *mac) { struct bwn_softc *sc = 
mac->mac_sc; uint16_t state; DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: suspended=%d\n", __func__, mac->mac_suspended); state = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODESTAT); if (state != BWN_SHARED_UCODESTAT_SUSPEND && state != BWN_SHARED_UCODESTAT_SLEEP) { DPRINTF(sc, BWN_DEBUG_FW, "%s: warn: firmware state (%d)\n", __func__, state); } mac->mac_suspended--; KASSERT(mac->mac_suspended >= 0, ("%s:%d: fail", __func__, __LINE__)); if (mac->mac_suspended == 0) { BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_ON); BWN_WRITE_4(mac, BWN_INTR_REASON, BWN_INTR_MAC_SUSPENDED); BWN_READ_4(mac, BWN_MACCTL); BWN_READ_4(mac, BWN_INTR_REASON); bwn_psctl(mac, 0); } } void bwn_psctl(struct bwn_mac *mac, uint32_t flags) { struct bwn_softc *sc = mac->mac_sc; int i; uint16_t ucstat; KASSERT(!((flags & BWN_PS_ON) && (flags & BWN_PS_OFF)), ("%s:%d: fail", __func__, __LINE__)); KASSERT(!((flags & BWN_PS_AWAKE) && (flags & BWN_PS_ASLEEP)), ("%s:%d: fail", __func__, __LINE__)); /* XXX forcibly awake and hwps-off */ BWN_WRITE_4(mac, BWN_MACCTL, (BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_AWAKE) & ~BWN_MACCTL_HWPS); BWN_READ_4(mac, BWN_MACCTL); if (bhnd_get_hwrev(sc->sc_dev) >= 5) { for (i = 0; i < 100; i++) { ucstat = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODESTAT); if (ucstat != BWN_SHARED_UCODESTAT_SLEEP) break; DELAY(10); } } DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: ucstat=%d\n", __func__, ucstat); } static int bwn_fw_gets(struct bwn_mac *mac, enum bwn_fwtype type) { struct bwn_softc *sc = mac->mac_sc; struct bwn_fw *fw = &mac->mac_fw; const uint8_t rev = bhnd_get_hwrev(sc->sc_dev); const char *filename; uint16_t iost; int error; /* microcode */ filename = NULL; switch (rev) { case 42: if (mac->mac_phy.type == BWN_PHYTYPE_AC) filename = "ucode42"; break; case 40: if (mac->mac_phy.type == BWN_PHYTYPE_AC) filename = "ucode40"; break; case 33: if (mac->mac_phy.type == BWN_PHYTYPE_LCN40) filename = "ucode33_lcn40"; break; case 30: if (mac->mac_phy.type == BWN_PHYTYPE_N) filename = "ucode30_mimo"; break; case 29: if (mac->mac_phy.type == BWN_PHYTYPE_HT) filename = "ucode29_mimo"; break; case 26: if (mac->mac_phy.type == BWN_PHYTYPE_HT) filename = "ucode26_mimo"; break; case 28: case 25: if (mac->mac_phy.type == BWN_PHYTYPE_N) filename = "ucode25_mimo"; else if (mac->mac_phy.type == BWN_PHYTYPE_LCN) filename = "ucode25_lcn"; break; case 24: if (mac->mac_phy.type == BWN_PHYTYPE_LCN) filename = "ucode24_lcn"; break; case 23: if (mac->mac_phy.type == BWN_PHYTYPE_N) filename = "ucode16_mimo"; break; case 16: case 17: case 18: case 19: if (mac->mac_phy.type == BWN_PHYTYPE_N) filename = "ucode16_mimo"; else if (mac->mac_phy.type == BWN_PHYTYPE_LP) filename = "ucode16_lp"; break; case 15: filename = "ucode15"; break; case 14: filename = "ucode14"; break; case 13: filename = "ucode13"; break; case 12: case 11: filename = "ucode11"; break; case 10: case 9: case 8: case 7: case 6: case 5: filename = "ucode5"; break; default: device_printf(sc->sc_dev, "no ucode for rev %d\n", rev); bwn_release_firmware(mac); return (EOPNOTSUPP); } device_printf(sc->sc_dev, "ucode fw: %s\n", filename); error = bwn_fw_get(mac, type, filename, &fw->ucode); if (error) { bwn_release_firmware(mac); return (error); } /* PCM */ KASSERT(fw->no_pcmfile == 0, ("%s:%d fail", __func__, __LINE__)); if (rev >= 5 && rev <= 10) { error = bwn_fw_get(mac, type, "pcm5", &fw->pcm); if (error == ENOENT) fw->no_pcmfile = 1; else if (error) { bwn_release_firmware(mac); return (error); } } else if (rev < 11) { 
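		/*
		 * Only revs < 5 could reach this branch, and those were
		 * already rejected during ucode selection above, so this is
		 * effectively a defensive catch-all.
		 */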
device_printf(sc->sc_dev, "no PCM for rev %d\n", rev); bwn_release_firmware(mac); return (EOPNOTSUPP); } /* initvals */ error = bhnd_read_iost(sc->sc_dev, &iost); if (error) goto fail1; switch (mac->mac_phy.type) { case BWN_PHYTYPE_A: if (rev < 5 || rev > 10) goto fail1; if (iost & BWN_IOST_HAVE_2GHZ) filename = "a0g1initvals5"; else filename = "a0g0initvals5"; break; case BWN_PHYTYPE_G: if (rev >= 5 && rev <= 10) filename = "b0g0initvals5"; else if (rev >= 13) filename = "b0g0initvals13"; else goto fail1; break; case BWN_PHYTYPE_LP: if (rev == 13) filename = "lp0initvals13"; else if (rev == 14) filename = "lp0initvals14"; else if (rev >= 15) filename = "lp0initvals15"; else goto fail1; break; case BWN_PHYTYPE_N: if (rev == 30) filename = "n16initvals30"; else if (rev == 28 || rev == 25) filename = "n0initvals25"; else if (rev == 24) filename = "n0initvals24"; else if (rev == 23) filename = "n0initvals16"; else if (rev >= 16 && rev <= 18) filename = "n0initvals16"; else if (rev >= 11 && rev <= 12) filename = "n0initvals11"; else goto fail1; break; default: goto fail1; } error = bwn_fw_get(mac, type, filename, &fw->initvals); if (error) { bwn_release_firmware(mac); return (error); } /* bandswitch initvals */ switch (mac->mac_phy.type) { case BWN_PHYTYPE_A: if (rev >= 5 && rev <= 10) { if (iost & BWN_IOST_HAVE_2GHZ) filename = "a0g1bsinitvals5"; else filename = "a0g0bsinitvals5"; } else if (rev >= 11) filename = NULL; else goto fail1; break; case BWN_PHYTYPE_G: if (rev >= 5 && rev <= 10) filename = "b0g0bsinitvals5"; else if (rev >= 11) filename = NULL; else goto fail1; break; case BWN_PHYTYPE_LP: if (rev == 13) filename = "lp0bsinitvals13"; else if (rev == 14) filename = "lp0bsinitvals14"; else if (rev >= 15) filename = "lp0bsinitvals15"; else goto fail1; break; case BWN_PHYTYPE_N: if (rev == 30) filename = "n16bsinitvals30"; else if (rev == 28 || rev == 25) filename = "n0bsinitvals25"; else if (rev == 24) filename = "n0bsinitvals24"; else if (rev == 23) filename = "n0bsinitvals16"; else if (rev >= 16 && rev <= 18) filename = "n0bsinitvals16"; else if (rev >= 11 && rev <= 12) filename = "n0bsinitvals11"; else goto fail1; break; default: device_printf(sc->sc_dev, "unknown phy (%d)\n", mac->mac_phy.type); goto fail1; } error = bwn_fw_get(mac, type, filename, &fw->initvals_band); if (error) { bwn_release_firmware(mac); return (error); } return (0); fail1: device_printf(sc->sc_dev, "no INITVALS for rev %d, phy.type %d\n", rev, mac->mac_phy.type); bwn_release_firmware(mac); return (EOPNOTSUPP); } static int bwn_fw_get(struct bwn_mac *mac, enum bwn_fwtype type, const char *name, struct bwn_fwfile *bfw) { const struct bwn_fwhdr *hdr; struct bwn_softc *sc = mac->mac_sc; const struct firmware *fw; char namebuf[64]; if (name == NULL) { bwn_do_release_fw(bfw); return (0); } if (bfw->filename != NULL) { if (bfw->type == type && (strcmp(bfw->filename, name) == 0)) return (0); bwn_do_release_fw(bfw); } snprintf(namebuf, sizeof(namebuf), "bwn%s_v4_%s%s", (type == BWN_FWTYPE_OPENSOURCE) ? "-open" : "", (mac->mac_phy.type == BWN_PHYTYPE_LP) ? 
"lp_" : "", name); /* XXX Sleeping on "fwload" with the non-sleepable locks held */ fw = firmware_get(namebuf); if (fw == NULL) { device_printf(sc->sc_dev, "the fw file(%s) not found\n", namebuf); return (ENOENT); } if (fw->datasize < sizeof(struct bwn_fwhdr)) goto fail; hdr = (const struct bwn_fwhdr *)(fw->data); switch (hdr->type) { case BWN_FWTYPE_UCODE: case BWN_FWTYPE_PCM: if (be32toh(hdr->size) != (fw->datasize - sizeof(struct bwn_fwhdr))) goto fail; /* FALLTHROUGH */ case BWN_FWTYPE_IV: if (hdr->ver != 1) goto fail; break; default: goto fail; } bfw->filename = name; bfw->fw = fw; bfw->type = type; return (0); fail: device_printf(sc->sc_dev, "the fw file(%s) format error\n", namebuf); if (fw != NULL) firmware_put(fw, FIRMWARE_UNLOAD); return (EPROTO); } static void bwn_release_firmware(struct bwn_mac *mac) { bwn_do_release_fw(&mac->mac_fw.ucode); bwn_do_release_fw(&mac->mac_fw.pcm); bwn_do_release_fw(&mac->mac_fw.initvals); bwn_do_release_fw(&mac->mac_fw.initvals_band); } static void bwn_do_release_fw(struct bwn_fwfile *bfw) { if (bfw->fw != NULL) firmware_put(bfw->fw, FIRMWARE_UNLOAD); bfw->fw = NULL; bfw->filename = NULL; } static int bwn_fw_loaducode(struct bwn_mac *mac) { #define GETFWOFFSET(fwp, offset) \ ((const uint32_t *)((const char *)fwp.fw->data + offset)) #define GETFWSIZE(fwp, offset) \ ((fwp.fw->datasize - offset) / sizeof(uint32_t)) struct bwn_softc *sc = mac->mac_sc; const uint32_t *data; unsigned int i; uint32_t ctl; uint16_t date, fwcaps, time; int error = 0; ctl = BWN_READ_4(mac, BWN_MACCTL); ctl |= BWN_MACCTL_MCODE_JMP0; KASSERT(!(ctl & BWN_MACCTL_MCODE_RUN), ("%s:%d: fail", __func__, __LINE__)); BWN_WRITE_4(mac, BWN_MACCTL, ctl); for (i = 0; i < 64; i++) bwn_shm_write_2(mac, BWN_SCRATCH, i, 0); for (i = 0; i < 4096; i += 2) bwn_shm_write_2(mac, BWN_SHARED, i, 0); data = GETFWOFFSET(mac->mac_fw.ucode, sizeof(struct bwn_fwhdr)); bwn_shm_ctlword(mac, BWN_UCODE | BWN_SHARED_AUTOINC, 0x0000); for (i = 0; i < GETFWSIZE(mac->mac_fw.ucode, sizeof(struct bwn_fwhdr)); i++) { BWN_WRITE_4(mac, BWN_SHM_DATA, be32toh(data[i])); DELAY(10); } if (mac->mac_fw.pcm.fw) { data = GETFWOFFSET(mac->mac_fw.pcm, sizeof(struct bwn_fwhdr)); bwn_shm_ctlword(mac, BWN_HW, 0x01ea); BWN_WRITE_4(mac, BWN_SHM_DATA, 0x00004000); bwn_shm_ctlword(mac, BWN_HW, 0x01eb); for (i = 0; i < GETFWSIZE(mac->mac_fw.pcm, sizeof(struct bwn_fwhdr)); i++) { BWN_WRITE_4(mac, BWN_SHM_DATA, be32toh(data[i])); DELAY(10); } } BWN_WRITE_4(mac, BWN_INTR_REASON, BWN_INTR_ALL); BWN_WRITE_4(mac, BWN_MACCTL, (BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_MCODE_JMP0) | BWN_MACCTL_MCODE_RUN); for (i = 0; i < 21; i++) { if (BWN_READ_4(mac, BWN_INTR_REASON) == BWN_INTR_MAC_SUSPENDED) break; if (i >= 20) { device_printf(sc->sc_dev, "ucode timeout\n"); error = ENXIO; goto error; } DELAY(50000); } BWN_READ_4(mac, BWN_INTR_REASON); mac->mac_fw.rev = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODE_REV); if (mac->mac_fw.rev <= 0x128) { device_printf(sc->sc_dev, "the firmware is too old\n"); error = EOPNOTSUPP; goto error; } /* * Determine firmware header version; needed for TX/RX packet * handling. */ if (mac->mac_fw.rev >= 598) mac->mac_fw.fw_hdr_format = BWN_FW_HDR_598; else if (mac->mac_fw.rev >= 410) mac->mac_fw.fw_hdr_format = BWN_FW_HDR_410; else mac->mac_fw.fw_hdr_format = BWN_FW_HDR_351; /* * We don't support rev 598 or later; that requires * another round of changes to the TX/RX descriptor * and status layout. * * So, complain this is the case and exit out, rather * than attaching and then failing. 
*/ #if 0 if (mac->mac_fw.fw_hdr_format == BWN_FW_HDR_598) { device_printf(sc->sc_dev, "firmware is too new (>=598); not supported\n"); error = EOPNOTSUPP; goto error; } #endif mac->mac_fw.patch = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODE_PATCH); date = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODE_DATE); mac->mac_fw.opensource = (date == 0xffff); if (bwn_wme != 0) mac->mac_flags |= BWN_MAC_FLAG_WME; mac->mac_flags |= BWN_MAC_FLAG_HWCRYPTO; time = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODE_TIME); if (mac->mac_fw.opensource == 0) { device_printf(sc->sc_dev, "firmware version (rev %u patch %u date %#x time %#x)\n", mac->mac_fw.rev, mac->mac_fw.patch, date, time); if (mac->mac_fw.no_pcmfile) device_printf(sc->sc_dev, "no HW crypto acceleration due to pcm5\n"); } else { mac->mac_fw.patch = time; fwcaps = bwn_fwcaps_read(mac); if (!(fwcaps & BWN_FWCAPS_HWCRYPTO) || mac->mac_fw.no_pcmfile) { device_printf(sc->sc_dev, "disabling HW crypto acceleration\n"); mac->mac_flags &= ~BWN_MAC_FLAG_HWCRYPTO; } if (!(fwcaps & BWN_FWCAPS_WME)) { device_printf(sc->sc_dev, "disabling WME support\n"); mac->mac_flags &= ~BWN_MAC_FLAG_WME; } } if (BWN_ISOLDFMT(mac)) device_printf(sc->sc_dev, "using old firmware image\n"); return (0); error: BWN_WRITE_4(mac, BWN_MACCTL, (BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_MCODE_RUN) | BWN_MACCTL_MCODE_JMP0); return (error); #undef GETFWSIZE #undef GETFWOFFSET } /* OpenFirmware only */ static uint16_t bwn_fwcaps_read(struct bwn_mac *mac) { KASSERT(mac->mac_fw.opensource == 1, ("%s:%d: fail", __func__, __LINE__)); return (bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_FWCAPS)); } static int bwn_fwinitvals_write(struct bwn_mac *mac, const struct bwn_fwinitvals *ivals, size_t count, size_t array_size) { #define GET_NEXTIV16(iv) \ ((const struct bwn_fwinitvals *)((const uint8_t *)(iv) + \ sizeof(uint16_t) + sizeof(uint16_t))) #define GET_NEXTIV32(iv) \ ((const struct bwn_fwinitvals *)((const uint8_t *)(iv) + \ sizeof(uint16_t) + sizeof(uint32_t))) struct bwn_softc *sc = mac->mac_sc; const struct bwn_fwinitvals *iv; uint16_t offset; size_t i; uint8_t bit32; KASSERT(sizeof(struct bwn_fwinitvals) == 6, ("%s:%d: fail", __func__, __LINE__)); iv = ivals; for (i = 0; i < count; i++) { if (array_size < sizeof(iv->offset_size)) goto fail; array_size -= sizeof(iv->offset_size); offset = be16toh(iv->offset_size); bit32 = (offset & BWN_FWINITVALS_32BIT) ? 
		    1 : 0;
		offset &= BWN_FWINITVALS_OFFSET_MASK;
		if (offset >= 0x1000)
			goto fail;
		if (bit32) {
			if (array_size < sizeof(iv->data.d32))
				goto fail;
			array_size -= sizeof(iv->data.d32);
			BWN_WRITE_4(mac, offset, be32toh(iv->data.d32));
			iv = GET_NEXTIV32(iv);
		} else {
			if (array_size < sizeof(iv->data.d16))
				goto fail;
			array_size -= sizeof(iv->data.d16);
			BWN_WRITE_2(mac, offset, be16toh(iv->data.d16));
			iv = GET_NEXTIV16(iv);
		}
	}
	if (array_size != 0)
		goto fail;
	return (0);
fail:
	device_printf(sc->sc_dev, "initvals: invalid format\n");
	return (EPROTO);
#undef GET_NEXTIV16
#undef GET_NEXTIV32
}

int
bwn_switch_channel(struct bwn_mac *mac, int chan)
{
	struct bwn_phy *phy = &(mac->mac_phy);
	struct bwn_softc *sc = mac->mac_sc;
	struct ieee80211com *ic = &sc->sc_ic;
	uint16_t channelcookie, savedcookie;
	int error;

	if (chan == 0xffff)
		chan = phy->get_default_chan(mac);

	channelcookie = chan;
	if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan))
		channelcookie |= 0x100;

	savedcookie = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_CHAN);
	bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_CHAN, channelcookie);
	error = phy->switch_channel(mac, chan);
	if (error)
		goto fail;

	mac->mac_phy.chan = chan;
	DELAY(8000);
	return (0);
fail:
	device_printf(sc->sc_dev, "failed to switch channel\n");
	bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_CHAN, savedcookie);
	return (error);
}

static uint16_t
bwn_ant2phy(int antenna)
{

	switch (antenna) {
	case BWN_ANT0:
		return (BWN_TX_PHY_ANT0);
	case BWN_ANT1:
		return (BWN_TX_PHY_ANT1);
	case BWN_ANT2:
		return (BWN_TX_PHY_ANT2);
	case BWN_ANT3:
		return (BWN_TX_PHY_ANT3);
	case BWN_ANTAUTO:
		return (BWN_TX_PHY_ANT01AUTO);
	}
	KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__));
	return (0);
}

static void
bwn_wme_load(struct bwn_mac *mac)
{
	struct bwn_softc *sc = mac->mac_sc;
	int i;

	KASSERT(N(bwn_wme_shm_offsets) == N(sc->sc_wmeParams),
	    ("%s:%d: fail", __func__, __LINE__));

	bwn_mac_suspend(mac);
	for (i = 0; i < N(sc->sc_wmeParams); i++)
		bwn_wme_loadparams(mac, &(sc->sc_wmeParams[i]),
		    bwn_wme_shm_offsets[i]);
	bwn_mac_enable(mac);
}

static void
bwn_wme_loadparams(struct bwn_mac *mac, const struct wmeParams *p,
    uint16_t shm_offset)
{
#define	SM(_v, _f)	(((_v) << _f##_S) & _f)
	struct bwn_softc *sc = mac->mac_sc;
	uint16_t params[BWN_NR_WMEPARAMS];
	int slot, tmp;
	unsigned int i;

	slot = BWN_READ_2(mac, BWN_RNG) &
	    SM(p->wmep_logcwmin, WME_PARAM_LOGCWMIN);

	memset(&params, 0, sizeof(params));

	DPRINTF(sc, BWN_DEBUG_WME, "wmep_txopLimit %d wmep_logcwmin %d "
	    "wmep_logcwmax %d wmep_aifsn %d\n", p->wmep_txopLimit,
	    p->wmep_logcwmin, p->wmep_logcwmax, p->wmep_aifsn);

	params[BWN_WMEPARAM_TXOP] = p->wmep_txopLimit * 32;
	params[BWN_WMEPARAM_CWMIN] = SM(p->wmep_logcwmin, WME_PARAM_LOGCWMIN);
	params[BWN_WMEPARAM_CWMAX] = SM(p->wmep_logcwmax, WME_PARAM_LOGCWMAX);
	params[BWN_WMEPARAM_CWCUR] = SM(p->wmep_logcwmin, WME_PARAM_LOGCWMIN);
	params[BWN_WMEPARAM_AIFS] = p->wmep_aifsn;
	params[BWN_WMEPARAM_BSLOTS] = slot;
	params[BWN_WMEPARAM_REGGAP] = slot + p->wmep_aifsn;

	for (i = 0; i < N(params); i++) {
		if (i == BWN_WMEPARAM_STATUS) {
			tmp = bwn_shm_read_2(mac, BWN_SHARED,
			    shm_offset + (i * 2));
			tmp |= 0x100;
			bwn_shm_write_2(mac, BWN_SHARED, shm_offset + (i * 2),
			    tmp);
		} else {
			bwn_shm_write_2(mac, BWN_SHARED, shm_offset + (i * 2),
			    params[i]);
		}
	}
}

static void
bwn_mac_write_bssid(struct bwn_mac *mac)
{
	struct bwn_softc *sc = mac->mac_sc;
	uint32_t tmp;
	int i;
	uint8_t mac_bssid[IEEE80211_ADDR_LEN * 2];

	bwn_mac_setfilter(mac, BWN_MACFILTER_BSSID, sc->sc_bssid);
	memcpy(mac_bssid, sc->sc_ic.ic_macaddr, IEEE80211_ADDR_LEN);
	memcpy(mac_bssid + IEEE80211_ADDR_LEN,
	    sc->sc_bssid, IEEE80211_ADDR_LEN);

	for (i = 0; i < N(mac_bssid); i += sizeof(uint32_t)) {
		tmp = (uint32_t) (mac_bssid[i + 0]);
		tmp |= (uint32_t) (mac_bssid[i + 1]) << 8;
		tmp |= (uint32_t) (mac_bssid[i + 2]) << 16;
		tmp |= (uint32_t) (mac_bssid[i + 3]) << 24;
		bwn_ram_write(mac, 0x20 + i, tmp);
	}
}

static void
bwn_mac_setfilter(struct bwn_mac *mac, uint16_t offset,
    const uint8_t *macaddr)
{
	static const uint8_t zero[IEEE80211_ADDR_LEN] = { 0 };
	uint16_t data;

	/* Program the all-zero address when no MAC address is supplied. */
	if (macaddr == NULL)
		macaddr = zero;

	offset |= 0x0020;
	BWN_WRITE_2(mac, BWN_MACFILTER_CONTROL, offset);

	data = macaddr[0];
	data |= macaddr[1] << 8;
	BWN_WRITE_2(mac, BWN_MACFILTER_DATA, data);
	data = macaddr[2];
	data |= macaddr[3] << 8;
	BWN_WRITE_2(mac, BWN_MACFILTER_DATA, data);
	data = macaddr[4];
	data |= macaddr[5] << 8;
	BWN_WRITE_2(mac, BWN_MACFILTER_DATA, data);
}

static void
bwn_key_dowrite(struct bwn_mac *mac, uint8_t index, uint8_t algorithm,
    const uint8_t *key, size_t key_len, const uint8_t *mac_addr)
{
	uint8_t buf[BWN_SEC_KEYSIZE] = { 0, };
	uint8_t per_sta_keys_start = 8;

	if (BWN_SEC_NEWAPI(mac))
		per_sta_keys_start = 4;

	KASSERT(index < mac->mac_max_nr_keys,
	    ("%s:%d: fail", __func__, __LINE__));
	KASSERT(key_len <= BWN_SEC_KEYSIZE,
	    ("%s:%d: fail", __func__, __LINE__));

	if (index >= per_sta_keys_start)
		bwn_key_macwrite(mac, index, NULL);
	if (key)
		memcpy(buf, key, key_len);
	bwn_key_write(mac, index, algorithm, buf);
	if (index >= per_sta_keys_start)
		bwn_key_macwrite(mac, index, mac_addr);

	mac->mac_key[index].algorithm = algorithm;
}

static void
bwn_key_macwrite(struct bwn_mac *mac, uint8_t index, const uint8_t *addr)
{
	struct bwn_softc *sc = mac->mac_sc;
	uint32_t addrtmp[2] = { 0, 0 };
	uint8_t start = 8;

	if (BWN_SEC_NEWAPI(mac))
		start = 4;

	KASSERT(index >= start, ("%s:%d: fail", __func__, __LINE__));
	index -= start;

	if (addr) {
		addrtmp[0] = addr[0];
		addrtmp[0] |= ((uint32_t) (addr[1]) << 8);
		addrtmp[0] |= ((uint32_t) (addr[2]) << 16);
		addrtmp[0] |= ((uint32_t) (addr[3]) << 24);
		addrtmp[1] = addr[4];
		addrtmp[1] |= ((uint32_t) (addr[5]) << 8);
	}

	if (bhnd_get_hwrev(sc->sc_dev) >= 5) {
		bwn_shm_write_4(mac, BWN_RCMTA, (index * 2) + 0, addrtmp[0]);
		bwn_shm_write_2(mac, BWN_RCMTA, (index * 2) + 1, addrtmp[1]);
	} else {
		if (index >= 8) {
			bwn_shm_write_4(mac, BWN_SHARED,
			    BWN_SHARED_PSM + (index * 6) + 0, addrtmp[0]);
			bwn_shm_write_2(mac, BWN_SHARED,
			    BWN_SHARED_PSM + (index * 6) + 4, addrtmp[1]);
		}
	}
}

static void
bwn_key_write(struct bwn_mac *mac, uint8_t index, uint8_t algorithm,
    const uint8_t *key)
{
	unsigned int i;
	uint32_t offset;
	uint16_t kidx, value;

	kidx = BWN_SEC_KEY2FW(mac, index);
	bwn_shm_write_2(mac, BWN_SHARED,
	    BWN_SHARED_KEYIDX_BLOCK + (kidx * 2), (kidx << 4) | algorithm);

	offset = mac->mac_ktp + (index * BWN_SEC_KEYSIZE);
	for (i = 0; i < BWN_SEC_KEYSIZE; i += 2) {
		value = key[i];
		value |= (uint16_t)(key[i + 1]) << 8;
		bwn_shm_write_2(mac, BWN_SHARED, offset + i, value);
	}
}

static void
bwn_phy_exit(struct bwn_mac *mac)
{

	mac->mac_phy.rf_onoff(mac, 0);
	if (mac->mac_phy.exit != NULL)
		mac->mac_phy.exit(mac);
}

static void
bwn_dma_free(struct bwn_mac *mac)
{
	struct bwn_dma *dma;

	if ((mac->mac_flags & BWN_MAC_FLAG_DMA) == 0)
		return;
	dma = &mac->mac_method.dma;

	bwn_dma_ringfree(&dma->rx);
	bwn_dma_ringfree(&dma->wme[WME_AC_BK]);
	bwn_dma_ringfree(&dma->wme[WME_AC_BE]);
	bwn_dma_ringfree(&dma->wme[WME_AC_VI]);
	bwn_dma_ringfree(&dma->wme[WME_AC_VO]);
	bwn_dma_ringfree(&dma->mcast);
}

static void
bwn_core_stop(struct bwn_mac *mac)
{
	struct bwn_softc *sc = mac->mac_sc;

	BWN_ASSERT_LOCKED(sc);

	if (mac->mac_status < BWN_MAC_STATUS_STARTED)
		return;
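	/*
	 * Stop the periodic callouts and mask all device interrupts
	 * before suspending the MAC; the state drops back to
	 * BWN_MAC_STATUS_INITED below.
	 */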
callout_stop(&sc->sc_rfswitch_ch); callout_stop(&sc->sc_task_ch); callout_stop(&sc->sc_watchdog_ch); sc->sc_watchdog_timer = 0; BWN_WRITE_4(mac, BWN_INTR_MASK, 0); BWN_READ_4(mac, BWN_INTR_MASK); bwn_mac_suspend(mac); mac->mac_status = BWN_MAC_STATUS_INITED; } static int bwn_switch_band(struct bwn_softc *sc, struct ieee80211_channel *chan) { struct bwn_mac *up_dev = NULL; struct bwn_mac *down_dev; struct bwn_mac *mac; int err, status; uint8_t gmode; BWN_ASSERT_LOCKED(sc); TAILQ_FOREACH(mac, &sc->sc_maclist, mac_list) { if (IEEE80211_IS_CHAN_2GHZ(chan) && mac->mac_phy.supports_2ghz) { up_dev = mac; gmode = 1; } else if (IEEE80211_IS_CHAN_5GHZ(chan) && mac->mac_phy.supports_5ghz) { up_dev = mac; gmode = 0; } else { KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (EINVAL); } if (up_dev != NULL) break; } if (up_dev == NULL) { device_printf(sc->sc_dev, "Could not find a device\n"); return (ENODEV); } if (up_dev == sc->sc_curmac && sc->sc_curmac->mac_phy.gmode == gmode) return (0); DPRINTF(sc, BWN_DEBUG_RF | BWN_DEBUG_PHY | BWN_DEBUG_RESET, "switching to %s-GHz band\n", IEEE80211_IS_CHAN_2GHZ(chan) ? "2" : "5"); down_dev = sc->sc_curmac; status = down_dev->mac_status; if (status >= BWN_MAC_STATUS_STARTED) bwn_core_stop(down_dev); if (status >= BWN_MAC_STATUS_INITED) bwn_core_exit(down_dev); if (down_dev != up_dev) { err = bwn_phy_reset(down_dev); if (err) goto fail; } up_dev->mac_phy.gmode = gmode; if (status >= BWN_MAC_STATUS_INITED) { err = bwn_core_init(up_dev); if (err) { device_printf(sc->sc_dev, "fatal: failed to initialize for %s-GHz\n", IEEE80211_IS_CHAN_2GHZ(chan) ? "2" : "5"); goto fail; } } if (status >= BWN_MAC_STATUS_STARTED) bwn_core_start(up_dev); KASSERT(up_dev->mac_status == status, ("%s: fail", __func__)); sc->sc_curmac = up_dev; return (0); fail: sc->sc_curmac = NULL; return (err); } static void bwn_rf_turnon(struct bwn_mac *mac) { DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: called\n", __func__); bwn_mac_suspend(mac); mac->mac_phy.rf_onoff(mac, 1); mac->mac_phy.rf_on = 1; bwn_mac_enable(mac); } static void bwn_rf_turnoff(struct bwn_mac *mac) { DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: called\n", __func__); bwn_mac_suspend(mac); mac->mac_phy.rf_onoff(mac, 0); mac->mac_phy.rf_on = 0; bwn_mac_enable(mac); } /* * PHY reset. */ static int bwn_phy_reset(struct bwn_mac *mac) { struct bwn_softc *sc; uint16_t iost, mask; int error; sc = mac->mac_sc; iost = BWN_IOCTL_PHYRESET | BHND_IOCTL_CLK_FORCE; mask = iost | BWN_IOCTL_SUPPORT_G; if ((error = bhnd_write_ioctl(sc->sc_dev, iost, mask))) return (error); DELAY(1000); iost &= ~BHND_IOCTL_CLK_FORCE; if ((error = bhnd_write_ioctl(sc->sc_dev, iost, mask))) return (error); DELAY(1000); return (0); } static int bwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct bwn_vap *bvp = BWN_VAP(vap); struct ieee80211com *ic= vap->iv_ic; enum ieee80211_state ostate = vap->iv_state; struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac = sc->sc_curmac; int error; DPRINTF(sc, BWN_DEBUG_STATE, "%s: %s -> %s\n", __func__, ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]); error = bvp->bv_newstate(vap, nstate, arg); if (error != 0) return (error); BWN_LOCK(sc); bwn_led_newstate(mac, nstate); /* * Clear the BSSID when we stop a STA */ if (vap->iv_opmode == IEEE80211_M_STA) { if (ostate == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) { /* * Clear out the BSSID. If we reassociate to * the same AP, this will reinialize things * correctly... 
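			 * (This is skipped while BWN_FLAG_INVALID is set;
			 * otherwise sc_bssid is zeroed before
			 * bwn_set_macaddr() runs.)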
*/ if (ic->ic_opmode == IEEE80211_M_STA && (sc->sc_flags & BWN_FLAG_INVALID) == 0) { memset(sc->sc_bssid, 0, IEEE80211_ADDR_LEN); bwn_set_macaddr(mac); } } } if (vap->iv_opmode == IEEE80211_M_MONITOR || vap->iv_opmode == IEEE80211_M_AHDEMO) { /* XXX nothing to do? */ } else if (nstate == IEEE80211_S_RUN) { memcpy(sc->sc_bssid, vap->iv_bss->ni_bssid, IEEE80211_ADDR_LEN); bwn_set_opmode(mac); bwn_set_pretbtt(mac); bwn_spu_setdelay(mac, 0); bwn_set_macaddr(mac); } BWN_UNLOCK(sc); return (error); } static void bwn_set_pretbtt(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint16_t pretbtt; if (ic->ic_opmode == IEEE80211_M_IBSS) pretbtt = 2; else pretbtt = (mac->mac_phy.type == BWN_PHYTYPE_A) ? 120 : 250; bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PRETBTT, pretbtt); BWN_WRITE_2(mac, BWN_TSF_CFP_PRETBTT, pretbtt); } static int bwn_intr(void *arg) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; uint32_t reason; if (mac->mac_status < BWN_MAC_STATUS_STARTED || (sc->sc_flags & BWN_FLAG_INVALID)) return (FILTER_STRAY); DPRINTF(sc, BWN_DEBUG_INTR, "%s: called\n", __func__); reason = BWN_READ_4(mac, BWN_INTR_REASON); if (reason == 0xffffffff) /* shared IRQ */ return (FILTER_STRAY); reason &= mac->mac_intr_mask; if (reason == 0) return (FILTER_HANDLED); DPRINTF(sc, BWN_DEBUG_INTR, "%s: reason=0x%08x\n", __func__, reason); mac->mac_reason[0] = BWN_READ_4(mac, BWN_DMA0_REASON) & 0x0001dc00; mac->mac_reason[1] = BWN_READ_4(mac, BWN_DMA1_REASON) & 0x0000dc00; mac->mac_reason[2] = BWN_READ_4(mac, BWN_DMA2_REASON) & 0x0000dc00; mac->mac_reason[3] = BWN_READ_4(mac, BWN_DMA3_REASON) & 0x0001dc00; mac->mac_reason[4] = BWN_READ_4(mac, BWN_DMA4_REASON) & 0x0000dc00; BWN_WRITE_4(mac, BWN_INTR_REASON, reason); BWN_WRITE_4(mac, BWN_DMA0_REASON, mac->mac_reason[0]); BWN_WRITE_4(mac, BWN_DMA1_REASON, mac->mac_reason[1]); BWN_WRITE_4(mac, BWN_DMA2_REASON, mac->mac_reason[2]); BWN_WRITE_4(mac, BWN_DMA3_REASON, mac->mac_reason[3]); BWN_WRITE_4(mac, BWN_DMA4_REASON, mac->mac_reason[4]); /* Disable interrupts. 
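	 * They are re-enabled from bwn_intrtask() once the deferred work
	 * has run, by restoring mac_intr_mask.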
*/ BWN_WRITE_4(mac, BWN_INTR_MASK, 0); mac->mac_reason_intr = reason; BWN_BARRIER(mac, 0, 0, BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE); taskqueue_enqueue(sc->sc_tq, &mac->mac_intrtask); return (FILTER_HANDLED); } static void bwn_intrtask(void *arg, int npending) { struct epoch_tracker et; struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; uint32_t merged = 0; int i, tx = 0, rx = 0; BWN_LOCK(sc); if (mac->mac_status < BWN_MAC_STATUS_STARTED || (sc->sc_flags & BWN_FLAG_INVALID)) { BWN_UNLOCK(sc); return; } for (i = 0; i < N(mac->mac_reason); i++) merged |= mac->mac_reason[i]; if (mac->mac_reason_intr & BWN_INTR_MAC_TXERR) device_printf(sc->sc_dev, "MAC trans error\n"); if (mac->mac_reason_intr & BWN_INTR_PHY_TXERR) { DPRINTF(sc, BWN_DEBUG_INTR, "%s: PHY trans error\n", __func__); mac->mac_phy.txerrors--; if (mac->mac_phy.txerrors == 0) { mac->mac_phy.txerrors = BWN_TXERROR_MAX; bwn_restart(mac, "PHY TX errors"); } } if (merged & (BWN_DMAINTR_FATALMASK | BWN_DMAINTR_NONFATALMASK)) { if (merged & BWN_DMAINTR_FATALMASK) { device_printf(sc->sc_dev, "Fatal DMA error: %#x %#x %#x %#x %#x %#x\n", mac->mac_reason[0], mac->mac_reason[1], mac->mac_reason[2], mac->mac_reason[3], mac->mac_reason[4], mac->mac_reason[5]); bwn_restart(mac, "DMA error"); BWN_UNLOCK(sc); return; } if (merged & BWN_DMAINTR_NONFATALMASK) { device_printf(sc->sc_dev, "DMA error: %#x %#x %#x %#x %#x %#x\n", mac->mac_reason[0], mac->mac_reason[1], mac->mac_reason[2], mac->mac_reason[3], mac->mac_reason[4], mac->mac_reason[5]); } } if (mac->mac_reason_intr & BWN_INTR_UCODE_DEBUG) bwn_intr_ucode_debug(mac); if (mac->mac_reason_intr & BWN_INTR_TBTT_INDI) bwn_intr_tbtt_indication(mac); if (mac->mac_reason_intr & BWN_INTR_ATIM_END) bwn_intr_atim_end(mac); if (mac->mac_reason_intr & BWN_INTR_BEACON) bwn_intr_beacon(mac); if (mac->mac_reason_intr & BWN_INTR_PMQ) bwn_intr_pmq(mac); if (mac->mac_reason_intr & BWN_INTR_NOISESAMPLE_OK) bwn_intr_noise(mac); NET_EPOCH_ENTER(et); if (mac->mac_flags & BWN_MAC_FLAG_DMA) { if (mac->mac_reason[0] & BWN_DMAINTR_RX_DONE) { bwn_dma_rx(mac->mac_method.dma.rx); rx = 1; } } else rx = bwn_pio_rx(&mac->mac_method.pio.rx); NET_EPOCH_EXIT(et); KASSERT(!(mac->mac_reason[1] & BWN_DMAINTR_RX_DONE), ("%s", __func__)); KASSERT(!(mac->mac_reason[2] & BWN_DMAINTR_RX_DONE), ("%s", __func__)); KASSERT(!(mac->mac_reason[3] & BWN_DMAINTR_RX_DONE), ("%s", __func__)); KASSERT(!(mac->mac_reason[4] & BWN_DMAINTR_RX_DONE), ("%s", __func__)); KASSERT(!(mac->mac_reason[5] & BWN_DMAINTR_RX_DONE), ("%s", __func__)); if (mac->mac_reason_intr & BWN_INTR_TX_OK) { bwn_intr_txeof(mac); tx = 1; } BWN_WRITE_4(mac, BWN_INTR_MASK, mac->mac_intr_mask); if (sc->sc_blink_led != NULL && sc->sc_led_blink) { int evt = BWN_LED_EVENT_NONE; if (tx && rx) { if (sc->sc_rx_rate > sc->sc_tx_rate) evt = BWN_LED_EVENT_RX; else evt = BWN_LED_EVENT_TX; } else if (tx) { evt = BWN_LED_EVENT_TX; } else if (rx) { evt = BWN_LED_EVENT_RX; } else if (rx == 0) { evt = BWN_LED_EVENT_POLL; } if (evt != BWN_LED_EVENT_NONE) bwn_led_event(mac, evt); } if (mbufq_first(&sc->sc_snd) != NULL) bwn_start(sc); BWN_BARRIER(mac, 0, 0, BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE); BWN_UNLOCK(sc); } static void bwn_restart(struct bwn_mac *mac, const char *msg) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; if (mac->mac_status < BWN_MAC_STATUS_INITED) return; device_printf(sc->sc_dev, "HW reset: %s\n", msg); ieee80211_runtask(ic, &mac->mac_hwreset); } static void bwn_intr_ucode_debug(struct bwn_mac *mac) { struct bwn_softc *sc = 
mac->mac_sc; uint16_t reason; if (mac->mac_fw.opensource == 0) return; reason = bwn_shm_read_2(mac, BWN_SCRATCH, BWN_DEBUGINTR_REASON_REG); switch (reason) { case BWN_DEBUGINTR_PANIC: bwn_handle_fwpanic(mac); break; case BWN_DEBUGINTR_DUMP_SHM: device_printf(sc->sc_dev, "BWN_DEBUGINTR_DUMP_SHM\n"); break; case BWN_DEBUGINTR_DUMP_REGS: device_printf(sc->sc_dev, "BWN_DEBUGINTR_DUMP_REGS\n"); break; case BWN_DEBUGINTR_MARKER: device_printf(sc->sc_dev, "BWN_DEBUGINTR_MARKER\n"); break; default: device_printf(sc->sc_dev, "ucode debug unknown reason: %#x\n", reason); } bwn_shm_write_2(mac, BWN_SCRATCH, BWN_DEBUGINTR_REASON_REG, BWN_DEBUGINTR_ACK); } static void bwn_intr_tbtt_indication(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; if (ic->ic_opmode != IEEE80211_M_HOSTAP) bwn_psctl(mac, 0); if (ic->ic_opmode == IEEE80211_M_IBSS) mac->mac_flags |= BWN_MAC_FLAG_DFQVALID; } static void bwn_intr_atim_end(struct bwn_mac *mac) { if (mac->mac_flags & BWN_MAC_FLAG_DFQVALID) { BWN_WRITE_4(mac, BWN_MACCMD, BWN_READ_4(mac, BWN_MACCMD) | BWN_MACCMD_DFQ_VALID); mac->mac_flags &= ~BWN_MAC_FLAG_DFQVALID; } } static void bwn_intr_beacon(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint32_t cmd, beacon0, beacon1; if (ic->ic_opmode == IEEE80211_M_HOSTAP || ic->ic_opmode == IEEE80211_M_MBSS) return; mac->mac_intr_mask &= ~BWN_INTR_BEACON; cmd = BWN_READ_4(mac, BWN_MACCMD); beacon0 = (cmd & BWN_MACCMD_BEACON0_VALID); beacon1 = (cmd & BWN_MACCMD_BEACON1_VALID); if (beacon0 && beacon1) { BWN_WRITE_4(mac, BWN_INTR_REASON, BWN_INTR_BEACON); mac->mac_intr_mask |= BWN_INTR_BEACON; return; } if (sc->sc_flags & BWN_FLAG_NEED_BEACON_TP) { sc->sc_flags &= ~BWN_FLAG_NEED_BEACON_TP; bwn_load_beacon0(mac); bwn_load_beacon1(mac); cmd = BWN_READ_4(mac, BWN_MACCMD); cmd |= BWN_MACCMD_BEACON0_VALID; BWN_WRITE_4(mac, BWN_MACCMD, cmd); } else { if (!beacon0) { bwn_load_beacon0(mac); cmd = BWN_READ_4(mac, BWN_MACCMD); cmd |= BWN_MACCMD_BEACON0_VALID; BWN_WRITE_4(mac, BWN_MACCMD, cmd); } else if (!beacon1) { bwn_load_beacon1(mac); cmd = BWN_READ_4(mac, BWN_MACCMD); cmd |= BWN_MACCMD_BEACON1_VALID; BWN_WRITE_4(mac, BWN_MACCMD, cmd); } } } static void bwn_intr_pmq(struct bwn_mac *mac) { uint32_t tmp; while (1) { tmp = BWN_READ_4(mac, BWN_PS_STATUS); if (!(tmp & 0x00000008)) break; } BWN_WRITE_2(mac, BWN_PS_STATUS, 0x0002); } static void bwn_intr_noise(struct bwn_mac *mac) { struct bwn_phy_g *pg = &mac->mac_phy.phy_g; uint16_t tmp; uint8_t noise[4]; uint8_t i, j; int32_t average; if (mac->mac_phy.type != BWN_PHYTYPE_G) return; KASSERT(mac->mac_noise.noi_running, ("%s: fail", __func__)); *((uint32_t *)noise) = htole32(bwn_jssi_read(mac)); if (noise[0] == 0x7f || noise[1] == 0x7f || noise[2] == 0x7f || noise[3] == 0x7f) goto new; KASSERT(mac->mac_noise.noi_nsamples < 8, ("%s:%d: fail", __func__, __LINE__)); i = mac->mac_noise.noi_nsamples; noise[0] = MIN(MAX(noise[0], 0), N(pg->pg_nrssi_lt) - 1); noise[1] = MIN(MAX(noise[1], 0), N(pg->pg_nrssi_lt) - 1); noise[2] = MIN(MAX(noise[2], 0), N(pg->pg_nrssi_lt) - 1); noise[3] = MIN(MAX(noise[3], 0), N(pg->pg_nrssi_lt) - 1); mac->mac_noise.noi_samples[i][0] = pg->pg_nrssi_lt[noise[0]]; mac->mac_noise.noi_samples[i][1] = pg->pg_nrssi_lt[noise[1]]; mac->mac_noise.noi_samples[i][2] = pg->pg_nrssi_lt[noise[2]]; mac->mac_noise.noi_samples[i][3] = pg->pg_nrssi_lt[noise[3]]; mac->mac_noise.noi_nsamples++; if (mac->mac_noise.noi_nsamples == 8) { average = 0; for (i = 0; i < 8; i++) { for (j = 0; j < 4; 
j++) average += mac->mac_noise.noi_samples[i][j]; } average = (((average / 32) * 125) + 64) / 128; tmp = (bwn_shm_read_2(mac, BWN_SHARED, 0x40c) / 128) & 0x1f; if (tmp >= 8) average += 2; else average -= 25; average -= (tmp == 8) ? 72 : 48; mac->mac_stats.link_noise = average; mac->mac_noise.noi_running = 0; return; } new: bwn_noise_gensample(mac); } static int bwn_pio_rx(struct bwn_pio_rxqueue *prq) { struct bwn_mac *mac = prq->prq_mac; struct bwn_softc *sc = mac->mac_sc; unsigned int i; BWN_ASSERT_LOCKED(sc); if (mac->mac_status < BWN_MAC_STATUS_STARTED) return (0); for (i = 0; i < 5000; i++) { if (bwn_pio_rxeof(prq) == 0) break; } if (i >= 5000) device_printf(sc->sc_dev, "too many RX frames in PIO mode\n"); return ((i > 0) ? 1 : 0); } static void bwn_dma_rx(struct bwn_dma_ring *dr) { int slot, curslot; KASSERT(!dr->dr_tx, ("%s:%d: fail", __func__, __LINE__)); curslot = dr->get_curslot(dr); KASSERT(curslot >= 0 && curslot < dr->dr_numslots, ("%s:%d: fail", __func__, __LINE__)); slot = dr->dr_curslot; for (; slot != curslot; slot = bwn_dma_nextslot(dr, slot)) bwn_dma_rxeof(dr, &slot); bus_dmamap_sync(dr->dr_ring_dtag, dr->dr_ring_dmap, BUS_DMASYNC_PREWRITE); dr->set_curslot(dr, slot); dr->dr_curslot = slot; } static void bwn_intr_txeof(struct bwn_mac *mac) { struct bwn_txstatus stat; uint32_t stat0, stat1; uint16_t tmp; BWN_ASSERT_LOCKED(mac->mac_sc); while (1) { stat0 = BWN_READ_4(mac, BWN_XMITSTAT_0); if (!(stat0 & 0x00000001)) break; stat1 = BWN_READ_4(mac, BWN_XMITSTAT_1); DPRINTF(mac->mac_sc, BWN_DEBUG_XMIT, "%s: stat0=0x%08x, stat1=0x%08x\n", __func__, stat0, stat1); stat.cookie = (stat0 >> 16); stat.seq = (stat1 & 0x0000ffff); stat.phy_stat = ((stat1 & 0x00ff0000) >> 16); tmp = (stat0 & 0x0000ffff); stat.framecnt = ((tmp & 0xf000) >> 12); stat.rtscnt = ((tmp & 0x0f00) >> 8); stat.sreason = ((tmp & 0x001c) >> 2); stat.pm = (tmp & 0x0080) ? 1 : 0; stat.im = (tmp & 0x0040) ? 1 : 0; stat.ampdu = (tmp & 0x0020) ? 1 : 0; stat.ack = (tmp & 0x0002) ? 
1 : 0; DPRINTF(mac->mac_sc, BWN_DEBUG_XMIT, "%s: cookie=%d, seq=%d, phystat=0x%02x, framecnt=%d, " "rtscnt=%d, sreason=%d, pm=%d, im=%d, ampdu=%d, ack=%d\n", __func__, stat.cookie, stat.seq, stat.phy_stat, stat.framecnt, stat.rtscnt, stat.sreason, stat.pm, stat.im, stat.ampdu, stat.ack); bwn_handle_txeof(mac, &stat); } } static void bwn_hwreset(void *arg, int npending) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; int error = 0; int prev_status; BWN_LOCK(sc); prev_status = mac->mac_status; if (prev_status >= BWN_MAC_STATUS_STARTED) bwn_core_stop(mac); if (prev_status >= BWN_MAC_STATUS_INITED) bwn_core_exit(mac); if (prev_status >= BWN_MAC_STATUS_INITED) { error = bwn_core_init(mac); if (error) goto out; } if (prev_status >= BWN_MAC_STATUS_STARTED) bwn_core_start(mac); out: if (error) { device_printf(sc->sc_dev, "%s: failed (%d)\n", __func__, error); sc->sc_curmac = NULL; } BWN_UNLOCK(sc); } static void bwn_handle_fwpanic(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint16_t reason; reason = bwn_shm_read_2(mac, BWN_SCRATCH, BWN_FWPANIC_REASON_REG); device_printf(sc->sc_dev,"fw panic (%u)\n", reason); if (reason == BWN_FWPANIC_RESTART) bwn_restart(mac, "ucode panic"); } static void bwn_load_beacon0(struct bwn_mac *mac) { KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } static void bwn_load_beacon1(struct bwn_mac *mac) { KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } static uint32_t bwn_jssi_read(struct bwn_mac *mac) { uint32_t val = 0; val = bwn_shm_read_2(mac, BWN_SHARED, 0x08a); val <<= 16; val |= bwn_shm_read_2(mac, BWN_SHARED, 0x088); return (val); } static void bwn_noise_gensample(struct bwn_mac *mac) { uint32_t jssi = 0x7f7f7f7f; bwn_shm_write_2(mac, BWN_SHARED, 0x088, (jssi & 0x0000ffff)); bwn_shm_write_2(mac, BWN_SHARED, 0x08a, (jssi & 0xffff0000) >> 16); BWN_WRITE_4(mac, BWN_MACCMD, BWN_READ_4(mac, BWN_MACCMD) | BWN_MACCMD_BGNOISE); } static int bwn_dma_freeslot(struct bwn_dma_ring *dr) { BWN_ASSERT_LOCKED(dr->dr_mac->mac_sc); return (dr->dr_numslots - dr->dr_usedslot); } static int bwn_dma_nextslot(struct bwn_dma_ring *dr, int slot) { BWN_ASSERT_LOCKED(dr->dr_mac->mac_sc); KASSERT(slot >= -1 && slot <= dr->dr_numslots - 1, ("%s:%d: fail", __func__, __LINE__)); if (slot == dr->dr_numslots - 1) return (0); return (slot + 1); } static void bwn_dma_rxeof(struct bwn_dma_ring *dr, int *slot) { struct bwn_mac *mac = dr->dr_mac; struct bwn_softc *sc = mac->mac_sc; struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *meta; struct bwn_rxhdr4 *rxhdr; struct mbuf *m; uint32_t macstat; int32_t tmp; int cnt = 0; uint16_t len; dr->getdesc(dr, *slot, &desc, &meta); bus_dmamap_sync(dma->rxbuf_dtag, meta->mt_dmap, BUS_DMASYNC_POSTREAD); m = meta->mt_m; if (bwn_dma_newbuf(dr, desc, meta, 0)) { counter_u64_add(sc->sc_ic.ic_ierrors, 1); return; } rxhdr = mtod(m, struct bwn_rxhdr4 *); len = le16toh(rxhdr->frame_len); if (len <= 0) { counter_u64_add(sc->sc_ic.ic_ierrors, 1); return; } if (bwn_dma_check_redzone(dr, m)) { device_printf(sc->sc_dev, "redzone error.\n"); bwn_dma_set_redzone(dr, m); bus_dmamap_sync(dma->rxbuf_dtag, meta->mt_dmap, BUS_DMASYNC_PREWRITE); return; } if (len > dr->dr_rx_bufsize) { tmp = len; while (1) { dr->getdesc(dr, *slot, &desc, &meta); bwn_dma_set_redzone(dr, meta->mt_m); bus_dmamap_sync(dma->rxbuf_dtag, meta->mt_dmap, BUS_DMASYNC_PREWRITE); *slot = bwn_dma_nextslot(dr, *slot); cnt++; tmp -= dr->dr_rx_bufsize; if (tmp <= 0) break; } device_printf(sc->sc_dev, "too small buffer " 
"(len %u buffer %u dropped %d)\n", len, dr->dr_rx_bufsize, cnt); return; } switch (mac->mac_fw.fw_hdr_format) { case BWN_FW_HDR_351: case BWN_FW_HDR_410: macstat = le32toh(rxhdr->ps4.r351.mac_status); break; case BWN_FW_HDR_598: macstat = le32toh(rxhdr->ps4.r598.mac_status); break; } if (macstat & BWN_RX_MAC_FCSERR) { if (!(mac->mac_sc->sc_filters & BWN_MACCTL_PASS_BADFCS)) { device_printf(sc->sc_dev, "RX drop\n"); return; } } m->m_len = m->m_pkthdr.len = len + dr->dr_frameoffset; m_adj(m, dr->dr_frameoffset); bwn_rxeof(dr->dr_mac, m, rxhdr); } static void bwn_handle_txeof(struct bwn_mac *mac, const struct bwn_txstatus *status) { struct bwn_softc *sc = mac->mac_sc; struct bwn_stats *stats = &mac->mac_stats; BWN_ASSERT_LOCKED(mac->mac_sc); if (status->im) device_printf(sc->sc_dev, "TODO: STATUS IM\n"); if (status->ampdu) device_printf(sc->sc_dev, "TODO: STATUS AMPDU\n"); if (status->rtscnt) { if (status->rtscnt == 0xf) stats->rtsfail++; else stats->rts++; } if (mac->mac_flags & BWN_MAC_FLAG_DMA) { bwn_dma_handle_txeof(mac, status); } else { bwn_pio_handle_txeof(mac, status); } bwn_phy_txpower_check(mac, 0); } static uint8_t bwn_pio_rxeof(struct bwn_pio_rxqueue *prq) { struct bwn_mac *mac = prq->prq_mac; struct bwn_softc *sc = mac->mac_sc; struct bwn_rxhdr4 rxhdr; struct mbuf *m; uint32_t ctl32, macstat, v32; unsigned int i, padding; uint16_t ctl16, len, totlen, v16; unsigned char *mp; char *data; memset(&rxhdr, 0, sizeof(rxhdr)); if (prq->prq_rev >= 8) { ctl32 = bwn_pio_rx_read_4(prq, BWN_PIO8_RXCTL); if (!(ctl32 & BWN_PIO8_RXCTL_FRAMEREADY)) return (0); bwn_pio_rx_write_4(prq, BWN_PIO8_RXCTL, BWN_PIO8_RXCTL_FRAMEREADY); for (i = 0; i < 10; i++) { ctl32 = bwn_pio_rx_read_4(prq, BWN_PIO8_RXCTL); if (ctl32 & BWN_PIO8_RXCTL_DATAREADY) goto ready; DELAY(10); } } else { ctl16 = bwn_pio_rx_read_2(prq, BWN_PIO_RXCTL); if (!(ctl16 & BWN_PIO_RXCTL_FRAMEREADY)) return (0); bwn_pio_rx_write_2(prq, BWN_PIO_RXCTL, BWN_PIO_RXCTL_FRAMEREADY); for (i = 0; i < 10; i++) { ctl16 = bwn_pio_rx_read_2(prq, BWN_PIO_RXCTL); if (ctl16 & BWN_PIO_RXCTL_DATAREADY) goto ready; DELAY(10); } } device_printf(sc->sc_dev, "%s: timed out\n", __func__); return (1); ready: if (prq->prq_rev >= 8) { bus_read_multi_4(sc->sc_mem_res, prq->prq_base + BWN_PIO8_RXDATA, (void *)&rxhdr, sizeof(rxhdr)); } else { bus_read_multi_2(sc->sc_mem_res, prq->prq_base + BWN_PIO_RXDATA, (void *)&rxhdr, sizeof(rxhdr)); } len = le16toh(rxhdr.frame_len); if (len > 0x700) { device_printf(sc->sc_dev, "%s: len is too big\n", __func__); goto error; } if (len == 0) { device_printf(sc->sc_dev, "%s: len is 0\n", __func__); goto error; } switch (mac->mac_fw.fw_hdr_format) { case BWN_FW_HDR_351: case BWN_FW_HDR_410: macstat = le32toh(rxhdr.ps4.r351.mac_status); break; case BWN_FW_HDR_598: macstat = le32toh(rxhdr.ps4.r598.mac_status); break; } if (macstat & BWN_RX_MAC_FCSERR) { if (!(mac->mac_sc->sc_filters & BWN_MACCTL_PASS_BADFCS)) { device_printf(sc->sc_dev, "%s: FCS error", __func__); goto error; } } padding = (macstat & BWN_RX_MAC_PADDING) ? 
2 : 0; totlen = len + padding; KASSERT(totlen <= MCLBYTES, ("too big..\n")); m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) { device_printf(sc->sc_dev, "%s: out of memory", __func__); goto error; } mp = mtod(m, unsigned char *); if (prq->prq_rev >= 8) { bus_read_multi_4(sc->sc_mem_res, prq->prq_base + BWN_PIO8_RXDATA, (void *)mp, (totlen & ~3)); if (totlen & 3) { v32 = bwn_pio_rx_read_4(prq, BWN_PIO8_RXDATA); data = &(mp[totlen - 1]); switch (totlen & 3) { case 3: *data = (v32 >> 16); data--; case 2: *data = (v32 >> 8); data--; case 1: *data = v32; } } } else { bus_read_multi_2(sc->sc_mem_res, prq->prq_base + BWN_PIO_RXDATA, (void *)mp, (totlen & ~1)); if (totlen & 1) { v16 = bwn_pio_rx_read_2(prq, BWN_PIO_RXDATA); mp[totlen - 1] = v16; } } m->m_len = m->m_pkthdr.len = totlen; bwn_rxeof(prq->prq_mac, m, &rxhdr); return (1); error: if (prq->prq_rev >= 8) bwn_pio_rx_write_4(prq, BWN_PIO8_RXCTL, BWN_PIO8_RXCTL_DATAREADY); else bwn_pio_rx_write_2(prq, BWN_PIO_RXCTL, BWN_PIO_RXCTL_DATAREADY); return (1); } static int bwn_dma_newbuf(struct bwn_dma_ring *dr, struct bwn_dmadesc_generic *desc, struct bwn_dmadesc_meta *meta, int init) { struct bwn_mac *mac = dr->dr_mac; struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_rxhdr4 *hdr; bus_dmamap_t map; bus_addr_t paddr; struct mbuf *m; int error; m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) { error = ENOBUFS; /* * If the NIC is up and running, we need to: * - Clear RX buffer's header. * - Restore RX descriptor settings. */ if (init) return (error); else goto back; } m->m_len = m->m_pkthdr.len = MCLBYTES; bwn_dma_set_redzone(dr, m); /* * Try to load RX buf into temporary DMA map */ error = bus_dmamap_load_mbuf(dma->rxbuf_dtag, dr->dr_spare_dmap, m, bwn_dma_buf_addr, &paddr, BUS_DMA_NOWAIT); if (error) { m_freem(m); /* * See the comment above */ if (init) return (error); else goto back; } if (!init) bus_dmamap_unload(dma->rxbuf_dtag, meta->mt_dmap); meta->mt_m = m; meta->mt_paddr = paddr; /* * Swap RX buf's DMA map with the loaded temporary one */ map = meta->mt_dmap; meta->mt_dmap = dr->dr_spare_dmap; dr->dr_spare_dmap = map; back: /* * Clear RX buf header */ hdr = mtod(meta->mt_m, struct bwn_rxhdr4 *); bzero(hdr, sizeof(*hdr)); bus_dmamap_sync(dma->rxbuf_dtag, meta->mt_dmap, BUS_DMASYNC_PREWRITE); /* * Setup RX buf descriptor */ dr->setdesc(dr, desc, meta->mt_paddr, meta->mt_m->m_len - sizeof(*hdr), 0, 0, 0); return (error); } static void bwn_dma_buf_addr(void *arg, bus_dma_segment_t *seg, int nseg, bus_size_t mapsz __unused, int error) { if (!error) { KASSERT(nseg == 1, ("too many segments(%d)\n", nseg)); *((bus_addr_t *)arg) = seg->ds_addr; } } static int bwn_hwrate2ieeerate(int rate) { switch (rate) { case BWN_CCK_RATE_1MB: return (2); case BWN_CCK_RATE_2MB: return (4); case BWN_CCK_RATE_5MB: return (11); case BWN_CCK_RATE_11MB: return (22); case BWN_OFDM_RATE_6MB: return (12); case BWN_OFDM_RATE_9MB: return (18); case BWN_OFDM_RATE_12MB: return (24); case BWN_OFDM_RATE_18MB: return (36); case BWN_OFDM_RATE_24MB: return (48); case BWN_OFDM_RATE_36MB: return (72); case BWN_OFDM_RATE_48MB: return (96); case BWN_OFDM_RATE_54MB: return (108); default: printf("Ooops\n"); return (0); } } /* * Post process the RX provided RSSI. * * Valid for A, B, G, LP PHYs. 
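 * The conversion depends on the radio: 2050 radios use either a linear
 * adjustment or the G PHY NRSSI lookup table, 2060 radios report a
 * signed value directly, and other radios apply a fixed linear scaling.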
*/ static int8_t bwn_rx_rssi_calc(struct bwn_mac *mac, uint8_t in_rssi, int ofdm, int adjust_2053, int adjust_2050) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *gphy = &phy->phy_g; int tmp; switch (phy->rf_ver) { case 0x2050: if (ofdm) { tmp = in_rssi; if (tmp > 127) tmp -= 256; tmp = tmp * 73 / 64; if (adjust_2050) tmp += 25; else tmp -= 3; } else { if (mac->mac_sc->sc_board_info.board_flags & BHND_BFL_ADCDIV) { if (in_rssi > 63) in_rssi = 63; tmp = gphy->pg_nrssi_lt[in_rssi]; tmp = (31 - tmp) * -131 / 128 - 57; } else { tmp = in_rssi; tmp = (31 - tmp) * -149 / 128 - 68; } if (phy->type == BWN_PHYTYPE_G && adjust_2050) tmp += 25; } break; case 0x2060: if (in_rssi > 127) tmp = in_rssi - 256; else tmp = in_rssi; break; default: tmp = in_rssi; tmp = (tmp - 11) * 103 / 64; if (adjust_2053) tmp -= 109; else tmp -= 83; } return (tmp); } static void bwn_rxeof(struct bwn_mac *mac, struct mbuf *m, const void *_rxhdr) { const struct bwn_rxhdr4 *rxhdr = _rxhdr; struct bwn_plcp6 *plcp; struct bwn_softc *sc = mac->mac_sc; struct ieee80211_frame_min *wh; struct ieee80211_node *ni; struct ieee80211com *ic = &sc->sc_ic; uint32_t macstat; int padding, rate, rssi = 0, noise = 0, type; uint16_t phytype, phystat0, phystat3, chanstat; unsigned char *mp = mtod(m, unsigned char *); BWN_ASSERT_LOCKED(sc); phystat0 = le16toh(rxhdr->phy_status0); /* * XXX Note: phy_status3 doesn't exist for HT-PHY; it's only * used for LP-PHY. */ phystat3 = le16toh(rxhdr->ps3.lp.phy_status3); switch (mac->mac_fw.fw_hdr_format) { case BWN_FW_HDR_351: case BWN_FW_HDR_410: macstat = le32toh(rxhdr->ps4.r351.mac_status); chanstat = le16toh(rxhdr->ps4.r351.channel); break; case BWN_FW_HDR_598: macstat = le32toh(rxhdr->ps4.r598.mac_status); chanstat = le16toh(rxhdr->ps4.r598.channel); break; } phytype = chanstat & BWN_RX_CHAN_PHYTYPE; if (macstat & BWN_RX_MAC_FCSERR) device_printf(sc->sc_dev, "TODO RX: RX_FLAG_FAILED_FCS_CRC\n"); if (phystat0 & (BWN_RX_PHYST0_PLCPHCF | BWN_RX_PHYST0_PLCPFV)) device_printf(sc->sc_dev, "TODO RX: RX_FLAG_FAILED_PLCP_CRC\n"); if (macstat & BWN_RX_MAC_DECERR) goto drop; padding = (macstat & BWN_RX_MAC_PADDING) ? 2 : 0; if (m->m_pkthdr.len < (sizeof(struct bwn_plcp6) + padding)) { device_printf(sc->sc_dev, "frame too short (length=%d)\n", m->m_pkthdr.len); goto drop; } plcp = (struct bwn_plcp6 *)(mp + padding); m_adj(m, sizeof(struct bwn_plcp6) + padding); if (m->m_pkthdr.len < IEEE80211_MIN_LEN) { device_printf(sc->sc_dev, "frame too short (length=%d)\n", m->m_pkthdr.len); goto drop; } wh = mtod(m, struct ieee80211_frame_min *); if (macstat & BWN_RX_MAC_DEC) { DPRINTF(sc, BWN_DEBUG_HWCRYPTO, "RX decryption attempted (old %d keyidx %#x)\n", BWN_ISOLDFMT(mac), (macstat & BWN_RX_MAC_KEYIDX) >> BWN_RX_MAC_KEYIDX_SHIFT); } if (phystat0 & BWN_RX_PHYST0_OFDM) rate = bwn_plcp_get_ofdmrate(mac, plcp, phytype == BWN_PHYTYPE_A); else rate = bwn_plcp_get_cckrate(mac, plcp); if (rate == -1) { if (!(mac->mac_sc->sc_filters & BWN_MACCTL_PASS_BADPLCP)) goto drop; } sc->sc_rx_rate = bwn_hwrate2ieeerate(rate); /* rssi/noise */ switch (phytype) { case BWN_PHYTYPE_A: case BWN_PHYTYPE_B: case BWN_PHYTYPE_G: case BWN_PHYTYPE_LP: rssi = bwn_rx_rssi_calc(mac, rxhdr->phy.abg.rssi, !! (phystat0 & BWN_RX_PHYST0_OFDM), !! (phystat0 & BWN_RX_PHYST0_GAINCTL), !! 
(phystat3 & BWN_RX_PHYST3_TRSTATE)); break; case BWN_PHYTYPE_N: /* Broadcom has code for min/avg, but always used max */ if (rxhdr->phy.n.power0 == 16 || rxhdr->phy.n.power0 == 32) rssi = max(rxhdr->phy.n.power1, rxhdr->ps2.n.power2); else rssi = max(rxhdr->phy.n.power0, rxhdr->phy.n.power1); #if 0 DPRINTF(mac->mac_sc, BWN_DEBUG_RECV, "%s: power0=%d, power1=%d, power2=%d\n", __func__, rxhdr->phy.n.power0, rxhdr->phy.n.power1, rxhdr->ps2.n.power2); #endif break; default: /* XXX TODO: implement rssi for other PHYs */ break; } /* * RSSI here is absolute, not relative to the noise floor. */ noise = mac->mac_stats.link_noise; rssi = rssi - noise; /* RX radio tap */ if (ieee80211_radiotap_active(ic)) bwn_rx_radiotap(mac, m, rxhdr, plcp, rate, rssi, noise); m_adj(m, -IEEE80211_CRC_LEN); BWN_UNLOCK(sc); ni = ieee80211_find_rxnode(ic, wh); if (ni != NULL) { type = ieee80211_input(ni, m, rssi, noise); ieee80211_free_node(ni); } else type = ieee80211_input_all(ic, m, rssi, noise); BWN_LOCK(sc); return; drop: device_printf(sc->sc_dev, "%s: dropped\n", __func__); } static void bwn_ratectl_tx_complete(const struct ieee80211_node *ni, const struct bwn_txstatus *status) { struct ieee80211_ratectl_tx_status txs; int retrycnt = 0; /* * If we don't get an ACK, then we should log the * full framecnt. That may be 0 if it's a PHY * failure, so ensure that gets logged as some * retry attempt. */ txs.flags = IEEE80211_RATECTL_STATUS_LONG_RETRY; if (status->ack) { txs.status = IEEE80211_RATECTL_TX_SUCCESS; retrycnt = status->framecnt - 1; } else { txs.status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED; retrycnt = status->framecnt; if (retrycnt == 0) retrycnt = 1; } txs.long_retries = retrycnt; ieee80211_ratectl_tx_complete(ni, &txs); } static void bwn_dma_handle_txeof(struct bwn_mac *mac, const struct bwn_txstatus *status) { struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_dma_ring *dr; struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *meta; struct bwn_softc *sc = mac->mac_sc; int slot; BWN_ASSERT_LOCKED(sc); dr = bwn_dma_parse_cookie(mac, status, status->cookie, &slot); if (dr == NULL) { device_printf(sc->sc_dev, "failed to parse cookie\n"); return; } KASSERT(dr->dr_tx, ("%s:%d: fail", __func__, __LINE__)); while (1) { KASSERT(slot >= 0 && slot < dr->dr_numslots, ("%s:%d: fail", __func__, __LINE__)); dr->getdesc(dr, slot, &desc, &meta); if (meta->mt_txtype == BWN_DMADESC_METATYPE_HEADER) bus_dmamap_unload(dr->dr_txring_dtag, meta->mt_dmap); else if (meta->mt_txtype == BWN_DMADESC_METATYPE_BODY) bus_dmamap_unload(dma->txbuf_dtag, meta->mt_dmap); if (meta->mt_islast) { KASSERT(meta->mt_m != NULL, ("%s:%d: fail", __func__, __LINE__)); bwn_ratectl_tx_complete(meta->mt_ni, status); ieee80211_tx_complete(meta->mt_ni, meta->mt_m, 0); meta->mt_ni = NULL; meta->mt_m = NULL; } else KASSERT(meta->mt_m == NULL, ("%s:%d: fail", __func__, __LINE__)); dr->dr_usedslot--; if (meta->mt_islast) break; slot = bwn_dma_nextslot(dr, slot); } sc->sc_watchdog_timer = 0; if (dr->dr_stop) { KASSERT(bwn_dma_freeslot(dr) >= BWN_TX_SLOTS_PER_FRAME, ("%s:%d: fail", __func__, __LINE__)); dr->dr_stop = 0; } } static void bwn_pio_handle_txeof(struct bwn_mac *mac, const struct bwn_txstatus *status) { struct bwn_pio_txqueue *tq; struct bwn_pio_txpkt *tp = NULL; struct bwn_softc *sc = mac->mac_sc; BWN_ASSERT_LOCKED(sc); tq = bwn_pio_parse_cookie(mac, status->cookie, &tp); if (tq == NULL) return; tq->tq_used -= roundup(tp->tp_m->m_pkthdr.len + BWN_HDRSIZE(mac), 4); tq->tq_free++; if (tp->tp_ni != NULL) { /* * Do any tx complete callback. 
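		 * bwn_ratectl_tx_complete() reports the ACK/retry outcome
		 * to the net80211 rate control code.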
Note this must * be done before releasing the node reference. */ bwn_ratectl_tx_complete(tp->tp_ni, status); } ieee80211_tx_complete(tp->tp_ni, tp->tp_m, 0); tp->tp_ni = NULL; tp->tp_m = NULL; TAILQ_INSERT_TAIL(&tq->tq_pktlist, tp, tp_list); sc->sc_watchdog_timer = 0; } static void bwn_phy_txpower_check(struct bwn_mac *mac, uint32_t flags) { struct bwn_softc *sc = mac->mac_sc; struct bwn_phy *phy = &mac->mac_phy; struct ieee80211com *ic = &sc->sc_ic; unsigned long now; bwn_txpwr_result_t result; BWN_GETTIME(now); if (!(flags & BWN_TXPWR_IGNORE_TIME) && ieee80211_time_before(now, phy->nexttime)) return; phy->nexttime = now + 2 * 1000; if (sc->sc_board_info.board_vendor == PCI_VENDOR_BROADCOM && sc->sc_board_info.board_type == BHND_BOARD_BU4306) return; if (phy->recalc_txpwr != NULL) { result = phy->recalc_txpwr(mac, (flags & BWN_TXPWR_IGNORE_TSSI) ? 1 : 0); if (result == BWN_TXPWR_RES_DONE) return; KASSERT(result == BWN_TXPWR_RES_NEED_ADJUST, ("%s: fail", __func__)); KASSERT(phy->set_txpwr != NULL, ("%s: fail", __func__)); ieee80211_runtask(ic, &mac->mac_txpower); } } static uint16_t bwn_pio_rx_read_2(struct bwn_pio_rxqueue *prq, uint16_t offset) { return (BWN_READ_2(prq->prq_mac, prq->prq_base + offset)); } static uint32_t bwn_pio_rx_read_4(struct bwn_pio_rxqueue *prq, uint16_t offset) { return (BWN_READ_4(prq->prq_mac, prq->prq_base + offset)); } static void bwn_pio_rx_write_2(struct bwn_pio_rxqueue *prq, uint16_t offset, uint16_t value) { BWN_WRITE_2(prq->prq_mac, prq->prq_base + offset, value); } static void bwn_pio_rx_write_4(struct bwn_pio_rxqueue *prq, uint16_t offset, uint32_t value) { BWN_WRITE_4(prq->prq_mac, prq->prq_base + offset, value); } static int bwn_ieeerate2hwrate(struct bwn_softc *sc, int rate) { switch (rate) { /* OFDM rates (cf IEEE Std 802.11a-1999, pp. 14 Table 80) */ case 12: return (BWN_OFDM_RATE_6MB); case 18: return (BWN_OFDM_RATE_9MB); case 24: return (BWN_OFDM_RATE_12MB); case 36: return (BWN_OFDM_RATE_18MB); case 48: return (BWN_OFDM_RATE_24MB); case 72: return (BWN_OFDM_RATE_36MB); case 96: return (BWN_OFDM_RATE_48MB); case 108: return (BWN_OFDM_RATE_54MB); /* CCK rates (NB: not IEEE std, device-specific) */ case 2: return (BWN_CCK_RATE_1MB); case 4: return (BWN_CCK_RATE_2MB); case 11: return (BWN_CCK_RATE_5MB); case 22: return (BWN_CCK_RATE_11MB); } device_printf(sc->sc_dev, "unsupported rate %d\n", rate); return (BWN_CCK_RATE_1MB); } static uint16_t bwn_set_txhdr_phyctl1(struct bwn_mac *mac, uint8_t bitrate) { struct bwn_phy *phy = &mac->mac_phy; uint16_t control = 0; uint16_t bw; /* XXX TODO: this is for LP phy, what about N-PHY, etc? 
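	 * A 20 MHz bandwidth is assumed unconditionally below; for OFDM
	 * (and CCK on the LP PHY) the coding rate and modulation bits are
	 * then derived from the BWN_*_RATE value.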
*/ bw = BWN_TXH_PHY1_BW_20; if (BWN_ISCCKRATE(bitrate) && phy->type != BWN_PHYTYPE_LP) { control = bw; } else { control = bw; /* Figure out coding rate and modulation */ /* XXX TODO: table-ize, for MCS transmit */ /* Note: this is BWN_*_RATE values */ switch (bitrate) { case BWN_CCK_RATE_1MB: control |= 0; break; case BWN_CCK_RATE_2MB: control |= 1; break; case BWN_CCK_RATE_5MB: control |= 2; break; case BWN_CCK_RATE_11MB: control |= 3; break; case BWN_OFDM_RATE_6MB: control |= BWN_TXH_PHY1_CRATE_1_2; control |= BWN_TXH_PHY1_MODUL_BPSK; break; case BWN_OFDM_RATE_9MB: control |= BWN_TXH_PHY1_CRATE_3_4; control |= BWN_TXH_PHY1_MODUL_BPSK; break; case BWN_OFDM_RATE_12MB: control |= BWN_TXH_PHY1_CRATE_1_2; control |= BWN_TXH_PHY1_MODUL_QPSK; break; case BWN_OFDM_RATE_18MB: control |= BWN_TXH_PHY1_CRATE_3_4; control |= BWN_TXH_PHY1_MODUL_QPSK; break; case BWN_OFDM_RATE_24MB: control |= BWN_TXH_PHY1_CRATE_1_2; control |= BWN_TXH_PHY1_MODUL_QAM16; break; case BWN_OFDM_RATE_36MB: control |= BWN_TXH_PHY1_CRATE_3_4; control |= BWN_TXH_PHY1_MODUL_QAM16; break; case BWN_OFDM_RATE_48MB: control |= BWN_TXH_PHY1_CRATE_1_2; control |= BWN_TXH_PHY1_MODUL_QAM64; break; case BWN_OFDM_RATE_54MB: control |= BWN_TXH_PHY1_CRATE_3_4; control |= BWN_TXH_PHY1_MODUL_QAM64; break; default: break; } control |= BWN_TXH_PHY1_MODE_SISO; } return control; } static int bwn_set_txhdr(struct bwn_mac *mac, struct ieee80211_node *ni, struct mbuf *m, struct bwn_txhdr *txhdr, uint16_t cookie) { const struct bwn_phy *phy = &mac->mac_phy; struct bwn_softc *sc = mac->mac_sc; struct ieee80211_frame *wh; struct ieee80211_frame *protwh; const struct ieee80211_txparam *tp = ni->ni_txparms; struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = &sc->sc_ic; struct mbuf *mprot; uint8_t *prot_ptr; unsigned int len; uint32_t macctl = 0; int rts_rate, rts_rate_fb, ismcast, isshort, rix, type; uint16_t phyctl = 0; uint8_t rate, rate_fb; int fill_phy_ctl1 = 0; wh = mtod(m, struct ieee80211_frame *); memset(txhdr, 0, sizeof(*txhdr)); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0; if ((phy->type == BWN_PHYTYPE_N) || (phy->type == BWN_PHYTYPE_LP) || (phy->type == BWN_PHYTYPE_HT)) fill_phy_ctl1 = 1; /* * Find TX rate */ if (type != IEEE80211_FC0_TYPE_DATA || (m->m_flags & M_EAPOL)) rate = rate_fb = tp->mgmtrate; else if (ismcast) rate = rate_fb = tp->mcastrate; else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) rate = rate_fb = tp->ucastrate; else { rix = ieee80211_ratectl_rate(ni, NULL, 0); rate = ni->ni_txrate; if (rix > 0) rate_fb = ni->ni_rates.rs_rates[rix - 1] & IEEE80211_RATE_VAL; else rate_fb = rate; } sc->sc_tx_rate = rate; /* Note: this maps the select ieee80211 rate to hardware rate */ rate = bwn_ieeerate2hwrate(sc, rate); rate_fb = bwn_ieeerate2hwrate(sc, rate_fb); txhdr->phyrate = (BWN_ISOFDMRATE(rate)) ? 
bwn_plcp_getofdm(rate) : bwn_plcp_getcck(rate); bcopy(wh->i_fc, txhdr->macfc, sizeof(txhdr->macfc)); bcopy(wh->i_addr1, txhdr->addr1, IEEE80211_ADDR_LEN); /* XXX rate/rate_fb is the hardware rate */ if ((rate_fb == rate) || (*(u_int16_t *)wh->i_dur & htole16(0x8000)) || (*(u_int16_t *)wh->i_dur == htole16(0))) txhdr->dur_fb = *(u_int16_t *)wh->i_dur; else txhdr->dur_fb = ieee80211_compute_duration(ic->ic_rt, m->m_pkthdr.len, rate, isshort); /* XXX TX encryption */ switch (mac->mac_fw.fw_hdr_format) { case BWN_FW_HDR_351: bwn_plcp_genhdr((struct bwn_plcp4 *)(&txhdr->body.r351.plcp), m->m_pkthdr.len + IEEE80211_CRC_LEN, rate); break; case BWN_FW_HDR_410: bwn_plcp_genhdr((struct bwn_plcp4 *)(&txhdr->body.r410.plcp), m->m_pkthdr.len + IEEE80211_CRC_LEN, rate); break; case BWN_FW_HDR_598: bwn_plcp_genhdr((struct bwn_plcp4 *)(&txhdr->body.r598.plcp), m->m_pkthdr.len + IEEE80211_CRC_LEN, rate); break; } bwn_plcp_genhdr((struct bwn_plcp4 *)(&txhdr->plcp_fb), m->m_pkthdr.len + IEEE80211_CRC_LEN, rate_fb); txhdr->eftypes |= (BWN_ISOFDMRATE(rate_fb)) ? BWN_TX_EFT_FB_OFDM : BWN_TX_EFT_FB_CCK; txhdr->chan = phy->chan; phyctl |= (BWN_ISOFDMRATE(rate)) ? BWN_TX_PHY_ENC_OFDM : BWN_TX_PHY_ENC_CCK; /* XXX preamble? obey net80211 */ if (isshort && (rate == BWN_CCK_RATE_2MB || rate == BWN_CCK_RATE_5MB || rate == BWN_CCK_RATE_11MB)) phyctl |= BWN_TX_PHY_SHORTPRMBL; if (! phy->gmode) macctl |= BWN_TX_MAC_5GHZ; /* XXX TX antenna selection */ switch (bwn_antenna_sanitize(mac, 0)) { case 0: phyctl |= BWN_TX_PHY_ANT01AUTO; break; case 1: phyctl |= BWN_TX_PHY_ANT0; break; case 2: phyctl |= BWN_TX_PHY_ANT1; break; case 3: phyctl |= BWN_TX_PHY_ANT2; break; case 4: phyctl |= BWN_TX_PHY_ANT3; break; default: KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } if (!ismcast) macctl |= BWN_TX_MAC_ACK; macctl |= (BWN_TX_MAC_HWSEQ | BWN_TX_MAC_START_MSDU); if (!IEEE80211_IS_MULTICAST(wh->i_addr1) && m->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) macctl |= BWN_TX_MAC_LONGFRAME; if ((ic->ic_flags & IEEE80211_F_USEPROT) && ic->ic_protmode != IEEE80211_PROT_NONE) { /* Note: don't fall back to CCK rates for 5G */ if (phy->gmode) rts_rate = BWN_CCK_RATE_1MB; else rts_rate = BWN_OFDM_RATE_6MB; rts_rate_fb = bwn_get_fbrate(rts_rate); /* XXX 'rate' here is hardware rate now, not the net80211 rate */ mprot = ieee80211_alloc_prot(ni, m, rate, ic->ic_protmode); if (mprot == NULL) { if_inc_counter(vap->iv_ifp, IFCOUNTER_OERRORS, 1); device_printf(sc->sc_dev, "could not allocate mbuf for protection mode %d\n", ic->ic_protmode); return (ENOBUFS); } switch (mac->mac_fw.fw_hdr_format) { case BWN_FW_HDR_351: prot_ptr = txhdr->body.r351.rts_frame; break; case BWN_FW_HDR_410: prot_ptr = txhdr->body.r410.rts_frame; break; case BWN_FW_HDR_598: prot_ptr = txhdr->body.r598.rts_frame; break; } bcopy(mtod(mprot, uint8_t *), prot_ptr, mprot->m_pkthdr.len); m_freem(mprot); if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) { macctl |= BWN_TX_MAC_SEND_CTSTOSELF; len = sizeof(struct ieee80211_frame_cts); } else { macctl |= BWN_TX_MAC_SEND_RTSCTS; len = sizeof(struct ieee80211_frame_rts); } len += IEEE80211_CRC_LEN; switch (mac->mac_fw.fw_hdr_format) { case BWN_FW_HDR_351: bwn_plcp_genhdr((struct bwn_plcp4 *) &txhdr->body.r351.rts_plcp, len, rts_rate); break; case BWN_FW_HDR_410: bwn_plcp_genhdr((struct bwn_plcp4 *) &txhdr->body.r410.rts_plcp, len, rts_rate); break; case BWN_FW_HDR_598: bwn_plcp_genhdr((struct bwn_plcp4 *) &txhdr->body.r598.rts_plcp, len, rts_rate); break; } bwn_plcp_genhdr((struct bwn_plcp4 *)&txhdr->rts_plcp_fb, len, 
rts_rate_fb); switch (mac->mac_fw.fw_hdr_format) { case BWN_FW_HDR_351: protwh = (struct ieee80211_frame *) &txhdr->body.r351.rts_frame; break; case BWN_FW_HDR_410: protwh = (struct ieee80211_frame *) &txhdr->body.r410.rts_frame; break; case BWN_FW_HDR_598: protwh = (struct ieee80211_frame *) &txhdr->body.r598.rts_frame; break; } txhdr->rts_dur_fb = *(u_int16_t *)protwh->i_dur; if (BWN_ISOFDMRATE(rts_rate)) { txhdr->eftypes |= BWN_TX_EFT_RTS_OFDM; txhdr->phyrate_rts = bwn_plcp_getofdm(rts_rate); } else { txhdr->eftypes |= BWN_TX_EFT_RTS_CCK; txhdr->phyrate_rts = bwn_plcp_getcck(rts_rate); } txhdr->eftypes |= (BWN_ISOFDMRATE(rts_rate_fb)) ? BWN_TX_EFT_RTS_FBOFDM : BWN_TX_EFT_RTS_FBCCK; if (fill_phy_ctl1) { txhdr->phyctl_1rts = htole16(bwn_set_txhdr_phyctl1(mac, rts_rate)); txhdr->phyctl_1rtsfb = htole16(bwn_set_txhdr_phyctl1(mac, rts_rate_fb)); } } if (fill_phy_ctl1) { txhdr->phyctl_1 = htole16(bwn_set_txhdr_phyctl1(mac, rate)); txhdr->phyctl_1fb = htole16(bwn_set_txhdr_phyctl1(mac, rate_fb)); } switch (mac->mac_fw.fw_hdr_format) { case BWN_FW_HDR_351: txhdr->body.r351.cookie = htole16(cookie); break; case BWN_FW_HDR_410: txhdr->body.r410.cookie = htole16(cookie); break; case BWN_FW_HDR_598: txhdr->body.r598.cookie = htole16(cookie); break; } txhdr->macctl = htole32(macctl); txhdr->phyctl = htole16(phyctl); /* * TX radio tap */ if (ieee80211_radiotap_active_vap(vap)) { sc->sc_tx_th.wt_flags = 0; if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; if (isshort && (rate == BWN_CCK_RATE_2MB || rate == BWN_CCK_RATE_5MB || rate == BWN_CCK_RATE_11MB)) sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; sc->sc_tx_th.wt_rate = rate; ieee80211_radiotap_tx(vap, m); } return (0); } static void bwn_plcp_genhdr(struct bwn_plcp4 *plcp, const uint16_t octets, const uint8_t rate) { uint32_t d, plen; uint8_t *raw = plcp->o.raw; if (BWN_ISOFDMRATE(rate)) { d = bwn_plcp_getofdm(rate); KASSERT(!(octets & 0xf000), ("%s:%d: fail", __func__, __LINE__)); d |= (octets << 5); plcp->o.data = htole32(d); } else { plen = octets * 16 / rate; if ((octets * 16 % rate) > 0) { plen++; if ((rate == BWN_CCK_RATE_11MB) && ((octets * 8 % 11) < 4)) { raw[1] = 0x84; } else raw[1] = 0x04; } else raw[1] = 0x04; plcp->o.data |= htole32(plen << 16); raw[0] = bwn_plcp_getcck(rate); } } static uint8_t bwn_antenna_sanitize(struct bwn_mac *mac, uint8_t n) { struct bwn_softc *sc = mac->mac_sc; uint8_t mask; if (n == 0) return (0); if (mac->mac_phy.gmode) mask = sc->sc_ant2g; else mask = sc->sc_ant5g; if (!(mask & (1 << (n - 1)))) return (0); return (n); } /* * Return a fallback rate for the given rate. * * Note: Don't fall back from OFDM to CCK. 
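 * Each rate simply drops to the next lower rate of the same family;
 * the lowest CCK (1MB) and OFDM (6MB) rates fall back to themselves.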
*/ static uint8_t bwn_get_fbrate(uint8_t bitrate) { switch (bitrate) { /* CCK */ case BWN_CCK_RATE_1MB: return (BWN_CCK_RATE_1MB); case BWN_CCK_RATE_2MB: return (BWN_CCK_RATE_1MB); case BWN_CCK_RATE_5MB: return (BWN_CCK_RATE_2MB); case BWN_CCK_RATE_11MB: return (BWN_CCK_RATE_5MB); /* OFDM */ case BWN_OFDM_RATE_6MB: return (BWN_OFDM_RATE_6MB); case BWN_OFDM_RATE_9MB: return (BWN_OFDM_RATE_6MB); case BWN_OFDM_RATE_12MB: return (BWN_OFDM_RATE_9MB); case BWN_OFDM_RATE_18MB: return (BWN_OFDM_RATE_12MB); case BWN_OFDM_RATE_24MB: return (BWN_OFDM_RATE_18MB); case BWN_OFDM_RATE_36MB: return (BWN_OFDM_RATE_24MB); case BWN_OFDM_RATE_48MB: return (BWN_OFDM_RATE_36MB); case BWN_OFDM_RATE_54MB: return (BWN_OFDM_RATE_48MB); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (0); } static uint32_t bwn_pio_write_multi_4(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, uint32_t ctl, const void *_data, int len) { struct bwn_softc *sc = mac->mac_sc; uint32_t value = 0; const uint8_t *data = _data; ctl |= BWN_PIO8_TXCTL_0_7 | BWN_PIO8_TXCTL_8_15 | BWN_PIO8_TXCTL_16_23 | BWN_PIO8_TXCTL_24_31; bwn_pio_write_4(mac, tq, BWN_PIO8_TXCTL, ctl); bus_write_multi_4(sc->sc_mem_res, tq->tq_base + BWN_PIO8_TXDATA, __DECONST(void *, data), (len & ~3)); if (len & 3) { ctl &= ~(BWN_PIO8_TXCTL_8_15 | BWN_PIO8_TXCTL_16_23 | BWN_PIO8_TXCTL_24_31); data = &(data[len - 1]); switch (len & 3) { case 3: ctl |= BWN_PIO8_TXCTL_16_23; value |= (uint32_t)(*data) << 16; data--; case 2: ctl |= BWN_PIO8_TXCTL_8_15; value |= (uint32_t)(*data) << 8; data--; case 1: value |= (uint32_t)(*data); } bwn_pio_write_4(mac, tq, BWN_PIO8_TXCTL, ctl); bwn_pio_write_4(mac, tq, BWN_PIO8_TXDATA, value); } return (ctl); } static void bwn_pio_write_4(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, uint16_t offset, uint32_t value) { BWN_WRITE_4(mac, tq->tq_base + offset, value); } static uint16_t bwn_pio_write_multi_2(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, uint16_t ctl, const void *_data, int len) { struct bwn_softc *sc = mac->mac_sc; const uint8_t *data = _data; ctl |= BWN_PIO_TXCTL_WRITELO | BWN_PIO_TXCTL_WRITEHI; BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl); bus_write_multi_2(sc->sc_mem_res, tq->tq_base + BWN_PIO_TXDATA, __DECONST(void *, data), (len & ~1)); if (len & 1) { ctl &= ~BWN_PIO_TXCTL_WRITEHI; BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl); BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXDATA, data[len - 1]); } return (ctl); } static uint16_t bwn_pio_write_mbuf_2(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, uint16_t ctl, struct mbuf *m0) { int i, j = 0; uint16_t data = 0; const uint8_t *buf; struct mbuf *m = m0; ctl |= BWN_PIO_TXCTL_WRITELO | BWN_PIO_TXCTL_WRITEHI; BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl); for (; m != NULL; m = m->m_next) { buf = mtod(m, const uint8_t *); for (i = 0; i < m->m_len; i++) { if (!((j++) % 2)) data |= buf[i]; else { data |= (buf[i] << 8); BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXDATA, data); data = 0; } } } if (m0->m_pkthdr.len % 2) { ctl &= ~BWN_PIO_TXCTL_WRITEHI; BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl); BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXDATA, data); } return (ctl); } static void bwn_set_slot_time(struct bwn_mac *mac, uint16_t time) { /* XXX should exit if 5GHz band .. 
*/ if (mac->mac_phy.type != BWN_PHYTYPE_G) return; BWN_WRITE_2(mac, 0x684, 510 + time); /* Disabled in Linux b43, can adversely effect performance */ #if 0 bwn_shm_write_2(mac, BWN_SHARED, 0x0010, time); #endif } static struct bwn_dma_ring * bwn_dma_select(struct bwn_mac *mac, uint8_t prio) { if ((mac->mac_flags & BWN_MAC_FLAG_WME) == 0) return (mac->mac_method.dma.wme[WME_AC_BE]); switch (prio) { case 3: return (mac->mac_method.dma.wme[WME_AC_VO]); case 2: return (mac->mac_method.dma.wme[WME_AC_VI]); case 0: return (mac->mac_method.dma.wme[WME_AC_BE]); case 1: return (mac->mac_method.dma.wme[WME_AC_BK]); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (NULL); } static int bwn_dma_getslot(struct bwn_dma_ring *dr) { int slot; BWN_ASSERT_LOCKED(dr->dr_mac->mac_sc); KASSERT(dr->dr_tx, ("%s:%d: fail", __func__, __LINE__)); KASSERT(!(dr->dr_stop), ("%s:%d: fail", __func__, __LINE__)); KASSERT(bwn_dma_freeslot(dr) != 0, ("%s:%d: fail", __func__, __LINE__)); slot = bwn_dma_nextslot(dr, dr->dr_curslot); KASSERT(!(slot & ~0x0fff), ("%s:%d: fail", __func__, __LINE__)); dr->dr_curslot = slot; dr->dr_usedslot++; return (slot); } static struct bwn_pio_txqueue * bwn_pio_parse_cookie(struct bwn_mac *mac, uint16_t cookie, struct bwn_pio_txpkt **pack) { struct bwn_pio *pio = &mac->mac_method.pio; struct bwn_pio_txqueue *tq = NULL; unsigned int index; switch (cookie & 0xf000) { case 0x1000: tq = &pio->wme[WME_AC_BK]; break; case 0x2000: tq = &pio->wme[WME_AC_BE]; break; case 0x3000: tq = &pio->wme[WME_AC_VI]; break; case 0x4000: tq = &pio->wme[WME_AC_VO]; break; case 0x5000: tq = &pio->mcast; break; } KASSERT(tq != NULL, ("%s:%d: fail", __func__, __LINE__)); if (tq == NULL) return (NULL); index = (cookie & 0x0fff); KASSERT(index < N(tq->tq_pkts), ("%s:%d: fail", __func__, __LINE__)); if (index >= N(tq->tq_pkts)) return (NULL); *pack = &tq->tq_pkts[index]; KASSERT(*pack != NULL, ("%s:%d: fail", __func__, __LINE__)); return (tq); } static void bwn_txpwr(void *arg, int npending) { struct bwn_mac *mac = arg; struct bwn_softc *sc; if (mac == NULL) return; sc = mac->mac_sc; BWN_LOCK(sc); if (mac->mac_status >= BWN_MAC_STATUS_STARTED && mac->mac_phy.set_txpwr != NULL) mac->mac_phy.set_txpwr(mac); BWN_UNLOCK(sc); } static void bwn_task_15s(struct bwn_mac *mac) { uint16_t reg; if (mac->mac_fw.opensource) { reg = bwn_shm_read_2(mac, BWN_SCRATCH, BWN_WATCHDOG_REG); if (reg) { bwn_restart(mac, "fw watchdog"); return; } bwn_shm_write_2(mac, BWN_SCRATCH, BWN_WATCHDOG_REG, 1); } if (mac->mac_phy.task_15s) mac->mac_phy.task_15s(mac); mac->mac_phy.txerrors = BWN_TXERROR_MAX; } static void bwn_task_30s(struct bwn_mac *mac) { if (mac->mac_phy.type != BWN_PHYTYPE_G || mac->mac_noise.noi_running) return; mac->mac_noise.noi_running = 1; mac->mac_noise.noi_nsamples = 0; bwn_noise_gensample(mac); } static void bwn_task_60s(struct bwn_mac *mac) { if (mac->mac_phy.task_60s) mac->mac_phy.task_60s(mac); bwn_phy_txpower_check(mac, BWN_TXPWR_IGNORE_TIME); } static void bwn_tasks(void *arg) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; BWN_ASSERT_LOCKED(sc); if (mac->mac_status != BWN_MAC_STATUS_STARTED) return; if (mac->mac_task_state % 4 == 0) bwn_task_60s(mac); if (mac->mac_task_state % 2 == 0) bwn_task_30s(mac); bwn_task_15s(mac); mac->mac_task_state++; callout_reset(&sc->sc_task_ch, hz * 15, bwn_tasks, mac); } static int bwn_plcp_get_ofdmrate(struct bwn_mac *mac, struct bwn_plcp6 *plcp, uint8_t a) { struct bwn_softc *sc = mac->mac_sc; KASSERT(a == 0, ("not support APHY\n")); switch (plcp->o.raw[0] & 
0xf) { case 0xb: return (BWN_OFDM_RATE_6MB); case 0xf: return (BWN_OFDM_RATE_9MB); case 0xa: return (BWN_OFDM_RATE_12MB); case 0xe: return (BWN_OFDM_RATE_18MB); case 0x9: return (BWN_OFDM_RATE_24MB); case 0xd: return (BWN_OFDM_RATE_36MB); case 0x8: return (BWN_OFDM_RATE_48MB); case 0xc: return (BWN_OFDM_RATE_54MB); } device_printf(sc->sc_dev, "incorrect OFDM rate %d\n", plcp->o.raw[0] & 0xf); return (-1); } static int bwn_plcp_get_cckrate(struct bwn_mac *mac, struct bwn_plcp6 *plcp) { struct bwn_softc *sc = mac->mac_sc; switch (plcp->o.raw[0]) { case 0x0a: return (BWN_CCK_RATE_1MB); case 0x14: return (BWN_CCK_RATE_2MB); case 0x37: return (BWN_CCK_RATE_5MB); case 0x6e: return (BWN_CCK_RATE_11MB); } device_printf(sc->sc_dev, "incorrect CCK rate %d\n", plcp->o.raw[0]); return (-1); } static void bwn_rx_radiotap(struct bwn_mac *mac, struct mbuf *m, const struct bwn_rxhdr4 *rxhdr, struct bwn_plcp6 *plcp, int rate, int rssi, int noise) { struct bwn_softc *sc = mac->mac_sc; const struct ieee80211_frame_min *wh; uint64_t tsf; uint16_t low_mactime_now; uint16_t mt; if (htole16(rxhdr->phy_status0) & BWN_RX_PHYST0_SHORTPRMBL) sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; wh = mtod(m, const struct ieee80211_frame_min *); if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_WEP; bwn_tsf_read(mac, &tsf); low_mactime_now = tsf; tsf = tsf & ~0xffffULL; switch (mac->mac_fw.fw_hdr_format) { case BWN_FW_HDR_351: case BWN_FW_HDR_410: mt = le16toh(rxhdr->ps4.r351.mac_time); break; case BWN_FW_HDR_598: mt = le16toh(rxhdr->ps4.r598.mac_time); break; } tsf += mt; if (low_mactime_now < mt) tsf -= 0x10000; sc->sc_rx_th.wr_tsf = tsf; sc->sc_rx_th.wr_rate = rate; sc->sc_rx_th.wr_antsignal = rssi; sc->sc_rx_th.wr_antnoise = noise; } static void bwn_tsf_read(struct bwn_mac *mac, uint64_t *tsf) { uint32_t low, high; KASSERT(bhnd_get_hwrev(mac->mac_sc->sc_dev) >= 3, ("%s:%d: fail", __func__, __LINE__)); low = BWN_READ_4(mac, BWN_REV3PLUS_TSF_LOW); high = BWN_READ_4(mac, BWN_REV3PLUS_TSF_HIGH); *tsf = high; *tsf <<= 32; *tsf |= low; } static int bwn_dma_attach(struct bwn_mac *mac) { struct bwn_dma *dma; struct bwn_softc *sc; struct bhnd_dma_translation *dt, dma_translation; bhnd_addr_t addrext_req; bus_dma_tag_t dmat; bus_addr_t lowaddr; u_int addrext_shift, addr_width; int error; dma = &mac->mac_method.dma; sc = mac->mac_sc; dt = NULL; if (sc->sc_quirks & BWN_QUIRK_NODMA) return (0); KASSERT(bhnd_get_hwrev(sc->sc_dev) >= 5, ("%s: fail", __func__)); /* Use the DMA engine's maximum host address width to determine the * addrext constraints, and supported device address width. 
*/ switch (mac->mac_dmatype) { case BHND_DMA_ADDR_30BIT: /* 32-bit engine without addrext support */ addrext_req = 0x0; addrext_shift = 0; /* We can address the full 32-bit device address space */ addr_width = BHND_DMA_ADDR_32BIT; break; case BHND_DMA_ADDR_32BIT: /* 32-bit engine with addrext support */ addrext_req = BWN_DMA32_ADDREXT_MASK; addrext_shift = BWN_DMA32_ADDREXT_SHIFT; addr_width = BHND_DMA_ADDR_32BIT; break; case BHND_DMA_ADDR_64BIT: /* 64-bit engine with addrext support */ addrext_req = BWN_DMA64_ADDREXT_MASK; addrext_shift = BWN_DMA64_ADDREXT_SHIFT; addr_width = BHND_DMA_ADDR_64BIT; break; default: device_printf(sc->sc_dev, "unsupported DMA address width: %d\n", mac->mac_dmatype); return (ENXIO); } /* Fetch our device->host DMA translation and tag */ error = bhnd_get_dma_translation(sc->sc_dev, addr_width, 0, &dmat, &dma_translation); if (error) { device_printf(sc->sc_dev, "error fetching DMA translation: " "%d\n", error); return (error); } /* Verify that our DMA engine's addrext constraints are compatible with * our DMA translation */ if (addrext_req != 0x0 && (dma_translation.addrext_mask & addrext_req) != addrext_req) { device_printf(sc->sc_dev, "bus addrext mask %#jx incompatible " "with device addrext mask %#jx, disabling extended address " "support\n", (uintmax_t)dma_translation.addrext_mask, (uintmax_t)addrext_req); addrext_req = 0x0; addrext_shift = 0; } /* Apply our addrext translation constraint */ dma_translation.addrext_mask = addrext_req; /* Initialize our DMA engine configuration */ mac->mac_flags |= BWN_MAC_FLAG_DMA; dma->addrext_shift = addrext_shift; dma->translation = dma_translation; dt = &dma->translation; /* Dermine our translation's maximum supported address */ lowaddr = MIN((dt->addr_mask | dt->addrext_mask), BUS_SPACE_MAXADDR); /* * Create top level DMA tag */ error = bus_dma_tag_create(dmat, /* parent */ BWN_ALIGN, 0, /* alignment, bounds */ lowaddr, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE, /* maxsize */ BUS_SPACE_UNRESTRICTED, /* nsegments */ BUS_SPACE_MAXSIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &dma->parent_dtag); if (error) { device_printf(sc->sc_dev, "can't create parent DMA tag\n"); return (error); } /* * Create TX/RX mbuf DMA tag */ error = bus_dma_tag_create(dma->parent_dtag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &dma->rxbuf_dtag); if (error) { device_printf(sc->sc_dev, "can't create mbuf DMA tag\n"); goto fail0; } error = bus_dma_tag_create(dma->parent_dtag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &dma->txbuf_dtag); if (error) { device_printf(sc->sc_dev, "can't create mbuf DMA tag\n"); goto fail1; } dma->wme[WME_AC_BK] = bwn_dma_ringsetup(mac, 0, 1); if (!dma->wme[WME_AC_BK]) goto fail2; dma->wme[WME_AC_BE] = bwn_dma_ringsetup(mac, 1, 1); if (!dma->wme[WME_AC_BE]) goto fail3; dma->wme[WME_AC_VI] = bwn_dma_ringsetup(mac, 2, 1); if (!dma->wme[WME_AC_VI]) goto fail4; dma->wme[WME_AC_VO] = bwn_dma_ringsetup(mac, 3, 1); if (!dma->wme[WME_AC_VO]) goto fail5; dma->mcast = bwn_dma_ringsetup(mac, 4, 1); if (!dma->mcast) goto fail6; dma->rx = bwn_dma_ringsetup(mac, 0, 0); if (!dma->rx) goto fail7; return (error); fail7: bwn_dma_ringfree(&dma->mcast); fail6: bwn_dma_ringfree(&dma->wme[WME_AC_VO]); fail5: bwn_dma_ringfree(&dma->wme[WME_AC_VI]); fail4: bwn_dma_ringfree(&dma->wme[WME_AC_BE]); fail3: 
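/*
 * Error unwind for bwn_dma_attach(): each failN label releases only what
 * was successfully allocated before the failing step, in reverse order
 * (TX/RX rings first, then the mbuf DMA tags, then the parent tag), so a
 * partial attach does not leak DMA resources.
 */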
bwn_dma_ringfree(&dma->wme[WME_AC_BK]); fail2: bus_dma_tag_destroy(dma->txbuf_dtag); fail1: bus_dma_tag_destroy(dma->rxbuf_dtag); fail0: bus_dma_tag_destroy(dma->parent_dtag); return (error); } static struct bwn_dma_ring * bwn_dma_parse_cookie(struct bwn_mac *mac, const struct bwn_txstatus *status, uint16_t cookie, int *slot) { struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_dma_ring *dr; struct bwn_softc *sc = mac->mac_sc; BWN_ASSERT_LOCKED(mac->mac_sc); switch (cookie & 0xf000) { case 0x1000: dr = dma->wme[WME_AC_BK]; break; case 0x2000: dr = dma->wme[WME_AC_BE]; break; case 0x3000: dr = dma->wme[WME_AC_VI]; break; case 0x4000: dr = dma->wme[WME_AC_VO]; break; case 0x5000: dr = dma->mcast; break; default: dr = NULL; KASSERT(0 == 1, ("invalid cookie value %d", cookie & 0xf000)); } *slot = (cookie & 0x0fff); if (*slot < 0 || *slot >= dr->dr_numslots) { /* * XXX FIXME: sometimes H/W returns TX DONE events duplicately * that it occurs events which have same H/W sequence numbers. * When it's occurred just prints a WARNING msgs and ignores. */ KASSERT(status->seq == dma->lastseq, ("%s:%d: fail", __func__, __LINE__)); device_printf(sc->sc_dev, "out of slot ranges (0 < %d < %d)\n", *slot, dr->dr_numslots); return (NULL); } dma->lastseq = status->seq; return (dr); } static void bwn_dma_stop(struct bwn_mac *mac) { struct bwn_dma *dma; if ((mac->mac_flags & BWN_MAC_FLAG_DMA) == 0) return; dma = &mac->mac_method.dma; bwn_dma_ringstop(&dma->rx); bwn_dma_ringstop(&dma->wme[WME_AC_BK]); bwn_dma_ringstop(&dma->wme[WME_AC_BE]); bwn_dma_ringstop(&dma->wme[WME_AC_VI]); bwn_dma_ringstop(&dma->wme[WME_AC_VO]); bwn_dma_ringstop(&dma->mcast); } static void bwn_dma_ringstop(struct bwn_dma_ring **dr) { if (dr == NULL) return; bwn_dma_cleanup(*dr); } static void bwn_pio_stop(struct bwn_mac *mac) { struct bwn_pio *pio; if (mac->mac_flags & BWN_MAC_FLAG_DMA) return; pio = &mac->mac_method.pio; bwn_destroy_queue_tx(&pio->mcast); bwn_destroy_queue_tx(&pio->wme[WME_AC_VO]); bwn_destroy_queue_tx(&pio->wme[WME_AC_VI]); bwn_destroy_queue_tx(&pio->wme[WME_AC_BE]); bwn_destroy_queue_tx(&pio->wme[WME_AC_BK]); } static int bwn_led_attach(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; const uint8_t *led_act = NULL; int error; int i; sc->sc_led_idle = (2350 * hz) / 1000; sc->sc_led_blink = 1; for (i = 0; i < N(bwn_vendor_led_act); ++i) { if (sc->sc_board_info.board_vendor == bwn_vendor_led_act[i].vid) { led_act = bwn_vendor_led_act[i].led_act; break; } } if (led_act == NULL) led_act = bwn_default_led_act; _Static_assert(nitems(bwn_led_vars) == BWN_LED_MAX, "invalid NVRAM variable name array"); for (i = 0; i < BWN_LED_MAX; ++i) { struct bwn_led *led; uint8_t val; led = &sc->sc_leds[i]; KASSERT(i < nitems(bwn_led_vars), ("unknown LED index")); error = bhnd_nvram_getvar_uint8(sc->sc_dev, bwn_led_vars[i], &val); if (error) { if (error != ENOENT) { device_printf(sc->sc_dev, "NVRAM variable %s " "unreadable: %d", bwn_led_vars[i], error); return (error); } /* Not found; use default */ led->led_act = led_act[i]; } else { if (val & BWN_LED_ACT_LOW) led->led_flags |= BWN_LED_F_ACTLOW; led->led_act = val & BWN_LED_ACT_MASK; } led->led_mask = (1 << i); if (led->led_act == BWN_LED_ACT_BLINK_SLOW || led->led_act == BWN_LED_ACT_BLINK_POLL || led->led_act == BWN_LED_ACT_BLINK) { led->led_flags |= BWN_LED_F_BLINK; if (led->led_act == BWN_LED_ACT_BLINK_POLL) led->led_flags |= BWN_LED_F_POLLABLE; else if (led->led_act == BWN_LED_ACT_BLINK_SLOW) led->led_flags |= BWN_LED_F_SLOW; if (sc->sc_blink_led == NULL) { sc->sc_blink_led 
= led; if (led->led_flags & BWN_LED_F_SLOW) BWN_LED_SLOWDOWN(sc->sc_led_idle); } } DPRINTF(sc, BWN_DEBUG_LED, "%dth led, act %d, lowact %d\n", i, led->led_act, led->led_flags & BWN_LED_F_ACTLOW); } callout_init_mtx(&sc->sc_led_blink_ch, &sc->sc_mtx, 0); return (0); } static __inline uint16_t bwn_led_onoff(const struct bwn_led *led, uint16_t val, int on) { if (led->led_flags & BWN_LED_F_ACTLOW) on = !on; if (on) val |= led->led_mask; else val &= ~led->led_mask; return val; } static void bwn_led_newstate(struct bwn_mac *mac, enum ieee80211_state nstate) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint16_t val; int i; if (nstate == IEEE80211_S_INIT) { callout_stop(&sc->sc_led_blink_ch); sc->sc_led_blinking = 0; } if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0) return; val = BWN_READ_2(mac, BWN_GPIO_CONTROL); for (i = 0; i < BWN_LED_MAX; ++i) { struct bwn_led *led = &sc->sc_leds[i]; int on; if (led->led_act == BWN_LED_ACT_UNKN || led->led_act == BWN_LED_ACT_NULL) continue; if ((led->led_flags & BWN_LED_F_BLINK) && nstate != IEEE80211_S_INIT) continue; switch (led->led_act) { case BWN_LED_ACT_ON: /* Always on */ on = 1; break; case BWN_LED_ACT_OFF: /* Always off */ case BWN_LED_ACT_5GHZ: /* TODO: 11A */ on = 0; break; default: on = 1; switch (nstate) { case IEEE80211_S_INIT: on = 0; break; case IEEE80211_S_RUN: if (led->led_act == BWN_LED_ACT_11G && ic->ic_curmode != IEEE80211_MODE_11G) on = 0; break; default: if (led->led_act == BWN_LED_ACT_ASSOC) on = 0; break; } break; } val = bwn_led_onoff(led, val, on); } BWN_WRITE_2(mac, BWN_GPIO_CONTROL, val); } static void bwn_led_event(struct bwn_mac *mac, int event) { struct bwn_softc *sc = mac->mac_sc; struct bwn_led *led = sc->sc_blink_led; int rate; if (event == BWN_LED_EVENT_POLL) { if ((led->led_flags & BWN_LED_F_POLLABLE) == 0) return; if (ticks - sc->sc_led_ticks < sc->sc_led_idle) return; } sc->sc_led_ticks = ticks; if (sc->sc_led_blinking) return; switch (event) { case BWN_LED_EVENT_RX: rate = sc->sc_rx_rate; break; case BWN_LED_EVENT_TX: rate = sc->sc_tx_rate; break; case BWN_LED_EVENT_POLL: rate = 0; break; default: panic("unknown LED event %d\n", event); break; } bwn_led_blink_start(mac, bwn_led_duration[rate].on_dur, bwn_led_duration[rate].off_dur); } static void bwn_led_blink_start(struct bwn_mac *mac, int on_dur, int off_dur) { struct bwn_softc *sc = mac->mac_sc; struct bwn_led *led = sc->sc_blink_led; uint16_t val; val = BWN_READ_2(mac, BWN_GPIO_CONTROL); val = bwn_led_onoff(led, val, 1); BWN_WRITE_2(mac, BWN_GPIO_CONTROL, val); if (led->led_flags & BWN_LED_F_SLOW) { BWN_LED_SLOWDOWN(on_dur); BWN_LED_SLOWDOWN(off_dur); } sc->sc_led_blinking = 1; sc->sc_led_blink_offdur = off_dur; callout_reset(&sc->sc_led_blink_ch, on_dur, bwn_led_blink_next, mac); } static void bwn_led_blink_next(void *arg) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; uint16_t val; val = BWN_READ_2(mac, BWN_GPIO_CONTROL); val = bwn_led_onoff(sc->sc_blink_led, val, 0); BWN_WRITE_2(mac, BWN_GPIO_CONTROL, val); callout_reset(&sc->sc_led_blink_ch, sc->sc_led_blink_offdur, bwn_led_blink_end, mac); } static void bwn_led_blink_end(void *arg) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; sc->sc_led_blinking = 0; } static int bwn_suspend(device_t dev) { struct bwn_softc *sc = device_get_softc(dev); BWN_LOCK(sc); bwn_stop(sc); BWN_UNLOCK(sc); return (0); } static int bwn_resume(device_t dev) { struct bwn_softc *sc = device_get_softc(dev); int error = EDOOFUS; BWN_LOCK(sc); if (sc->sc_ic.ic_nrunning > 0) error = 
bwn_init(sc); BWN_UNLOCK(sc); if (error == 0) ieee80211_start_all(&sc->sc_ic); return (0); } static void bwn_rfswitch(void *arg) { struct bwn_softc *sc = arg; struct bwn_mac *mac = sc->sc_curmac; int cur = 0, prev = 0; KASSERT(mac->mac_status >= BWN_MAC_STATUS_STARTED, ("%s: invalid MAC status %d", __func__, mac->mac_status)); if (mac->mac_phy.rev >= 3 || mac->mac_phy.type == BWN_PHYTYPE_LP || mac->mac_phy.type == BWN_PHYTYPE_N) { if (!(BWN_READ_4(mac, BWN_RF_HWENABLED_HI) & BWN_RF_HWENABLED_HI_MASK)) cur = 1; } else { if (BWN_READ_2(mac, BWN_RF_HWENABLED_LO) & BWN_RF_HWENABLED_LO_MASK) cur = 1; } if (mac->mac_flags & BWN_MAC_FLAG_RADIO_ON) prev = 1; DPRINTF(sc, BWN_DEBUG_RESET, "%s: called; cur=%d, prev=%d\n", __func__, cur, prev); if (cur != prev) { if (cur) mac->mac_flags |= BWN_MAC_FLAG_RADIO_ON; else mac->mac_flags &= ~BWN_MAC_FLAG_RADIO_ON; device_printf(sc->sc_dev, "status of RF switch is changed to %s\n", cur ? "ON" : "OFF"); if (cur != mac->mac_phy.rf_on) { if (cur) bwn_rf_turnon(mac); else bwn_rf_turnoff(mac); } } callout_schedule(&sc->sc_rfswitch_ch, hz); } static void bwn_sysctl_node(struct bwn_softc *sc) { device_t dev = sc->sc_dev; struct bwn_mac *mac; struct bwn_stats *stats; /* XXX assume that count of MAC is only 1. */ if ((mac = sc->sc_curmac) == NULL) return; stats = &mac->mac_stats; SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "linknoise", CTLFLAG_RW, &stats->rts, 0, "Noise level"); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "rts", CTLFLAG_RW, &stats->rts, 0, "RTS"); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "rtsfail", CTLFLAG_RW, &stats->rtsfail, 0, "RTS failed to send"); #ifdef BWN_DEBUG SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLFLAG_RW, &sc->sc_debug, 0, "Debug flags"); #endif } static device_method_t bwn_methods[] = { /* Device interface */ DEVMETHOD(device_probe, bwn_probe), DEVMETHOD(device_attach, bwn_attach), DEVMETHOD(device_detach, bwn_detach), DEVMETHOD(device_suspend, bwn_suspend), DEVMETHOD(device_resume, bwn_resume), DEVMETHOD_END }; static driver_t bwn_driver = { "bwn", bwn_methods, sizeof(struct bwn_softc) }; static devclass_t bwn_devclass; DRIVER_MODULE(bwn, bhnd, bwn_driver, bwn_devclass, 0, 0); MODULE_DEPEND(bwn, bhnd, 1, 1, 1); MODULE_DEPEND(bwn, gpiobus, 1, 1, 1); MODULE_DEPEND(bwn, wlan, 1, 1, 1); /* 802.11 media layer */ MODULE_DEPEND(bwn, firmware, 1, 1, 1); /* firmware support */ MODULE_DEPEND(bwn, wlan_amrr, 1, 1, 1); MODULE_VERSION(bwn, 1); diff --git a/sys/dev/bxe/bxe.c b/sys/dev/bxe/bxe.c index a1231ac40bd8..7dc0154ecda4 100644 --- a/sys/dev/bxe/bxe.c +++ b/sys/dev/bxe/bxe.c @@ -1,19577 +1,19577 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #define BXE_DRIVER_VERSION "1.78.91" #include "bxe.h" #include "ecore_sp.h" #include "ecore_init.h" #include "ecore_init_ops.h" #include "57710_int_offsets.h" #include "57711_int_offsets.h" #include "57712_int_offsets.h" /* * CTLTYPE_U64 and sysctl_handle_64 were added in r217616. Define these * explicitly here for older kernels that don't include this changeset. */ #ifndef CTLTYPE_U64 #define CTLTYPE_U64 CTLTYPE_QUAD #define sysctl_handle_64 sysctl_handle_quad #endif /* * CSUM_TCP_IPV6 and CSUM_UDP_IPV6 were added in r236170. Define these * here as zero(0) for older kernels that don't include this changeset * thereby masking the functionality. */ #ifndef CSUM_TCP_IPV6 #define CSUM_TCP_IPV6 0 #define CSUM_UDP_IPV6 0 #endif /* * pci_find_cap was added in r219865. Re-define this at pci_find_extcap * for older kernels that don't include this changeset. */ #if __FreeBSD_version < 900035 #define pci_find_cap pci_find_extcap #endif #define BXE_DEF_SB_ATT_IDX 0x0001 #define BXE_DEF_SB_IDX 0x0002 /* * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the per * function HW initialization. */ #define FLR_WAIT_USEC 10000 /* 10 msecs */ #define FLR_WAIT_INTERVAL 50 /* usecs */ #define FLR_POLL_CNT (FLR_WAIT_USEC / FLR_WAIT_INTERVAL) /* 200 */ struct pbf_pN_buf_regs { int pN; uint32_t init_crd; uint32_t crd; uint32_t crd_freed; }; struct pbf_pN_cmd_regs { int pN; uint32_t lines_occup; uint32_t lines_freed; }; /* * PCI Device ID Table used by bxe_probe(). 
*/ #define BXE_DEVDESC_MAX 64 static struct bxe_device_type bxe_devs[] = { { BRCM_VENDORID, CHIP_NUM_57710, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57710 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57711, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57711 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57711E, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57711E 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57712, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57712 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57712_MF, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57712 MF 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57800, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57800 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57800_MF, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57800 MF 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57810, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57810 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57810_MF, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57810 MF 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57811, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57811 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57811_MF, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57811 MF 10GbE" }, { BRCM_VENDORID, CHIP_NUM_57840_4_10, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57840 4x10GbE" }, { QLOGIC_VENDORID, CHIP_NUM_57840_4_10, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57840 4x10GbE" }, { BRCM_VENDORID, CHIP_NUM_57840_2_20, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57840 2x20GbE" }, { BRCM_VENDORID, CHIP_NUM_57840_MF, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57840 MF 10GbE" }, { 0, 0, 0, 0, NULL } }; MALLOC_DECLARE(M_BXE_ILT); MALLOC_DEFINE(M_BXE_ILT, "bxe_ilt", "bxe ILT pointer"); /* * FreeBSD device entry points. */ static int bxe_probe(device_t); static int bxe_attach(device_t); static int bxe_detach(device_t); static int bxe_shutdown(device_t); /* * FreeBSD KLD module/device interface event handler method. */ static device_method_t bxe_methods[] = { /* Device interface (device_if.h) */ DEVMETHOD(device_probe, bxe_probe), DEVMETHOD(device_attach, bxe_attach), DEVMETHOD(device_detach, bxe_detach), DEVMETHOD(device_shutdown, bxe_shutdown), /* Bus interface (bus_if.h) */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), KOBJMETHOD_END }; /* * FreeBSD KLD Module data declaration */ static driver_t bxe_driver = { "bxe", /* module name */ bxe_methods, /* event handler */ sizeof(struct bxe_softc) /* extra data */ }; /* * FreeBSD dev class is needed to manage dev instances and * to associate with a bus type */ static devclass_t bxe_devclass; MODULE_DEPEND(bxe, pci, 1, 1, 1); MODULE_DEPEND(bxe, ether, 1, 1, 1); DRIVER_MODULE(bxe, pci, bxe_driver, bxe_devclass, 0, 0); DEBUGNET_DEFINE(bxe); /* resources needed for unloading a previously loaded device */ #define BXE_PREV_WAIT_NEEDED 1 struct mtx bxe_prev_mtx; MTX_SYSINIT(bxe_prev_mtx, &bxe_prev_mtx, "bxe_prev_lock", MTX_DEF); struct bxe_prev_list_node { LIST_ENTRY(bxe_prev_list_node) node; uint8_t bus; uint8_t slot; uint8_t path; uint8_t aer; /* XXX automatic error recovery */ uint8_t undi; }; static LIST_HEAD(, bxe_prev_list_node) bxe_prev_list = LIST_HEAD_INITIALIZER(bxe_prev_list); static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */ /* Tunable device values... 
*/ SYSCTL_NODE(_hw, OID_AUTO, bxe, CTLFLAG_RD, 0, "bxe driver parameters"); /* Debug */ unsigned long bxe_debug = 0; SYSCTL_ULONG(_hw_bxe, OID_AUTO, debug, CTLFLAG_RDTUN, &bxe_debug, 0, "Debug logging mode"); /* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */ static int bxe_interrupt_mode = INTR_MODE_MSIX; SYSCTL_INT(_hw_bxe, OID_AUTO, interrupt_mode, CTLFLAG_RDTUN, &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode"); /* Number of Queues: 0 (Auto) or 1 to 16 (fixed queue number) */ static int bxe_queue_count = 4; SYSCTL_INT(_hw_bxe, OID_AUTO, queue_count, CTLFLAG_RDTUN, &bxe_queue_count, 0, "Multi-Queue queue count"); /* max number of buffers per queue (default RX_BD_USABLE) */ static int bxe_max_rx_bufs = 0; SYSCTL_INT(_hw_bxe, OID_AUTO, max_rx_bufs, CTLFLAG_RDTUN, &bxe_max_rx_bufs, 0, "Maximum Number of Rx Buffers Per Queue"); /* Host interrupt coalescing RX tick timer (usecs) */ static int bxe_hc_rx_ticks = 25; SYSCTL_INT(_hw_bxe, OID_AUTO, hc_rx_ticks, CTLFLAG_RDTUN, &bxe_hc_rx_ticks, 0, "Host Coalescing Rx ticks"); /* Host interrupt coalescing TX tick timer (usecs) */ static int bxe_hc_tx_ticks = 50; SYSCTL_INT(_hw_bxe, OID_AUTO, hc_tx_ticks, CTLFLAG_RDTUN, &bxe_hc_tx_ticks, 0, "Host Coalescing Tx ticks"); /* Maximum number of Rx packets to process at a time */ static int bxe_rx_budget = 0xffffffff; SYSCTL_INT(_hw_bxe, OID_AUTO, rx_budget, CTLFLAG_TUN, &bxe_rx_budget, 0, "Rx processing budget"); /* Maximum LRO aggregation size */ static int bxe_max_aggregation_size = 0; SYSCTL_INT(_hw_bxe, OID_AUTO, max_aggregation_size, CTLFLAG_TUN, &bxe_max_aggregation_size, 0, "max aggregation size"); /* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */ static int bxe_mrrs = -1; SYSCTL_INT(_hw_bxe, OID_AUTO, mrrs, CTLFLAG_RDTUN, &bxe_mrrs, 0, "PCIe maximum read request size"); /* AutoGrEEEn: 0 (hardware default), 1 (force on), 2 (force off) */ static int bxe_autogreeen = 0; SYSCTL_INT(_hw_bxe, OID_AUTO, autogreeen, CTLFLAG_RDTUN, &bxe_autogreeen, 0, "AutoGrEEEn support"); /* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */ static int bxe_udp_rss = 0; SYSCTL_INT(_hw_bxe, OID_AUTO, udp_rss, CTLFLAG_RDTUN, &bxe_udp_rss, 0, "UDP RSS support"); #define STAT_NAME_LEN 32 /* no stat names below can be longer than this */ #define STATS_OFFSET32(stat_name) \ (offsetof(struct bxe_eth_stats, stat_name) / 4) #define Q_STATS_OFFSET32(stat_name) \ (offsetof(struct bxe_eth_q_stats, stat_name) / 4) static const struct { uint32_t offset; uint32_t size; uint32_t flags; #define STATS_FLAGS_PORT 1 #define STATS_FLAGS_FUNC 2 /* MF only cares about function stats */ #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT) char string[STAT_NAME_LEN]; } bxe_eth_stats_arr[] = { { STATS_OFFSET32(total_bytes_received_hi), 8, STATS_FLAGS_BOTH, "rx_bytes" }, { STATS_OFFSET32(error_bytes_received_hi), 8, STATS_FLAGS_BOTH, "rx_error_bytes" }, { STATS_OFFSET32(total_unicast_packets_received_hi), 8, STATS_FLAGS_BOTH, "rx_ucast_packets" }, { STATS_OFFSET32(total_multicast_packets_received_hi), 8, STATS_FLAGS_BOTH, "rx_mcast_packets" }, { STATS_OFFSET32(total_broadcast_packets_received_hi), 8, STATS_FLAGS_BOTH, "rx_bcast_packets" }, { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi), 8, STATS_FLAGS_PORT, "rx_crc_errors" }, { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi), 8, STATS_FLAGS_PORT, "rx_align_errors" }, { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi), 8, STATS_FLAGS_PORT, "rx_undersize_packets" }, { STATS_OFFSET32(etherstatsoverrsizepkts_hi), 8, STATS_FLAGS_PORT, 
"rx_oversize_packets" }, { STATS_OFFSET32(rx_stat_etherstatsfragments_hi), 8, STATS_FLAGS_PORT, "rx_fragments" }, { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), 8, STATS_FLAGS_PORT, "rx_jabbers" }, { STATS_OFFSET32(no_buff_discard_hi), 8, STATS_FLAGS_BOTH, "rx_discards" }, { STATS_OFFSET32(mac_filter_discard), 4, STATS_FLAGS_PORT, "rx_filtered_packets" }, { STATS_OFFSET32(mf_tag_discard), 4, STATS_FLAGS_PORT, "rx_mf_tag_discard" }, { STATS_OFFSET32(pfc_frames_received_hi), 8, STATS_FLAGS_PORT, "pfc_frames_received" }, { STATS_OFFSET32(pfc_frames_sent_hi), 8, STATS_FLAGS_PORT, "pfc_frames_sent" }, { STATS_OFFSET32(brb_drop_hi), 8, STATS_FLAGS_PORT, "rx_brb_discard" }, { STATS_OFFSET32(brb_truncate_hi), 8, STATS_FLAGS_PORT, "rx_brb_truncate" }, { STATS_OFFSET32(pause_frames_received_hi), 8, STATS_FLAGS_PORT, "rx_pause_frames" }, { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi), 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" }, { STATS_OFFSET32(nig_timer_max), 4, STATS_FLAGS_PORT, "rx_constant_pause_events" }, { STATS_OFFSET32(total_bytes_transmitted_hi), 8, STATS_FLAGS_BOTH, "tx_bytes" }, { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), 8, STATS_FLAGS_PORT, "tx_error_bytes" }, { STATS_OFFSET32(total_unicast_packets_transmitted_hi), 8, STATS_FLAGS_BOTH, "tx_ucast_packets" }, { STATS_OFFSET32(total_multicast_packets_transmitted_hi), 8, STATS_FLAGS_BOTH, "tx_mcast_packets" }, { STATS_OFFSET32(total_broadcast_packets_transmitted_hi), 8, STATS_FLAGS_BOTH, "tx_bcast_packets" }, { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi), 8, STATS_FLAGS_PORT, "tx_mac_errors" }, { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi), 8, STATS_FLAGS_PORT, "tx_carrier_errors" }, { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi), 8, STATS_FLAGS_PORT, "tx_single_collisions" }, { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi), 8, STATS_FLAGS_PORT, "tx_multi_collisions" }, { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi), 8, STATS_FLAGS_PORT, "tx_deferred" }, { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi), 8, STATS_FLAGS_PORT, "tx_excess_collisions" }, { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi), 8, STATS_FLAGS_PORT, "tx_late_collisions" }, { STATS_OFFSET32(tx_stat_etherstatscollisions_hi), 8, STATS_FLAGS_PORT, "tx_total_collisions" }, { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi), 8, STATS_FLAGS_PORT, "tx_64_byte_packets" }, { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi), 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" }, { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi), 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" }, { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi), 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" }, { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi), 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" }, { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi), 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" }, { STATS_OFFSET32(etherstatspktsover1522octets_hi), 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" }, { STATS_OFFSET32(pause_frames_sent_hi), 8, STATS_FLAGS_PORT, "tx_pause_frames" }, { STATS_OFFSET32(total_tpa_aggregations_hi), 8, STATS_FLAGS_FUNC, "tpa_aggregations" }, { STATS_OFFSET32(total_tpa_aggregated_frames_hi), 8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"}, { STATS_OFFSET32(total_tpa_bytes_hi), 8, STATS_FLAGS_FUNC, "tpa_bytes"}, { STATS_OFFSET32(eee_tx_lpi), 4, STATS_FLAGS_PORT, "eee_tx_lpi"}, { STATS_OFFSET32(rx_calls), 4, 
STATS_FLAGS_FUNC, "rx_calls"}, { STATS_OFFSET32(rx_pkts), 4, STATS_FLAGS_FUNC, "rx_pkts"}, { STATS_OFFSET32(rx_tpa_pkts), 4, STATS_FLAGS_FUNC, "rx_tpa_pkts"}, { STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts), 4, STATS_FLAGS_FUNC, "rx_erroneous_jumbo_sge_pkts"}, { STATS_OFFSET32(rx_bxe_service_rxsgl), 4, STATS_FLAGS_FUNC, "rx_bxe_service_rxsgl"}, { STATS_OFFSET32(rx_jumbo_sge_pkts), 4, STATS_FLAGS_FUNC, "rx_jumbo_sge_pkts"}, { STATS_OFFSET32(rx_soft_errors), 4, STATS_FLAGS_FUNC, "rx_soft_errors"}, { STATS_OFFSET32(rx_hw_csum_errors), 4, STATS_FLAGS_FUNC, "rx_hw_csum_errors"}, { STATS_OFFSET32(rx_ofld_frames_csum_ip), 4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_ip"}, { STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp), 4, STATS_FLAGS_FUNC, "rx_ofld_frames_csum_tcp_udp"}, { STATS_OFFSET32(rx_budget_reached), 4, STATS_FLAGS_FUNC, "rx_budget_reached"}, { STATS_OFFSET32(tx_pkts), 4, STATS_FLAGS_FUNC, "tx_pkts"}, { STATS_OFFSET32(tx_soft_errors), 4, STATS_FLAGS_FUNC, "tx_soft_errors"}, { STATS_OFFSET32(tx_ofld_frames_csum_ip), 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_ip"}, { STATS_OFFSET32(tx_ofld_frames_csum_tcp), 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_tcp"}, { STATS_OFFSET32(tx_ofld_frames_csum_udp), 4, STATS_FLAGS_FUNC, "tx_ofld_frames_csum_udp"}, { STATS_OFFSET32(tx_ofld_frames_lso), 4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso"}, { STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits), 4, STATS_FLAGS_FUNC, "tx_ofld_frames_lso_hdr_splits"}, { STATS_OFFSET32(tx_encap_failures), 4, STATS_FLAGS_FUNC, "tx_encap_failures"}, { STATS_OFFSET32(tx_hw_queue_full), 4, STATS_FLAGS_FUNC, "tx_hw_queue_full"}, { STATS_OFFSET32(tx_hw_max_queue_depth), 4, STATS_FLAGS_FUNC, "tx_hw_max_queue_depth"}, { STATS_OFFSET32(tx_dma_mapping_failure), 4, STATS_FLAGS_FUNC, "tx_dma_mapping_failure"}, { STATS_OFFSET32(tx_max_drbr_queue_depth), 4, STATS_FLAGS_FUNC, "tx_max_drbr_queue_depth"}, { STATS_OFFSET32(tx_window_violation_std), 4, STATS_FLAGS_FUNC, "tx_window_violation_std"}, { STATS_OFFSET32(tx_window_violation_tso), 4, STATS_FLAGS_FUNC, "tx_window_violation_tso"}, { STATS_OFFSET32(tx_chain_lost_mbuf), 4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf"}, { STATS_OFFSET32(tx_frames_deferred), 4, STATS_FLAGS_FUNC, "tx_frames_deferred"}, { STATS_OFFSET32(tx_queue_xoff), 4, STATS_FLAGS_FUNC, "tx_queue_xoff"}, { STATS_OFFSET32(mbuf_defrag_attempts), 4, STATS_FLAGS_FUNC, "mbuf_defrag_attempts"}, { STATS_OFFSET32(mbuf_defrag_failures), 4, STATS_FLAGS_FUNC, "mbuf_defrag_failures"}, { STATS_OFFSET32(mbuf_rx_bd_alloc_failed), 4, STATS_FLAGS_FUNC, "mbuf_rx_bd_alloc_failed"}, { STATS_OFFSET32(mbuf_rx_bd_mapping_failed), 4, STATS_FLAGS_FUNC, "mbuf_rx_bd_mapping_failed"}, { STATS_OFFSET32(mbuf_rx_tpa_alloc_failed), 4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_alloc_failed"}, { STATS_OFFSET32(mbuf_rx_tpa_mapping_failed), 4, STATS_FLAGS_FUNC, "mbuf_rx_tpa_mapping_failed"}, { STATS_OFFSET32(mbuf_rx_sge_alloc_failed), 4, STATS_FLAGS_FUNC, "mbuf_rx_sge_alloc_failed"}, { STATS_OFFSET32(mbuf_rx_sge_mapping_failed), 4, STATS_FLAGS_FUNC, "mbuf_rx_sge_mapping_failed"}, { STATS_OFFSET32(mbuf_alloc_tx), 4, STATS_FLAGS_FUNC, "mbuf_alloc_tx"}, { STATS_OFFSET32(mbuf_alloc_rx), 4, STATS_FLAGS_FUNC, "mbuf_alloc_rx"}, { STATS_OFFSET32(mbuf_alloc_sge), 4, STATS_FLAGS_FUNC, "mbuf_alloc_sge"}, { STATS_OFFSET32(mbuf_alloc_tpa), 4, STATS_FLAGS_FUNC, "mbuf_alloc_tpa"}, { STATS_OFFSET32(tx_queue_full_return), 4, STATS_FLAGS_FUNC, "tx_queue_full_return"}, { STATS_OFFSET32(bxe_tx_mq_sc_state_failures), 4, STATS_FLAGS_FUNC, "bxe_tx_mq_sc_state_failures"}, { 
STATS_OFFSET32(tx_request_link_down_failures), 4, STATS_FLAGS_FUNC, "tx_request_link_down_failures"}, { STATS_OFFSET32(bd_avail_too_less_failures), 4, STATS_FLAGS_FUNC, "bd_avail_too_less_failures"}, { STATS_OFFSET32(tx_mq_not_empty), 4, STATS_FLAGS_FUNC, "tx_mq_not_empty"}, { STATS_OFFSET32(nsegs_path1_errors), 4, STATS_FLAGS_FUNC, "nsegs_path1_errors"}, { STATS_OFFSET32(nsegs_path2_errors), 4, STATS_FLAGS_FUNC, "nsegs_path2_errors"} }; static const struct { uint32_t offset; uint32_t size; char string[STAT_NAME_LEN]; } bxe_eth_q_stats_arr[] = { { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "rx_bytes" }, { Q_STATS_OFFSET32(total_unicast_packets_received_hi), 8, "rx_ucast_packets" }, { Q_STATS_OFFSET32(total_multicast_packets_received_hi), 8, "rx_mcast_packets" }, { Q_STATS_OFFSET32(total_broadcast_packets_received_hi), 8, "rx_bcast_packets" }, { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "rx_discards" }, { Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "tx_bytes" }, { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi), 8, "tx_ucast_packets" }, { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi), 8, "tx_mcast_packets" }, { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi), 8, "tx_bcast_packets" }, { Q_STATS_OFFSET32(total_tpa_aggregations_hi), 8, "tpa_aggregations" }, { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi), 8, "tpa_aggregated_frames"}, { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "tpa_bytes"}, { Q_STATS_OFFSET32(rx_calls), 4, "rx_calls"}, { Q_STATS_OFFSET32(rx_pkts), 4, "rx_pkts"}, { Q_STATS_OFFSET32(rx_tpa_pkts), 4, "rx_tpa_pkts"}, { Q_STATS_OFFSET32(rx_erroneous_jumbo_sge_pkts), 4, "rx_erroneous_jumbo_sge_pkts"}, { Q_STATS_OFFSET32(rx_bxe_service_rxsgl), 4, "rx_bxe_service_rxsgl"}, { Q_STATS_OFFSET32(rx_jumbo_sge_pkts), 4, "rx_jumbo_sge_pkts"}, { Q_STATS_OFFSET32(rx_soft_errors), 4, "rx_soft_errors"}, { Q_STATS_OFFSET32(rx_hw_csum_errors), 4, "rx_hw_csum_errors"}, { Q_STATS_OFFSET32(rx_ofld_frames_csum_ip), 4, "rx_ofld_frames_csum_ip"}, { Q_STATS_OFFSET32(rx_ofld_frames_csum_tcp_udp), 4, "rx_ofld_frames_csum_tcp_udp"}, { Q_STATS_OFFSET32(rx_budget_reached), 4, "rx_budget_reached"}, { Q_STATS_OFFSET32(tx_pkts), 4, "tx_pkts"}, { Q_STATS_OFFSET32(tx_soft_errors), 4, "tx_soft_errors"}, { Q_STATS_OFFSET32(tx_ofld_frames_csum_ip), 4, "tx_ofld_frames_csum_ip"}, { Q_STATS_OFFSET32(tx_ofld_frames_csum_tcp), 4, "tx_ofld_frames_csum_tcp"}, { Q_STATS_OFFSET32(tx_ofld_frames_csum_udp), 4, "tx_ofld_frames_csum_udp"}, { Q_STATS_OFFSET32(tx_ofld_frames_lso), 4, "tx_ofld_frames_lso"}, { Q_STATS_OFFSET32(tx_ofld_frames_lso_hdr_splits), 4, "tx_ofld_frames_lso_hdr_splits"}, { Q_STATS_OFFSET32(tx_encap_failures), 4, "tx_encap_failures"}, { Q_STATS_OFFSET32(tx_hw_queue_full), 4, "tx_hw_queue_full"}, { Q_STATS_OFFSET32(tx_hw_max_queue_depth), 4, "tx_hw_max_queue_depth"}, { Q_STATS_OFFSET32(tx_dma_mapping_failure), 4, "tx_dma_mapping_failure"}, { Q_STATS_OFFSET32(tx_max_drbr_queue_depth), 4, "tx_max_drbr_queue_depth"}, { Q_STATS_OFFSET32(tx_window_violation_std), 4, "tx_window_violation_std"}, { Q_STATS_OFFSET32(tx_window_violation_tso), 4, "tx_window_violation_tso"}, { Q_STATS_OFFSET32(tx_chain_lost_mbuf), 4, "tx_chain_lost_mbuf"}, { Q_STATS_OFFSET32(tx_frames_deferred), 4, "tx_frames_deferred"}, { Q_STATS_OFFSET32(tx_queue_xoff), 4, "tx_queue_xoff"}, { Q_STATS_OFFSET32(mbuf_defrag_attempts), 4, "mbuf_defrag_attempts"}, { Q_STATS_OFFSET32(mbuf_defrag_failures), 4, "mbuf_defrag_failures"}, { Q_STATS_OFFSET32(mbuf_rx_bd_alloc_failed), 4, "mbuf_rx_bd_alloc_failed"}, { 
Q_STATS_OFFSET32(mbuf_rx_bd_mapping_failed), 4, "mbuf_rx_bd_mapping_failed"}, { Q_STATS_OFFSET32(mbuf_rx_tpa_alloc_failed), 4, "mbuf_rx_tpa_alloc_failed"}, { Q_STATS_OFFSET32(mbuf_rx_tpa_mapping_failed), 4, "mbuf_rx_tpa_mapping_failed"}, { Q_STATS_OFFSET32(mbuf_rx_sge_alloc_failed), 4, "mbuf_rx_sge_alloc_failed"}, { Q_STATS_OFFSET32(mbuf_rx_sge_mapping_failed), 4, "mbuf_rx_sge_mapping_failed"}, { Q_STATS_OFFSET32(mbuf_alloc_tx), 4, "mbuf_alloc_tx"}, { Q_STATS_OFFSET32(mbuf_alloc_rx), 4, "mbuf_alloc_rx"}, { Q_STATS_OFFSET32(mbuf_alloc_sge), 4, "mbuf_alloc_sge"}, { Q_STATS_OFFSET32(mbuf_alloc_tpa), 4, "mbuf_alloc_tpa"}, { Q_STATS_OFFSET32(tx_queue_full_return), 4, "tx_queue_full_return"}, { Q_STATS_OFFSET32(bxe_tx_mq_sc_state_failures), 4, "bxe_tx_mq_sc_state_failures"}, { Q_STATS_OFFSET32(tx_request_link_down_failures), 4, "tx_request_link_down_failures"}, { Q_STATS_OFFSET32(bd_avail_too_less_failures), 4, "bd_avail_too_less_failures"}, { Q_STATS_OFFSET32(tx_mq_not_empty), 4, "tx_mq_not_empty"}, { Q_STATS_OFFSET32(nsegs_path1_errors), 4, "nsegs_path1_errors"}, { Q_STATS_OFFSET32(nsegs_path2_errors), 4, "nsegs_path2_errors"} }; #define BXE_NUM_ETH_STATS ARRAY_SIZE(bxe_eth_stats_arr) #define BXE_NUM_ETH_Q_STATS ARRAY_SIZE(bxe_eth_q_stats_arr) static void bxe_cmng_fns_init(struct bxe_softc *sc, uint8_t read_cfg, uint8_t cmng_type); static int bxe_get_cmng_fns_mode(struct bxe_softc *sc); static void storm_memset_cmng(struct bxe_softc *sc, struct cmng_init *cmng, uint8_t port); static void bxe_set_reset_global(struct bxe_softc *sc); static void bxe_set_reset_in_progress(struct bxe_softc *sc); static uint8_t bxe_reset_is_done(struct bxe_softc *sc, int engine); static uint8_t bxe_clear_pf_load(struct bxe_softc *sc); static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc, uint8_t *global, uint8_t print); static void bxe_int_disable(struct bxe_softc *sc); static int bxe_release_leader_lock(struct bxe_softc *sc); static void bxe_pf_disable(struct bxe_softc *sc); static void bxe_free_fp_buffers(struct bxe_softc *sc); static inline void bxe_update_rx_prod(struct bxe_softc *sc, struct bxe_fastpath *fp, uint16_t rx_bd_prod, uint16_t rx_cq_prod, uint16_t rx_sge_prod); static void bxe_link_report_locked(struct bxe_softc *sc); static void bxe_link_report(struct bxe_softc *sc); static void bxe_link_status_update(struct bxe_softc *sc); static void bxe_periodic_callout_func(void *xsc); static void bxe_periodic_start(struct bxe_softc *sc); static void bxe_periodic_stop(struct bxe_softc *sc); static int bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp, uint16_t prev_index, uint16_t index); static int bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp, int queue); static int bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp, uint16_t index); static uint8_t bxe_txeof(struct bxe_softc *sc, struct bxe_fastpath *fp); static void bxe_task_fp(struct bxe_fastpath *fp); static __noinline void bxe_dump_mbuf(struct bxe_softc *sc, struct mbuf *m, uint8_t contents); static int bxe_alloc_mem(struct bxe_softc *sc); static void bxe_free_mem(struct bxe_softc *sc); static int bxe_alloc_fw_stats_mem(struct bxe_softc *sc); static void bxe_free_fw_stats_mem(struct bxe_softc *sc); static int bxe_interrupt_attach(struct bxe_softc *sc); static void bxe_interrupt_detach(struct bxe_softc *sc); static void bxe_set_rx_mode(struct bxe_softc *sc); static int bxe_init_locked(struct bxe_softc *sc); static int bxe_stop_locked(struct bxe_softc *sc); static void bxe_sp_err_timeout_task(void *arg, int pending); void bxe_parity_recover(struct bxe_softc *sc); 
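/*
 * Illustrative sketch (guarded out of the build): how the STATS_OFFSET32()
 * and Q_STATS_OFFSET32() tables above are interpreted.  The macros divide
 * offsetof() by 4, so each entry's offset is an index into the statistics
 * block viewed as an array of 32-bit words, and the size field (4 or 8)
 * says whether the counter is a single word or, presumably, a _hi/_lo pair
 * assembled into one 64-bit value.  The struct, macro and function names
 * below are hypothetical stand-ins for this example only, not part of the
 * driver; the includes are what a standalone build of the sketch would need.
 */
#if 0	/* example only; never compiled as part of bxe */
#include <stddef.h>
#include <stdint.h>

struct example_stats {
	uint32_t rx_bytes_hi;	/* 64-bit counter, high word first */
	uint32_t rx_bytes_lo;
	uint32_t rx_calls;	/* plain 32-bit counter */
};

/* Same idea as STATS_OFFSET32(): an offset expressed in 32-bit words. */
#define	EXAMPLE_OFFSET32(field)	(offsetof(struct example_stats, field) / 4)

/* Fetch one table entry of the given size from the raw statistics words. */
static uint64_t
example_read_stat(const uint32_t *words, uint32_t offset32, uint32_t size)
{
	if (size == 8)
		return (((uint64_t)words[offset32] << 32) | words[offset32 + 1]);
	return (words[offset32]);
}
#endif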
void bxe_handle_error(struct bxe_softc *sc); static __noinline int bxe_nic_load(struct bxe_softc *sc, int load_mode); static __noinline int bxe_nic_unload(struct bxe_softc *sc, uint32_t unload_mode, uint8_t keep_link); static void bxe_handle_sp_tq(void *context, int pending); static void bxe_handle_fp_tq(void *context, int pending); static int bxe_add_cdev(struct bxe_softc *sc); static void bxe_del_cdev(struct bxe_softc *sc); int bxe_grc_dump(struct bxe_softc *sc); static int bxe_alloc_buf_rings(struct bxe_softc *sc); static void bxe_free_buf_rings(struct bxe_softc *sc); /* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */ uint32_t calc_crc32(uint8_t *crc32_packet, uint32_t crc32_length, uint32_t crc32_seed, uint8_t complement) { uint32_t byte = 0; uint32_t bit = 0; uint8_t msb = 0; uint32_t temp = 0; uint32_t shft = 0; uint8_t current_byte = 0; uint32_t crc32_result = crc32_seed; const uint32_t CRC32_POLY = 0x1edc6f41; if ((crc32_packet == NULL) || (crc32_length == 0) || ((crc32_length % 8) != 0)) { return (crc32_result); } for (byte = 0; byte < crc32_length; byte = byte + 1) { current_byte = crc32_packet[byte]; for (bit = 0; bit < 8; bit = bit + 1) { /* msb = crc32_result[31]; */ msb = (uint8_t)(crc32_result >> 31); crc32_result = crc32_result << 1; /* it (msb != current_byte[bit]) */ if (msb != (0x1 & (current_byte >> bit))) { crc32_result = crc32_result ^ CRC32_POLY; /* crc32_result[0] = 1 */ crc32_result |= 1; } } } /* Last step is to: * 1. "mirror" every bit * 2. swap the 4 bytes * 3. complement each bit */ /* Mirror */ temp = crc32_result; shft = sizeof(crc32_result) * 8 - 1; for (crc32_result >>= 1; crc32_result; crc32_result >>= 1) { temp <<= 1; temp |= crc32_result & 1; shft-- ; } /* temp[31-bit] = crc32_result[bit] */ temp <<= shft; /* Swap */ /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */ { uint32_t t0, t1, t2, t3; t0 = (0x000000ff & (temp >> 24)); t1 = (0x0000ff00 & (temp >> 8)); t2 = (0x00ff0000 & (temp << 8)); t3 = (0xff000000 & (temp << 24)); crc32_result = t0 | t1 | t2 | t3; } /* Complement */ if (complement) { crc32_result = ~crc32_result; } return (crc32_result); } int bxe_test_bit(int nr, volatile unsigned long *addr) { return ((atomic_load_acq_long(addr) & (1 << nr)) != 0); } void bxe_set_bit(unsigned int nr, volatile unsigned long *addr) { atomic_set_acq_long(addr, (1 << nr)); } void bxe_clear_bit(int nr, volatile unsigned long *addr) { atomic_clear_acq_long(addr, (1 << nr)); } int bxe_test_and_set_bit(int nr, volatile unsigned long *addr) { unsigned long x; nr = (1 << nr); do { x = *addr; } while (atomic_cmpset_acq_long(addr, x, x | nr) == 0); // if (x & nr) bit_was_set; else bit_was_not_set; return (x & nr); } int bxe_test_and_clear_bit(int nr, volatile unsigned long *addr) { unsigned long x; nr = (1 << nr); do { x = *addr; } while (atomic_cmpset_acq_long(addr, x, x & ~nr) == 0); // if (x & nr) bit_was_set; else bit_was_not_set; return (x & nr); } int bxe_cmpxchg(volatile int *addr, int old, int new) { int x; do { x = *addr; } while (atomic_cmpset_acq_int(addr, old, new) == 0); return (x); } /* * Get DMA memory from the OS. * * Validates that the OS has provided DMA buffers in response to a * bus_dmamap_load call and saves the physical address of those buffers. * When the callback is used the OS will return 0 for the mapping function * (bus_dmamap_load) so we use the value of map_arg->maxsegs to pass any * failures back to the caller. * * Returns: * Nothing. 
*/ static void bxe_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct bxe_dma *dma = arg; if (error) { dma->paddr = 0; dma->nseg = 0; BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error); } else { dma->paddr = segs->ds_addr; dma->nseg = nseg; } } /* * Allocate a block of memory and map it for DMA. No partial completions * allowed and release any resources acquired if we can't acquire all * resources. * * Returns: * 0 = Success, !0 = Failure */ int bxe_dma_alloc(struct bxe_softc *sc, bus_size_t size, struct bxe_dma *dma, const char *msg) { int rc; if (dma->size > 0) { BLOGE(sc, "dma block '%s' already has size %lu\n", msg, (unsigned long)dma->size); return (1); } memset(dma, 0, sizeof(*dma)); /* sanity */ dma->sc = sc; dma->size = size; snprintf(dma->msg, sizeof(dma->msg), "%s", msg); rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ BCM_PAGE_SIZE, /* alignment */ 0, /* boundary limit */ BUS_SPACE_MAXADDR, /* restricted low */ BUS_SPACE_MAXADDR, /* restricted hi */ NULL, /* addr filter() */ NULL, /* addr filter() arg */ size, /* max map size */ 1, /* num discontinuous */ size, /* max seg size */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lock() */ NULL, /* lock() arg */ &dma->tag); /* returned dma tag */ if (rc != 0) { BLOGE(sc, "Failed to create dma tag for '%s' (%d)\n", msg, rc); memset(dma, 0, sizeof(*dma)); return (1); } rc = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr, (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &dma->map); if (rc != 0) { BLOGE(sc, "Failed to alloc dma mem for '%s' (%d)\n", msg, rc); bus_dma_tag_destroy(dma->tag); memset(dma, 0, sizeof(*dma)); return (1); } rc = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, bxe_dma_map_addr, /* BLOGD in here */ dma, BUS_DMA_NOWAIT); if (rc != 0) { BLOGE(sc, "Failed to load dma map for '%s' (%d)\n", msg, rc); bus_dmamem_free(dma->tag, dma->vaddr, dma->map); bus_dma_tag_destroy(dma->tag); memset(dma, 0, sizeof(*dma)); return (1); } return (0); } void bxe_dma_free(struct bxe_softc *sc, struct bxe_dma *dma) { if (dma->size > 0) { DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL")); bus_dmamap_sync(dma->tag, dma->map, (BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)); bus_dmamap_unload(dma->tag, dma->map); bus_dmamem_free(dma->tag, dma->vaddr, dma->map); bus_dma_tag_destroy(dma->tag); } memset(dma, 0, sizeof(*dma)); } /* * These indirect read and write routines are only during init. * The locking is handled by the MCP. 
*/ void bxe_reg_wr_ind(struct bxe_softc *sc, uint32_t addr, uint32_t val) { pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4); pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4); pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4); } uint32_t bxe_reg_rd_ind(struct bxe_softc *sc, uint32_t addr) { uint32_t val; pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4); val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4); pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4); return (val); } static int bxe_acquire_hw_lock(struct bxe_softc *sc, uint32_t resource) { uint32_t lock_status; uint32_t resource_bit = (1 << resource); int func = SC_FUNC(sc); uint32_t hw_lock_control_reg; int cnt; /* validate the resource is within range */ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)" " resource_bit 0x%x\n", resource, resource_bit); return (-1); } if (func <= 5) { hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8)); } else { hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8)); } /* validate the resource is not already taken */ lock_status = REG_RD(sc, hw_lock_control_reg); if (lock_status & resource_bit) { BLOGE(sc, "resource (0x%x) in use (status 0x%x bit 0x%x)\n", resource, lock_status, resource_bit); return (-1); } /* try every 5ms for 5 seconds */ for (cnt = 0; cnt < 1000; cnt++) { REG_WR(sc, (hw_lock_control_reg + 4), resource_bit); lock_status = REG_RD(sc, hw_lock_control_reg); if (lock_status & resource_bit) { return (0); } DELAY(5000); } BLOGE(sc, "Resource 0x%x resource_bit 0x%x lock timeout!\n", resource, resource_bit); return (-1); } static int bxe_release_hw_lock(struct bxe_softc *sc, uint32_t resource) { uint32_t lock_status; uint32_t resource_bit = (1 << resource); int func = SC_FUNC(sc); uint32_t hw_lock_control_reg; /* validate the resource is within range */ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)" " resource_bit 0x%x\n", resource, resource_bit); return (-1); } if (func <= 5) { hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + (func * 8)); } else { hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8)); } /* validate the resource is currently taken */ lock_status = REG_RD(sc, hw_lock_control_reg); if (!(lock_status & resource_bit)) { BLOGE(sc, "resource (0x%x) not in use (status 0x%x bit 0x%x)\n", resource, lock_status, resource_bit); return (-1); } REG_WR(sc, hw_lock_control_reg, resource_bit); return (0); } static void bxe_acquire_phy_lock(struct bxe_softc *sc) { BXE_PHY_LOCK(sc); bxe_acquire_hw_lock(sc,HW_LOCK_RESOURCE_MDIO); } static void bxe_release_phy_lock(struct bxe_softc *sc) { bxe_release_hw_lock(sc,HW_LOCK_RESOURCE_MDIO); BXE_PHY_UNLOCK(sc); } /* * Per pf misc lock must be acquired before the per port mcp lock. Otherwise, * had we done things the other way around, if two pfs from the same port * would attempt to access nvram at the same time, we could run into a * scenario such as: * pf A takes the port lock. * pf B succeeds in taking the same lock since they are from the same port. * pf A takes the per pf misc lock. Performs eeprom access. * pf A finishes. Unlocks the per pf misc lock. * Pf B takes the lock and proceeds to perform it's own access. * pf A unlocks the per port lock, while pf B is still working (!). 
* mcp takes the per port lock and corrupts pf B's access (and/or has it's own * access corrupted by pf B).* */ static int bxe_acquire_nvram_lock(struct bxe_softc *sc) { int port = SC_PORT(sc); int count, i; uint32_t val = 0; /* acquire HW lock: protect against other PFs in PF Direct Assignment */ bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM); /* adjust timeout for emulation/FPGA */ count = NVRAM_TIMEOUT_COUNT; if (CHIP_REV_IS_SLOW(sc)) { count *= 100; } /* request access to nvram interface */ REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB, (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port)); for (i = 0; i < count*10; i++) { val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB); if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) { break; } DELAY(5); } if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) { BLOGE(sc, "Cannot get access to nvram interface " "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n", port, val); return (-1); } return (0); } static int bxe_release_nvram_lock(struct bxe_softc *sc) { int port = SC_PORT(sc); int count, i; uint32_t val = 0; /* adjust timeout for emulation/FPGA */ count = NVRAM_TIMEOUT_COUNT; if (CHIP_REV_IS_SLOW(sc)) { count *= 100; } /* relinquish nvram interface */ REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB, (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port)); for (i = 0; i < count*10; i++) { val = REG_RD(sc, MCP_REG_MCPR_NVM_SW_ARB); if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) { break; } DELAY(5); } if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) { BLOGE(sc, "Cannot free access to nvram interface " "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n", port, val); return (-1); } /* release HW lock: protect against other PFs in PF Direct Assignment */ bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_NVRAM); return (0); } static void bxe_enable_nvram_access(struct bxe_softc *sc) { uint32_t val; val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE); /* enable both bits, even on read */ REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE, (val | MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN)); } static void bxe_disable_nvram_access(struct bxe_softc *sc) { uint32_t val; val = REG_RD(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE); /* disable both bits, even after read */ REG_WR(sc, MCP_REG_MCPR_NVM_ACCESS_ENABLE, (val & ~(MCPR_NVM_ACCESS_ENABLE_EN | MCPR_NVM_ACCESS_ENABLE_WR_EN))); } static int bxe_nvram_read_dword(struct bxe_softc *sc, uint32_t offset, uint32_t *ret_val, uint32_t cmd_flags) { int count, i, rc; uint32_t val; /* build the command word */ cmd_flags |= MCPR_NVM_COMMAND_DOIT; /* need to clear DONE bit separately */ REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE); /* address of the NVRAM to read from */ REG_WR(sc, MCP_REG_MCPR_NVM_ADDR, (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE)); /* issue a read command */ REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags); /* adjust timeout for emulation/FPGA */ count = NVRAM_TIMEOUT_COUNT; if (CHIP_REV_IS_SLOW(sc)) { count *= 100; } /* wait for completion */ *ret_val = 0; rc = -1; for (i = 0; i < count; i++) { DELAY(5); val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND); if (val & MCPR_NVM_COMMAND_DONE) { val = REG_RD(sc, MCP_REG_MCPR_NVM_READ); /* we read nvram data in cpu order * but ethtool sees it as an array of bytes * converting to big-endian will do the work */ *ret_val = htobe32(val); rc = 0; break; } } if (rc == -1) { BLOGE(sc, "nvram read timeout expired " "(offset 0x%x cmd_flags 0x%x val 0x%x)\n", offset, cmd_flags, val); } return (rc); } static int bxe_nvram_read(struct bxe_softc *sc, uint32_t offset, uint8_t *ret_buf, int buf_size) { uint32_t cmd_flags; uint32_t val; int rc; 
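	/*
	 * Flow of bxe_nvram_read(): validate that offset and buf_size are
	 * dword-aligned, non-zero and fall within the flash, take the
	 * per-port NVRAM arbitration lock, enable the NVRAM interface, then
	 * read one dword at a time (the first access carries
	 * MCPR_NVM_COMMAND_FIRST and the final one MCPR_NVM_COMMAND_LAST),
	 * and finally disable NVRAM access and release the lock.
	 */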
if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) { BLOGE(sc, "Invalid parameter, offset 0x%x buf_size 0x%x\n", offset, buf_size); return (-1); } if ((offset + buf_size) > sc->devinfo.flash_size) { BLOGE(sc, "Invalid parameter, " "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n", offset, buf_size, sc->devinfo.flash_size); return (-1); } /* request access to nvram interface */ rc = bxe_acquire_nvram_lock(sc); if (rc) { return (rc); } /* enable access to nvram interface */ bxe_enable_nvram_access(sc); /* read the first word(s) */ cmd_flags = MCPR_NVM_COMMAND_FIRST; while ((buf_size > sizeof(uint32_t)) && (rc == 0)) { rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags); memcpy(ret_buf, &val, 4); /* advance to the next dword */ offset += sizeof(uint32_t); ret_buf += sizeof(uint32_t); buf_size -= sizeof(uint32_t); cmd_flags = 0; } if (rc == 0) { cmd_flags |= MCPR_NVM_COMMAND_LAST; rc = bxe_nvram_read_dword(sc, offset, &val, cmd_flags); memcpy(ret_buf, &val, 4); } /* disable access to nvram interface */ bxe_disable_nvram_access(sc); bxe_release_nvram_lock(sc); return (rc); } static int bxe_nvram_write_dword(struct bxe_softc *sc, uint32_t offset, uint32_t val, uint32_t cmd_flags) { int count, i, rc; /* build the command word */ cmd_flags |= (MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR); /* need to clear DONE bit separately */ REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE); /* write the data */ REG_WR(sc, MCP_REG_MCPR_NVM_WRITE, val); /* address of the NVRAM to write to */ REG_WR(sc, MCP_REG_MCPR_NVM_ADDR, (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE)); /* issue the write command */ REG_WR(sc, MCP_REG_MCPR_NVM_COMMAND, cmd_flags); /* adjust timeout for emulation/FPGA */ count = NVRAM_TIMEOUT_COUNT; if (CHIP_REV_IS_SLOW(sc)) { count *= 100; } /* wait for completion */ rc = -1; for (i = 0; i < count; i++) { DELAY(5); val = REG_RD(sc, MCP_REG_MCPR_NVM_COMMAND); if (val & MCPR_NVM_COMMAND_DONE) { rc = 0; break; } } if (rc == -1) { BLOGE(sc, "nvram write timeout expired " "(offset 0x%x cmd_flags 0x%x val 0x%x)\n", offset, cmd_flags, val); } return (rc); } #define BYTE_OFFSET(offset) (8 * (offset & 0x03)) static int bxe_nvram_write1(struct bxe_softc *sc, uint32_t offset, uint8_t *data_buf, int buf_size) { uint32_t cmd_flags; uint32_t align_offset; uint32_t val; int rc; if ((offset + buf_size) > sc->devinfo.flash_size) { BLOGE(sc, "Invalid parameter, " "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n", offset, buf_size, sc->devinfo.flash_size); return (-1); } /* request access to nvram interface */ rc = bxe_acquire_nvram_lock(sc); if (rc) { return (rc); } /* enable access to nvram interface */ bxe_enable_nvram_access(sc); cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST); align_offset = (offset & ~0x03); rc = bxe_nvram_read_dword(sc, align_offset, &val, cmd_flags); if (rc == 0) { val &= ~(0xff << BYTE_OFFSET(offset)); val |= (*data_buf << BYTE_OFFSET(offset)); /* nvram data is returned as an array of bytes * convert it back to cpu order */ val = be32toh(val); rc = bxe_nvram_write_dword(sc, align_offset, val, cmd_flags); } /* disable access to nvram interface */ bxe_disable_nvram_access(sc); bxe_release_nvram_lock(sc); return (rc); } static int bxe_nvram_write(struct bxe_softc *sc, uint32_t offset, uint8_t *data_buf, int buf_size) { uint32_t cmd_flags; uint32_t val; uint32_t written_so_far; int rc; if (buf_size == 1) { return (bxe_nvram_write1(sc, offset, data_buf, buf_size)); } if ((offset & 0x03) || (buf_size & 0x03) /* || (buf_size == 0) */) { BLOGE(sc, "Invalid 
parameter, offset 0x%x buf_size 0x%x\n", offset, buf_size); return (-1); } if (buf_size == 0) { return (0); /* nothing to do */ } if ((offset + buf_size) > sc->devinfo.flash_size) { BLOGE(sc, "Invalid parameter, " "offset 0x%x + buf_size 0x%x > flash_size 0x%x\n", offset, buf_size, sc->devinfo.flash_size); return (-1); } /* request access to nvram interface */ rc = bxe_acquire_nvram_lock(sc); if (rc) { return (rc); } /* enable access to nvram interface */ bxe_enable_nvram_access(sc); written_so_far = 0; cmd_flags = MCPR_NVM_COMMAND_FIRST; while ((written_so_far < buf_size) && (rc == 0)) { if (written_so_far == (buf_size - sizeof(uint32_t))) { cmd_flags |= MCPR_NVM_COMMAND_LAST; } else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0) { cmd_flags |= MCPR_NVM_COMMAND_LAST; } else if ((offset % NVRAM_PAGE_SIZE) == 0) { cmd_flags |= MCPR_NVM_COMMAND_FIRST; } memcpy(&val, data_buf, 4); rc = bxe_nvram_write_dword(sc, offset, val, cmd_flags); /* advance to the next dword */ offset += sizeof(uint32_t); data_buf += sizeof(uint32_t); written_so_far += sizeof(uint32_t); cmd_flags = 0; } /* disable access to nvram interface */ bxe_disable_nvram_access(sc); bxe_release_nvram_lock(sc); return (rc); } /* copy command into DMAE command memory and set DMAE command Go */ void bxe_post_dmae(struct bxe_softc *sc, struct dmae_cmd *dmae, int idx) { uint32_t cmd_offset; int i; cmd_offset = (DMAE_REG_CMD_MEM + (sizeof(struct dmae_cmd) * idx)); for (i = 0; i < ((sizeof(struct dmae_cmd) / 4)); i++) { REG_WR(sc, (cmd_offset + (i * 4)), *(((uint32_t *)dmae) + i)); } REG_WR(sc, dmae_reg_go_c[idx], 1); } uint32_t bxe_dmae_opcode_add_comp(uint32_t opcode, uint8_t comp_type) { return (opcode | ((comp_type << DMAE_CMD_C_DST_SHIFT) | DMAE_CMD_C_TYPE_ENABLE)); } uint32_t bxe_dmae_opcode_clr_src_reset(uint32_t opcode) { return (opcode & ~DMAE_CMD_SRC_RESET); } uint32_t bxe_dmae_opcode(struct bxe_softc *sc, uint8_t src_type, uint8_t dst_type, uint8_t with_comp, uint8_t comp_type) { uint32_t opcode = 0; opcode |= ((src_type << DMAE_CMD_SRC_SHIFT) | (dst_type << DMAE_CMD_DST_SHIFT)); opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET); opcode |= (SC_PORT(sc) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0); opcode |= ((SC_VN(sc) << DMAE_CMD_E1HVN_SHIFT) | (SC_VN(sc) << DMAE_CMD_DST_VN_SHIFT)); opcode |= (DMAE_COM_SET_ERR << DMAE_CMD_ERR_POLICY_SHIFT); #ifdef __BIG_ENDIAN opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP; #else opcode |= DMAE_CMD_ENDIANITY_DW_SWAP; #endif if (with_comp) { opcode = bxe_dmae_opcode_add_comp(opcode, comp_type); } return (opcode); } static void bxe_prep_dmae_with_comp(struct bxe_softc *sc, struct dmae_cmd *dmae, uint8_t src_type, uint8_t dst_type) { memset(dmae, 0, sizeof(struct dmae_cmd)); /* set the opcode */ dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type, TRUE, DMAE_COMP_PCI); /* fill in the completion parameters */ dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp)); dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp)); dmae->comp_val = DMAE_COMP_VAL; } /* issue a DMAE command over the init channel and wait for completion */ static int bxe_issue_dmae_with_comp(struct bxe_softc *sc, struct dmae_cmd *dmae) { uint32_t *wb_comp = BXE_SP(sc, wb_comp); int timeout = CHIP_REV_IS_SLOW(sc) ? 
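/*
 * A condensed sketch of how bxe_nvram_write() chooses the FIRST/LAST command
 * flags for each dword of a burst: the first dword of the buffer and the
 * first dword on a new flash page get FIRST, the final dword of the buffer
 * and the last dword on a page get LAST.  The flag values and page size here
 * are hypothetical stand-ins for MCPR_NVM_COMMAND_FIRST/_LAST and
 * NVRAM_PAGE_SIZE; not compiled.
 */
#if 0	/* illustrative sketch, not compiled */
#define NVM_FLAG_FIRST	0x1
#define NVM_FLAG_LAST	0x2

static uint32_t
nvm_dword_flags(uint32_t offset, uint32_t written_so_far, uint32_t buf_size,
    uint32_t page_size)
{
	uint32_t flags = 0;

	if (written_so_far == 0)
		flags |= NVM_FLAG_FIRST;	/* first dword of the burst */

	if (written_so_far == (buf_size - 4))
		flags |= NVM_FLAG_LAST;		/* last dword of the burst */
	else if (((offset + 4) % page_size) == 0)
		flags |= NVM_FLAG_LAST;		/* last dword on this page */
	else if ((offset % page_size) == 0)
		flags |= NVM_FLAG_FIRST;	/* first dword on a new page */

	return (flags);
}
#endif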
400000 : 4000; BXE_DMAE_LOCK(sc); /* reset completion */ *wb_comp = 0; /* post the command on the channel used for initializations */ bxe_post_dmae(sc, dmae, INIT_DMAE_C(sc)); /* wait for completion */ DELAY(5); while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) { if (!timeout || (sc->recovery_state != BXE_RECOVERY_DONE && sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) { BLOGE(sc, "DMAE timeout! *wb_comp 0x%x recovery_state 0x%x\n", *wb_comp, sc->recovery_state); BXE_DMAE_UNLOCK(sc); return (DMAE_TIMEOUT); } timeout--; DELAY(50); } if (*wb_comp & DMAE_PCI_ERR_FLAG) { BLOGE(sc, "DMAE PCI error! *wb_comp 0x%x recovery_state 0x%x\n", *wb_comp, sc->recovery_state); BXE_DMAE_UNLOCK(sc); return (DMAE_PCI_ERROR); } BXE_DMAE_UNLOCK(sc); return (0); } void bxe_read_dmae(struct bxe_softc *sc, uint32_t src_addr, uint32_t len32) { struct dmae_cmd dmae; uint32_t *data; int i, rc; DBASSERT(sc, (len32 <= 4), ("DMAE read length is %d", len32)); if (!sc->dmae_ready) { data = BXE_SP(sc, wb_data[0]); for (i = 0; i < len32; i++) { data[i] = (CHIP_IS_E1(sc)) ? bxe_reg_rd_ind(sc, (src_addr + (i * 4))) : REG_RD(sc, (src_addr + (i * 4))); } return; } /* set opcode and fixed command fields */ bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI); /* fill in addresses and len */ dmae.src_addr_lo = (src_addr >> 2); /* GRC addr has dword resolution */ dmae.src_addr_hi = 0; dmae.dst_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_data)); dmae.dst_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_data)); dmae.len = len32; /* issue the command and wait for completion */ if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) { bxe_panic(sc, ("DMAE failed (%d)\n", rc)); } } void bxe_write_dmae(struct bxe_softc *sc, bus_addr_t dma_addr, uint32_t dst_addr, uint32_t len32) { struct dmae_cmd dmae; int rc; if (!sc->dmae_ready) { DBASSERT(sc, (len32 <= 4), ("DMAE not ready and length is %d", len32)); if (CHIP_IS_E1(sc)) { ecore_init_ind_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32); } else { ecore_init_str_wr(sc, dst_addr, BXE_SP(sc, wb_data[0]), len32); } return; } /* set opcode and fixed command fields */ bxe_prep_dmae_with_comp(sc, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC); /* fill in addresses and len */ dmae.src_addr_lo = U64_LO(dma_addr); dmae.src_addr_hi = U64_HI(dma_addr); dmae.dst_addr_lo = (dst_addr >> 2); /* GRC addr has dword resolution */ dmae.dst_addr_hi = 0; dmae.len = len32; /* issue the command and wait for completion */ if ((rc = bxe_issue_dmae_with_comp(sc, &dmae)) != 0) { bxe_panic(sc, ("DMAE failed (%d)\n", rc)); } } void bxe_write_dmae_phys_len(struct bxe_softc *sc, bus_addr_t phys_addr, uint32_t addr, uint32_t len) { int dmae_wr_max = DMAE_LEN32_WR_MAX(sc); int offset = 0; while (len > dmae_wr_max) { bxe_write_dmae(sc, (phys_addr + offset), /* src DMA address */ (addr + offset), /* dst GRC address */ dmae_wr_max); offset += (dmae_wr_max * 4); len -= dmae_wr_max; } bxe_write_dmae(sc, (phys_addr + offset), /* src DMA address */ (addr + offset), /* dst GRC address */ len); } void bxe_set_ctx_validation(struct bxe_softc *sc, struct eth_context *cxt, uint32_t cid) { /* ustorm cxt validation */ cxt->ustorm_ag_context.cdu_usage = CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid), CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE); /* xcontext validation */ cxt->xstorm_ag_context.cdu_reserved = CDU_RSRVD_VALUE_TYPE_A(HW_CID(sc, cid), CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE); } static void bxe_storm_memset_hc_timeout(struct bxe_softc *sc, uint8_t port, uint8_t fw_sb_id, uint8_t sb_index, uint8_t ticks) { uint32_t addr = 
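/*
 * bxe_write_dmae_phys_len() above simply slices a long transfer into pieces
 * the DMAE block can take in one command.  A minimal sketch of that slicing,
 * with emit() standing in for bxe_write_dmae(); the length is in dwords and
 * both addresses advance in bytes (4 per dword).  Not compiled.
 */
#if 0	/* illustrative sketch, not compiled */
static void
dmae_write_chunked(uint64_t phys_addr, uint32_t grc_addr, uint32_t len,
    uint32_t max_dwords,
    void (*emit)(uint64_t src, uint32_t dst, uint32_t dwords))
{
	uint32_t offset = 0;

	while (len > max_dwords) {
		emit(phys_addr + offset, grc_addr + offset, max_dwords);
		offset += (max_dwords * 4);
		len -= max_dwords;
	}

	emit(phys_addr + offset, grc_addr + offset, len);	/* tail piece */
}
#endif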
(BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index)); REG_WR8(sc, addr, ticks); BLOGD(sc, DBG_LOAD, "port %d fw_sb_id %d sb_index %d ticks %d\n", port, fw_sb_id, sb_index, ticks); } static void bxe_storm_memset_hc_disable(struct bxe_softc *sc, uint8_t port, uint16_t fw_sb_id, uint8_t sb_index, uint8_t disable) { uint32_t enable_flag = (disable) ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); uint32_t addr = (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index)); uint8_t flags; /* clear and set */ flags = REG_RD8(sc, addr); flags &= ~HC_INDEX_DATA_HC_ENABLED; flags |= enable_flag; REG_WR8(sc, addr, flags); BLOGD(sc, DBG_LOAD, "port %d fw_sb_id %d sb_index %d disable %d\n", port, fw_sb_id, sb_index, disable); } void bxe_update_coalesce_sb_index(struct bxe_softc *sc, uint8_t fw_sb_id, uint8_t sb_index, uint8_t disable, uint16_t usec) { int port = SC_PORT(sc); uint8_t ticks = (usec / 4); /* XXX ??? */ bxe_storm_memset_hc_timeout(sc, port, fw_sb_id, sb_index, ticks); disable = (disable) ? 1 : ((usec) ? 0 : 1); bxe_storm_memset_hc_disable(sc, port, fw_sb_id, sb_index, disable); } void elink_cb_udelay(struct bxe_softc *sc, uint32_t usecs) { DELAY(usecs); } uint32_t elink_cb_reg_read(struct bxe_softc *sc, uint32_t reg_addr) { return (REG_RD(sc, reg_addr)); } void elink_cb_reg_write(struct bxe_softc *sc, uint32_t reg_addr, uint32_t val) { REG_WR(sc, reg_addr, val); } void elink_cb_reg_wb_write(struct bxe_softc *sc, uint32_t offset, uint32_t *wb_write, uint16_t len) { REG_WR_DMAE(sc, offset, wb_write, len); } void elink_cb_reg_wb_read(struct bxe_softc *sc, uint32_t offset, uint32_t *wb_write, uint16_t len) { REG_RD_DMAE(sc, offset, wb_write, len); } uint8_t elink_cb_path_id(struct bxe_softc *sc) { return (SC_PATH(sc)); } void elink_cb_event_log(struct bxe_softc *sc, const elink_log_id_t elink_log_id, ...) { /* XXX */ BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id); } static int bxe_set_spio(struct bxe_softc *sc, int spio, uint32_t mode) { uint32_t spio_reg; /* Only 2 SPIOs are configurable */ if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) { BLOGE(sc, "Invalid SPIO 0x%x mode 0x%x\n", spio, mode); return (-1); } bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_SPIO); /* read SPIO and mask except the float bits */ spio_reg = (REG_RD(sc, MISC_REG_SPIO) & MISC_SPIO_FLOAT); switch (mode) { case MISC_SPIO_OUTPUT_LOW: BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio); /* clear FLOAT and set CLR */ spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); spio_reg |= (spio << MISC_SPIO_CLR_POS); break; case MISC_SPIO_OUTPUT_HIGH: BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio); /* clear FLOAT and set SET */ spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS); spio_reg |= (spio << MISC_SPIO_SET_POS); break; case MISC_SPIO_INPUT_HI_Z: BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio); /* set FLOAT */ spio_reg |= (spio << MISC_SPIO_FLOAT_POS); break; default: break; } REG_WR(sc, MISC_REG_SPIO, spio_reg); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_SPIO); return (0); } static int bxe_gpio_read(struct bxe_softc *sc, int gpio_num, uint8_t port) { /* The GPIO should be swapped if swap register is set and active */ int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); int gpio_shift = (gpio_num + (gpio_port ? 
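/*
 * bxe_update_coalesce_sb_index() above converts a microsecond interrupt
 * coalescing value into the firmware's 4 us ticks and decides whether the
 * status block index should be disabled (a zero timeout disables it).  A
 * pure-arithmetic sketch of that conversion; not compiled.
 */
#if 0	/* illustrative sketch, not compiled */
struct hc_coalesce {
	uint8_t	ticks;		/* timeout in 4 us units */
	uint8_t	disable;	/* 1 = index disabled */
};

static struct hc_coalesce
hc_coalesce_from_usec(uint16_t usec, uint8_t force_disable)
{
	struct hc_coalesce c;

	c.ticks = (uint8_t)(usec / 4);
	c.disable = force_disable ? 1 : (usec ? 0 : 1);

	return (c);
}
#endif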
MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); uint32_t gpio_mask = (1 << gpio_shift); uint32_t gpio_reg; if (gpio_num > MISC_REGISTERS_GPIO_3) { BLOGE(sc, "Invalid GPIO %d port 0x%x gpio_port %d gpio_shift %d" " gpio_mask 0x%x\n", gpio_num, port, gpio_port, gpio_shift, gpio_mask); return (-1); } /* read GPIO value */ gpio_reg = REG_RD(sc, MISC_REG_GPIO); /* get the requested pin value */ return ((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0; } static int bxe_gpio_write(struct bxe_softc *sc, int gpio_num, uint32_t mode, uint8_t port) { /* The GPIO should be swapped if swap register is set and active */ int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && REG_RD(sc, NIG_REG_STRAP_OVERRIDE)) ^ port); int gpio_shift = (gpio_num + (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); uint32_t gpio_mask = (1 << gpio_shift); uint32_t gpio_reg; if (gpio_num > MISC_REGISTERS_GPIO_3) { BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d" " gpio_shift %d gpio_mask 0x%x\n", gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask); return (-1); } bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); /* read GPIO and mask except the float bits */ gpio_reg = (REG_RD(sc, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT); switch (mode) { case MISC_REGISTERS_GPIO_OUTPUT_LOW: BLOGD(sc, DBG_PHY, "Set GPIO %d (shift %d) -> output low\n", gpio_num, gpio_shift); /* clear FLOAT and set CLR */ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS); break; case MISC_REGISTERS_GPIO_OUTPUT_HIGH: BLOGD(sc, DBG_PHY, "Set GPIO %d (shift %d) -> output high\n", gpio_num, gpio_shift); /* clear FLOAT and set SET */ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS); break; case MISC_REGISTERS_GPIO_INPUT_HI_Z: BLOGD(sc, DBG_PHY, "Set GPIO %d (shift %d) -> input\n", gpio_num, gpio_shift); /* set FLOAT */ gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS); break; default: break; } REG_WR(sc, MISC_REG_GPIO, gpio_reg); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); return (0); } static int bxe_gpio_mult_write(struct bxe_softc *sc, uint8_t pins, uint32_t mode) { uint32_t gpio_reg; /* any port swapping should be handled by caller */ bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); /* read GPIO and mask except the float bits */ gpio_reg = REG_RD(sc, MISC_REG_GPIO); gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS); gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS); gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS); switch (mode) { case MISC_REGISTERS_GPIO_OUTPUT_LOW: BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins); /* set CLR */ gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS); break; case MISC_REGISTERS_GPIO_OUTPUT_HIGH: BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins); /* set SET */ gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS); break; case MISC_REGISTERS_GPIO_INPUT_HI_Z: BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins); /* set FLOAT */ gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS); break; default: BLOGE(sc, "Invalid GPIO mode assignment pins 0x%x mode 0x%x" " gpio_reg 0x%x\n", pins, mode, gpio_reg); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); return (-1); } REG_WR(sc, MISC_REG_GPIO, gpio_reg); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); return (0); } static int bxe_gpio_int_write(struct bxe_softc *sc, int gpio_num, uint32_t mode, uint8_t port) { /* The GPIO should be swapped if swap register is set and active */ int gpio_port = ((REG_RD(sc, NIG_REG_PORT_SWAP) && REG_RD(sc, 
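/*
 * The GPIO helpers above map a pin number to a register bit: when both the
 * port-swap and strap-override straps are set, the pin bank is swapped and
 * pins of the other bank sit a fixed shift higher.  A sketch of that
 * mapping; the shift constant is a hypothetical stand-in for
 * MISC_REGISTERS_GPIO_PORT_SHIFT.  Not compiled.
 */
#if 0	/* illustrative sketch, not compiled */
#define GPIO_BANK_SHIFT	4	/* hypothetical bank offset */

static uint32_t
gpio_pin_mask(int gpio_num, int port, int port_swap, int strap_override)
{
	int gpio_port = ((port_swap && strap_override) ^ port);
	int gpio_shift = gpio_num + (gpio_port ? GPIO_BANK_SHIFT : 0);

	return ((uint32_t)1 << gpio_shift);
}

static int
gpio_pin_value(uint32_t gpio_reg, uint32_t gpio_mask)
{
	/* same test as the tail of bxe_gpio_read() */
	return (((gpio_reg & gpio_mask) == gpio_mask) ? 1 : 0);
}
#endif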
NIG_REG_STRAP_OVERRIDE)) ^ port); int gpio_shift = (gpio_num + (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0)); uint32_t gpio_mask = (1 << gpio_shift); uint32_t gpio_reg; if (gpio_num > MISC_REGISTERS_GPIO_3) { BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d" " gpio_shift %d gpio_mask 0x%x\n", gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask); return (-1); } bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); /* read GPIO int */ gpio_reg = REG_RD(sc, MISC_REG_GPIO_INT); switch (mode) { case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR: BLOGD(sc, DBG_PHY, "Clear GPIO INT %d (shift %d) -> output low\n", gpio_num, gpio_shift); /* clear SET and set CLR */ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); break; case MISC_REGISTERS_GPIO_INT_OUTPUT_SET: BLOGD(sc, DBG_PHY, "Set GPIO INT %d (shift %d) -> output high\n", gpio_num, gpio_shift); /* clear CLR and set SET */ gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS); gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS); break; default: break; } REG_WR(sc, MISC_REG_GPIO_INT, gpio_reg); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); return (0); } uint32_t elink_cb_gpio_read(struct bxe_softc *sc, uint16_t gpio_num, uint8_t port) { return (bxe_gpio_read(sc, gpio_num, port)); } uint8_t elink_cb_gpio_write(struct bxe_softc *sc, uint16_t gpio_num, uint8_t mode, /* 0=low 1=high */ uint8_t port) { return (bxe_gpio_write(sc, gpio_num, mode, port)); } uint8_t elink_cb_gpio_mult_write(struct bxe_softc *sc, uint8_t pins, uint8_t mode) /* 0=low 1=high */ { return (bxe_gpio_mult_write(sc, pins, mode)); } uint8_t elink_cb_gpio_int_write(struct bxe_softc *sc, uint16_t gpio_num, uint8_t mode, /* 0=low 1=high */ uint8_t port) { return (bxe_gpio_int_write(sc, gpio_num, mode, port)); } void elink_cb_notify_link_changed(struct bxe_softc *sc) { REG_WR(sc, (MISC_REG_AEU_GENERAL_ATTN_12 + (SC_FUNC(sc) * sizeof(uint32_t))), 1); } /* send the MCP a request, block until there is a reply */ uint32_t elink_cb_fw_command(struct bxe_softc *sc, uint32_t command, uint32_t param) { int mb_idx = SC_FW_MB_IDX(sc); uint32_t seq; uint32_t rc = 0; uint32_t cnt = 1; uint8_t delay = CHIP_REV_IS_SLOW(sc) ? 100 : 10; BXE_FWMB_LOCK(sc); seq = ++sc->fw_seq; SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param); SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq)); BLOGD(sc, DBG_PHY, "wrote command 0x%08x to FW MB param 0x%08x\n", (command | seq), param); /* Let the FW do it's magic. GIve it up to 5 seconds... */ do { DELAY(delay * 1000); rc = SHMEM_RD(sc, func_mb[mb_idx].fw_mb_header); } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500)); BLOGD(sc, DBG_PHY, "[after %d ms] read 0x%x seq 0x%x from FW MB\n", cnt*delay, rc, seq); /* is this a reply to our command? */ if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) { rc &= FW_MSG_CODE_MASK; } else { /* Ruh-roh! 
*/ BLOGE(sc, "FW failed to respond!\n"); // XXX bxe_fw_dump(sc); rc = 0; } BXE_FWMB_UNLOCK(sc); return (rc); } static uint32_t bxe_fw_command(struct bxe_softc *sc, uint32_t command, uint32_t param) { return (elink_cb_fw_command(sc, command, param)); } static void __storm_memset_dma_mapping(struct bxe_softc *sc, uint32_t addr, bus_addr_t mapping) { REG_WR(sc, addr, U64_LO(mapping)); REG_WR(sc, (addr + 4), U64_HI(mapping)); } static void storm_memset_spq_addr(struct bxe_softc *sc, bus_addr_t mapping, uint16_t abs_fid) { uint32_t addr = (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid)); __storm_memset_dma_mapping(sc, addr, mapping); } static void storm_memset_vf_to_pf(struct bxe_softc *sc, uint16_t abs_fid, uint16_t pf_id) { REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid)), pf_id); } static void storm_memset_func_en(struct bxe_softc *sc, uint16_t abs_fid, uint8_t enable) { REG_WR8(sc, (BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid)), enable); REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid)), enable); REG_WR8(sc, (BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid)), enable); REG_WR8(sc, (BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid)), enable); } static void storm_memset_eq_data(struct bxe_softc *sc, struct event_ring_data *eq_data, uint16_t pfid) { uint32_t addr; size_t size; addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid)); size = sizeof(struct event_ring_data); ecore_storm_memset_struct(sc, addr, size, (uint32_t *)eq_data); } static void storm_memset_eq_prod(struct bxe_softc *sc, uint16_t eq_prod, uint16_t pfid) { uint32_t addr = (BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid)); REG_WR16(sc, addr, eq_prod); } /* * Post a slowpath command. * * A slowpath command is used to propagate a configuration change through * the controller in a controlled manner, allowing each STORM processor and * other H/W blocks to phase in the change. The commands sent on the * slowpath are referred to as ramrods. Depending on the ramrod used the * completion of the ramrod will occur in different ways. Here's a * breakdown of ramrods and how they complete: * * RAMROD_CMD_ID_ETH_PORT_SETUP * Used to setup the leading connection on a port. Completes on the * Receive Completion Queue (RCQ) of that port (typically fp[0]). * * RAMROD_CMD_ID_ETH_CLIENT_SETUP * Used to setup an additional connection on a port. Completes on the * RCQ of the multi-queue/RSS connection being initialized. * * RAMROD_CMD_ID_ETH_STAT_QUERY * Used to force the storm processors to update the statistics database * in host memory. This ramrod is send on the leading connection CID and * completes as an index increment of the CSTORM on the default status * block. * * RAMROD_CMD_ID_ETH_UPDATE * Used to update the state of the leading connection, usually to udpate * the RSS indirection table. Completes on the RCQ of the leading * connection. (Not currently used under FreeBSD until OS support becomes * available.) * * RAMROD_CMD_ID_ETH_HALT * Used when tearing down a connection prior to driver unload. Completes * on the RCQ of the multi-queue/RSS connection being torn down. Don't * use this on the leading connection. * * RAMROD_CMD_ID_ETH_SET_MAC * Sets the Unicast/Broadcast/Multicast used by the port. 
Completes on * the RCQ of the leading connection. * * RAMROD_CMD_ID_ETH_CFC_DEL * Used when tearing down a conneciton prior to driver unload. Completes * on the RCQ of the leading connection (since the current connection * has been completely removed from controller memory). * * RAMROD_CMD_ID_ETH_PORT_DEL * Used to tear down the leading connection prior to driver unload, * typically fp[0]. Completes as an index increment of the CSTORM on the * default status block. * * RAMROD_CMD_ID_ETH_FORWARD_SETUP * Used for connection offload. Completes on the RCQ of the multi-queue * RSS connection that is being offloaded. (Not currently used under * FreeBSD.) * * There can only be one command pending per function. * * Returns: * 0 = Success, !0 = Failure. */ /* must be called under the spq lock */ static inline struct eth_spe *bxe_sp_get_next(struct bxe_softc *sc) { struct eth_spe *next_spe = sc->spq_prod_bd; if (sc->spq_prod_bd == sc->spq_last_bd) { /* wrap back to the first eth_spq */ sc->spq_prod_bd = sc->spq; sc->spq_prod_idx = 0; } else { sc->spq_prod_bd++; sc->spq_prod_idx++; } return (next_spe); } /* must be called under the spq lock */ static inline void bxe_sp_prod_update(struct bxe_softc *sc) { int func = SC_FUNC(sc); /* * Make sure that BD data is updated before writing the producer. * BD data is written to the memory, the producer is read from the * memory, thus we need a full memory barrier to ensure the ordering. */ mb(); REG_WR16(sc, (BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func)), sc->spq_prod_idx); bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, BUS_SPACE_BARRIER_WRITE); } /** * bxe_is_contextless_ramrod - check if the current command ends on EQ * * @cmd: command to check * @cmd_type: command type */ static inline int bxe_is_contextless_ramrod(int cmd, int cmd_type) { if ((cmd_type == NONE_CONNECTION_TYPE) || (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) || (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) || (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) || (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) || (cmd == RAMROD_CMD_ID_ETH_SET_MAC) || (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE)) { return (TRUE); } else { return (FALSE); } } /** * bxe_sp_post - place a single command on an SP ring * * @sc: driver handle * @command: command to place (e.g. SETUP, FILTER_RULES, etc.) * @cid: SW CID the command is related to * @data_hi: command private data address (high 32 bits) * @data_lo: command private data address (low 32 bits) * @cmd_type: command type (e.g. NONE, ETH) * * SP data is handled as if it's always an address pair, thus data fields are * not swapped to little endian in upper functions. Instead this function swaps * data as if it's two uint32 fields. 
*/ int bxe_sp_post(struct bxe_softc *sc, int command, int cid, uint32_t data_hi, uint32_t data_lo, int cmd_type) { struct eth_spe *spe; uint16_t type; int common; common = bxe_is_contextless_ramrod(command, cmd_type); BXE_SP_LOCK(sc); if (common) { if (!atomic_load_acq_long(&sc->eq_spq_left)) { BLOGE(sc, "EQ ring is full!\n"); BXE_SP_UNLOCK(sc); return (-1); } } else { if (!atomic_load_acq_long(&sc->cq_spq_left)) { BLOGE(sc, "SPQ ring is full!\n"); BXE_SP_UNLOCK(sc); return (-1); } } spe = bxe_sp_get_next(sc); /* CID needs port number to be encoded int it */ spe->hdr.conn_and_cmd_data = htole32((command << SPE_HDR_T_CMD_ID_SHIFT) | HW_CID(sc, cid)); type = (cmd_type << SPE_HDR_T_CONN_TYPE_SHIFT) & SPE_HDR_T_CONN_TYPE; /* TBD: Check if it works for VFs */ type |= ((SC_FUNC(sc) << SPE_HDR_T_FUNCTION_ID_SHIFT) & SPE_HDR_T_FUNCTION_ID); spe->hdr.type = htole16(type); spe->data.update_data_addr.hi = htole32(data_hi); spe->data.update_data_addr.lo = htole32(data_lo); /* * It's ok if the actual decrement is issued towards the memory * somewhere between the lock and unlock. Thus no more explict * memory barrier is needed. */ if (common) { atomic_subtract_acq_long(&sc->eq_spq_left, 1); } else { atomic_subtract_acq_long(&sc->cq_spq_left, 1); } BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr); BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n", BXE_SP(sc, func_rdata), (uintmax_t)BXE_SP_MAPPING(sc, func_rdata)); BLOGD(sc, DBG_SP, "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)\n", sc->spq_prod_idx, (uint32_t)U64_HI(sc->spq_dma.paddr), (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq), command, common, HW_CID(sc, cid), data_hi, data_lo, type, atomic_load_acq_long(&sc->cq_spq_left), atomic_load_acq_long(&sc->eq_spq_left)); bxe_sp_prod_update(sc); BXE_SP_UNLOCK(sc); return (0); } /** * bxe_debug_print_ind_table - prints the indirection table configuration. * * @sc: driver hanlde * @p: pointer to rss configuration */ /* * FreeBSD Device probe function. * * Compares the device found to the driver's list of supported devices and * reports back to the bsd loader whether this is the right driver for the device. * This is the driver entry function called from the "kldload" command. * * Returns: * BUS_PROBE_DEFAULT on success, positive value on failure. */ static int bxe_probe(device_t dev) { struct bxe_device_type *t; char *descbuf; uint16_t did, sdid, svid, vid; /* Find our device structure */ t = bxe_devs; /* Get the data for the device to be probed. */ vid = pci_get_vendor(dev); did = pci_get_device(dev); svid = pci_get_subvendor(dev); sdid = pci_get_subdevice(dev); /* Look through the list of known devices for a match. */ while (t->bxe_name != NULL) { if ((vid == t->bxe_vid) && (did == t->bxe_did) && ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) && ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) { descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT); if (descbuf == NULL) return (ENOMEM); /* Print out the device identity. 
*/ snprintf(descbuf, BXE_DEVDESC_MAX, "%s (%c%d) BXE v:%s\n", t->bxe_name, (((pci_read_config(dev, PCIR_REVID, 4) & 0xf0) >> 4) + 'A'), (pci_read_config(dev, PCIR_REVID, 4) & 0xf), BXE_DRIVER_VERSION); device_set_desc_copy(dev, descbuf); free(descbuf, M_TEMP); return (BUS_PROBE_DEFAULT); } t++; } return (ENXIO); } static void bxe_init_mutexes(struct bxe_softc *sc) { #ifdef BXE_CORE_LOCK_SX snprintf(sc->core_sx_name, sizeof(sc->core_sx_name), "bxe%d_core_lock", sc->unit); sx_init(&sc->core_sx, sc->core_sx_name); #else snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name), "bxe%d_core_lock", sc->unit); mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF); #endif snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name), "bxe%d_sp_lock", sc->unit); mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF); snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name), "bxe%d_dmae_lock", sc->unit); mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF); snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name), "bxe%d_phy_lock", sc->unit); mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF); snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name), "bxe%d_fwmb_lock", sc->unit); mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF); snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name), "bxe%d_print_lock", sc->unit); mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF); snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name), "bxe%d_stats_lock", sc->unit); mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF); snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name), "bxe%d_mcast_lock", sc->unit); mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF); } static void bxe_release_mutexes(struct bxe_softc *sc) { #ifdef BXE_CORE_LOCK_SX sx_destroy(&sc->core_sx); #else if (mtx_initialized(&sc->core_mtx)) { mtx_destroy(&sc->core_mtx); } #endif if (mtx_initialized(&sc->sp_mtx)) { mtx_destroy(&sc->sp_mtx); } if (mtx_initialized(&sc->dmae_mtx)) { mtx_destroy(&sc->dmae_mtx); } if (mtx_initialized(&sc->port.phy_mtx)) { mtx_destroy(&sc->port.phy_mtx); } if (mtx_initialized(&sc->fwmb_mtx)) { mtx_destroy(&sc->fwmb_mtx); } if (mtx_initialized(&sc->print_mtx)) { mtx_destroy(&sc->print_mtx); } if (mtx_initialized(&sc->stats_mtx)) { mtx_destroy(&sc->stats_mtx); } if (mtx_initialized(&sc->mcast_mtx)) { mtx_destroy(&sc->mcast_mtx); } } static void bxe_tx_disable(struct bxe_softc* sc) { if_t ifp = sc->ifp; /* tell the stack the driver is stopped and TX queue is full */ if (ifp != NULL) { if_setdrvflags(ifp, 0); } } static void bxe_drv_pulse(struct bxe_softc *sc) { SHMEM_WR(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb, sc->fw_drv_pulse_wr_seq); } static inline uint16_t bxe_tx_avail(struct bxe_softc *sc, struct bxe_fastpath *fp) { int16_t used; uint16_t prod; uint16_t cons; prod = fp->tx_bd_prod; cons = fp->tx_bd_cons; used = SUB_S16(prod, cons); return (int16_t)(sc->tx_ring_size) - used; } static inline int bxe_tx_queue_has_work(struct bxe_fastpath *fp) { uint16_t hw_cons; mb(); /* status block fields can change */ hw_cons = le16toh(*fp->tx_cons_sb); return (hw_cons != fp->tx_pkt_cons); } static inline uint8_t bxe_has_tx_work(struct bxe_fastpath *fp) { /* expand this for multi-cos if ever supported */ return (bxe_tx_queue_has_work(fp)) ? 
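/*
 * bxe_tx_avail() above relies on the producer and consumer being
 * free-running 16-bit counters: taking their difference as a signed 16-bit
 * value (SUB_S16) stays correct across wraparound as long as the ring holds
 * fewer than 32768 entries.  A sketch of that accounting; not compiled.
 */
#if 0	/* illustrative sketch, not compiled */
static uint16_t
tx_ring_avail(uint16_t prod, uint16_t cons, uint16_t ring_size)
{
	int16_t used = (int16_t)(prod - cons);	/* SUB_S16() equivalent */

	return ((uint16_t)(ring_size - used));
}

/*
 * Example: prod has wrapped to 0x0005 while cons is still 0xfffd, so 8 BDs
 * are in flight and tx_ring_avail(0x0005, 0xfffd, 4096) == 4088.
 */
#endif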
TRUE : FALSE; } static inline int bxe_has_rx_work(struct bxe_fastpath *fp) { uint16_t rx_cq_cons_sb; mb(); /* status block fields can change */ rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb); if ((rx_cq_cons_sb & RCQ_MAX) == RCQ_MAX) rx_cq_cons_sb++; return (fp->rx_cq_cons != rx_cq_cons_sb); } static void bxe_sp_event(struct bxe_softc *sc, struct bxe_fastpath *fp, union eth_rx_cqe *rr_cqe) { int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX; struct ecore_queue_sp_obj *q_obj = &BXE_SP_OBJ(sc, fp).q_obj; BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n", fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type); switch (command) { case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE): BLOGD(sc, DBG_SP, "got UPDATE ramrod. CID %d\n", cid); drv_cmd = ECORE_Q_CMD_UPDATE; break; case (RAMROD_CMD_ID_ETH_CLIENT_SETUP): BLOGD(sc, DBG_SP, "got MULTI[%d] setup ramrod\n", cid); drv_cmd = ECORE_Q_CMD_SETUP; break; case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP): BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid); drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY; break; case (RAMROD_CMD_ID_ETH_HALT): BLOGD(sc, DBG_SP, "got MULTI[%d] halt ramrod\n", cid); drv_cmd = ECORE_Q_CMD_HALT; break; case (RAMROD_CMD_ID_ETH_TERMINATE): BLOGD(sc, DBG_SP, "got MULTI[%d] teminate ramrod\n", cid); drv_cmd = ECORE_Q_CMD_TERMINATE; break; case (RAMROD_CMD_ID_ETH_EMPTY): BLOGD(sc, DBG_SP, "got MULTI[%d] empty ramrod\n", cid); drv_cmd = ECORE_Q_CMD_EMPTY; break; default: BLOGD(sc, DBG_SP, "ERROR: unexpected MC reply (%d) on fp[%d]\n", command, fp->index); return; } if ((drv_cmd != ECORE_Q_CMD_MAX) && q_obj->complete_cmd(sc, q_obj, drv_cmd)) { /* * q_obj->complete_cmd() failure means that this was * an unexpected completion. * * In this case we don't want to increase the sc->spq_left * because apparently we haven't sent this command the first * place. */ // bxe_panic(sc, ("Unexpected SP completion\n")); return; } atomic_add_acq_long(&sc->cq_spq_left, 1); BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n", atomic_load_acq_long(&sc->cq_spq_left)); } /* * The current mbuf is part of an aggregation. Move the mbuf into the TPA * aggregation queue, put an empty mbuf back onto the receive chain, and mark * the current aggregation queue as in-progress. */ static void bxe_tpa_start(struct bxe_softc *sc, struct bxe_fastpath *fp, uint16_t queue, uint16_t cons, uint16_t prod, struct eth_fast_path_rx_cqe *cqe) { struct bxe_sw_rx_bd tmp_bd; struct bxe_sw_rx_bd *rx_buf; struct eth_rx_bd *rx_bd; int max_agg_queues; struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue]; uint16_t index; BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA START " "cons=%d prod=%d\n", fp->index, queue, cons, prod); max_agg_queues = MAX_AGG_QS(sc); KASSERT((queue < max_agg_queues), ("fp[%02d] invalid aggr queue (%d >= %d)!", fp->index, queue, max_agg_queues)); KASSERT((tpa_info->state == BXE_TPA_STATE_STOP), ("fp[%02d].tpa[%02d] starting aggr on queue not stopped!", fp->index, queue)); /* copy the existing mbuf and mapping from the TPA pool */ tmp_bd = tpa_info->bd; if (tmp_bd.m == NULL) { uint32_t *tmp; tmp = (uint32_t *)cqe; BLOGE(sc, "fp[%02d].tpa[%02d] cons[%d] prod[%d]mbuf not allocated!\n", fp->index, queue, cons, prod); BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n", *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7)); /* XXX Error handling? 
*/ return; } /* change the TPA queue to the start state */ tpa_info->state = BXE_TPA_STATE_START; tpa_info->placement_offset = cqe->placement_offset; tpa_info->parsing_flags = le16toh(cqe->pars_flags.flags); tpa_info->vlan_tag = le16toh(cqe->vlan_tag); tpa_info->len_on_bd = le16toh(cqe->len_on_bd); fp->rx_tpa_queue_used |= (1 << queue); /* * If all the buffer descriptors are filled with mbufs then fill in * the current consumer index with a new BD. Else if a maximum Rx * buffer limit is imposed then fill in the next producer index. */ index = (sc->max_rx_bufs != RX_BD_USABLE) ? prod : cons; /* move the received mbuf and mapping to TPA pool */ tpa_info->bd = fp->rx_mbuf_chain[cons]; /* release any existing RX BD mbuf mappings */ if (cons != index) { rx_buf = &fp->rx_mbuf_chain[cons]; if (rx_buf->m_map != NULL) { bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); } /* * We get here when the maximum number of rx buffers is less than * RX_BD_USABLE. The mbuf is already saved above so it's OK to NULL * it out here without concern of a memory leak. */ fp->rx_mbuf_chain[cons].m = NULL; } /* update the Rx SW BD with the mbuf info from the TPA pool */ fp->rx_mbuf_chain[index] = tmp_bd; /* update the Rx BD with the empty mbuf phys address from the TPA pool */ rx_bd = &fp->rx_chain[index]; rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr)); rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr)); } /* * When a TPA aggregation is completed, loop through the individual mbufs * of the aggregation, combining them into a single mbuf which will be sent * up the stack. Refill all freed SGEs with mbufs as we go along. */ static int bxe_fill_frag_mbuf(struct bxe_softc *sc, struct bxe_fastpath *fp, struct bxe_sw_tpa_info *tpa_info, uint16_t queue, uint16_t pages, struct mbuf *m, struct eth_end_agg_rx_cqe *cqe, uint16_t cqe_idx) { struct mbuf *m_frag; uint32_t frag_len, frag_size, i; uint16_t sge_idx; int rc = 0; int j; frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd; BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill len_on_bd=%d frag_size=%d pages=%d\n", fp->index, queue, tpa_info->len_on_bd, frag_size, pages); /* make sure the aggregated frame is not too big to handle */ if (pages > 8 * PAGES_PER_SGE) { uint32_t *tmp = (uint32_t *)cqe; BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! " "pkt_len=%d len_on_bd=%d frag_size=%d\n", fp->index, cqe_idx, pages, le16toh(cqe->pkt_len), tpa_info->len_on_bd, frag_size); BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n", *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7)); bxe_panic(sc, ("sge page count error\n")); return (EINVAL); } /* * Scan through the scatter gather list pulling individual mbufs into a * single mbuf for the host stack. */ for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) { sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j])); /* * Firmware gives the indices of the SGE as if the ring is an array * (meaning that the "next" element will consume 2 indices). */ frag_len = min(frag_size, (uint32_t)(SGE_PAGES)); BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill i=%d j=%d " "sge_idx=%d frag_size=%d frag_len=%d\n", fp->index, queue, i, j, sge_idx, frag_size, frag_len); m_frag = fp->rx_sge_mbuf_chain[sge_idx].m; /* allocate a new mbuf for the SGE */ rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx); if (rc) { /* Leave all remaining SGEs in the ring! 
*/ return (rc); } /* update the fragment length */ m_frag->m_len = frag_len; /* concatenate the fragment to the head mbuf */ m_cat(m, m_frag); fp->eth_q_stats.mbuf_alloc_sge--; /* update the TPA mbuf size and remaining fragment size */ m->m_pkthdr.len += frag_len; frag_size -= frag_len; } BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA fill done frag_size=%d\n", fp->index, queue, frag_size); return (rc); } static inline void bxe_clear_sge_mask_next_elems(struct bxe_fastpath *fp) { int i, j; for (i = 1; i <= RX_SGE_NUM_PAGES; i++) { int idx = RX_SGE_TOTAL_PER_PAGE * i - 1; for (j = 0; j < 2; j++) { BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx); idx--; } } } static inline void bxe_init_sge_ring_bit_mask(struct bxe_fastpath *fp) { /* set the mask to all 1's, it's faster to compare to 0 than to 0xf's */ memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask)); /* * Clear the two last indices in the page to 1. These are the indices that * correspond to the "next" element, hence will never be indicated and * should be removed from the calculations. */ bxe_clear_sge_mask_next_elems(fp); } static inline void bxe_update_last_max_sge(struct bxe_fastpath *fp, uint16_t idx) { uint16_t last_max = fp->last_max_sge; if (SUB_S16(idx, last_max) > 0) { fp->last_max_sge = idx; } } static inline void bxe_update_sge_prod(struct bxe_softc *sc, struct bxe_fastpath *fp, uint16_t sge_len, union eth_sgl_or_raw_data *cqe) { uint16_t last_max, last_elem, first_elem; uint16_t delta = 0; uint16_t i; if (!sge_len) { return; } /* first mark all used pages */ for (i = 0; i < sge_len; i++) { BIT_VEC64_CLEAR_BIT(fp->sge_mask, RX_SGE(le16toh(cqe->sgl[i]))); } BLOGD(sc, DBG_LRO, "fp[%02d] fp_cqe->sgl[%d] = %d\n", fp->index, sge_len - 1, le16toh(cqe->sgl[sge_len - 1])); /* assume that the last SGE index is the biggest */ bxe_update_last_max_sge(fp, le16toh(cqe->sgl[sge_len - 1])); last_max = RX_SGE(fp->last_max_sge); last_elem = last_max >> BIT_VEC64_ELEM_SHIFT; first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT; /* if ring is not full */ if (last_elem + 1 != first_elem) { last_elem++; } /* now update the prod */ for (i = first_elem; i != last_elem; i = RX_SGE_NEXT_MASK_ELEM(i)) { if (__predict_true(fp->sge_mask[i])) { break; } fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK; delta += BIT_VEC64_ELEM_SZ; } if (delta > 0) { fp->rx_sge_prod += delta; /* clear page-end entries */ bxe_clear_sge_mask_next_elems(fp); } BLOGD(sc, DBG_LRO, "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n", fp->index, fp->last_max_sge, fp->rx_sge_prod); } /* * The aggregation on the current TPA queue has completed. Pull the individual * mbuf fragments together into a single mbuf, perform all necessary checksum * calculations, and send the resuting mbuf to the stack. 
*/ static void bxe_tpa_stop(struct bxe_softc *sc, struct bxe_fastpath *fp, struct bxe_sw_tpa_info *tpa_info, uint16_t queue, uint16_t pages, struct eth_end_agg_rx_cqe *cqe, uint16_t cqe_idx) { if_t ifp = sc->ifp; struct mbuf *m; int rc = 0; BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] pad=%d pkt_len=%d pages=%d vlan=%d\n", fp->index, queue, tpa_info->placement_offset, le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag); m = tpa_info->bd.m; /* allocate a replacement before modifying existing mbuf */ rc = bxe_alloc_rx_tpa_mbuf(fp, queue); if (rc) { /* drop the frame and log an error */ fp->eth_q_stats.rx_soft_errors++; goto bxe_tpa_stop_exit; } /* we have a replacement, fixup the current mbuf */ m_adj(m, tpa_info->placement_offset); m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd; /* mark the checksums valid (taken care of by the firmware) */ fp->eth_q_stats.rx_ofld_frames_csum_ip++; fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++; m->m_pkthdr.csum_data = 0xffff; m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR); /* aggregate all of the SGEs into a single mbuf */ rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx); if (rc) { /* drop the packet and log an error */ fp->eth_q_stats.rx_soft_errors++; m_freem(m); } else { if (tpa_info->parsing_flags & PARSING_FLAGS_INNER_VLAN_EXIST) { m->m_pkthdr.ether_vtag = tpa_info->vlan_tag; m->m_flags |= M_VLANTAG; } /* assign packet to this interface interface */ if_setrcvif(m, ifp); #if __FreeBSD_version >= 800000 /* specify what RSS queue was used for this flow */ m->m_pkthdr.flowid = fp->index; BXE_SET_FLOWID(m); #endif if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); fp->eth_q_stats.rx_tpa_pkts++; /* pass the frame to the stack */ if_input(ifp, m); } /* we passed an mbuf up the stack or dropped the frame */ fp->eth_q_stats.mbuf_alloc_tpa--; bxe_tpa_stop_exit: fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP; fp->rx_tpa_queue_used &= ~(1 << queue); } static uint8_t bxe_service_rxsgl( struct bxe_fastpath *fp, uint16_t len, uint16_t lenonbd, struct mbuf *m, struct eth_fast_path_rx_cqe *cqe_fp) { struct mbuf *m_frag; uint16_t frags, frag_len; uint16_t sge_idx = 0; uint16_t j; uint8_t i, rc = 0; uint32_t frag_size; /* adjust the mbuf */ m->m_len = lenonbd; frag_size = len - lenonbd; frags = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; for (i = 0, j = 0; i < frags; i += PAGES_PER_SGE, j++) { sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j])); m_frag = fp->rx_sge_mbuf_chain[sge_idx].m; frag_len = min(frag_size, (uint32_t)(SGE_PAGE_SIZE)); m_frag->m_len = frag_len; /* allocate a new mbuf for the SGE */ rc = bxe_alloc_rx_sge_mbuf(fp, sge_idx); if (rc) { /* Leave all remaining SGEs in the ring! 
*/ return (rc); } fp->eth_q_stats.mbuf_alloc_sge--; /* concatenate the fragment to the head mbuf */ m_cat(m, m_frag); frag_size -= frag_len; } bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data); return rc; } static uint8_t bxe_rxeof(struct bxe_softc *sc, struct bxe_fastpath *fp) { if_t ifp = sc->ifp; uint16_t bd_cons, bd_prod, bd_prod_fw, comp_ring_cons; uint16_t hw_cq_cons, sw_cq_cons, sw_cq_prod; int rx_pkts = 0; int rc = 0; BXE_FP_RX_LOCK(fp); /* CQ "next element" is of the size of the regular element */ hw_cq_cons = le16toh(*fp->rx_cq_cons_sb); if ((hw_cq_cons & RCQ_USABLE_PER_PAGE) == RCQ_USABLE_PER_PAGE) { hw_cq_cons++; } bd_cons = fp->rx_bd_cons; bd_prod = fp->rx_bd_prod; bd_prod_fw = bd_prod; sw_cq_cons = fp->rx_cq_cons; sw_cq_prod = fp->rx_cq_prod; /* * Memory barrier necessary as speculative reads of the rx * buffer can be ahead of the index in the status block */ rmb(); BLOGD(sc, DBG_RX, "fp[%02d] Rx START hw_cq_cons=%u sw_cq_cons=%u\n", fp->index, hw_cq_cons, sw_cq_cons); while (sw_cq_cons != hw_cq_cons) { struct bxe_sw_rx_bd *rx_buf = NULL; union eth_rx_cqe *cqe; struct eth_fast_path_rx_cqe *cqe_fp; uint8_t cqe_fp_flags; enum eth_rx_cqe_type cqe_fp_type; uint16_t len, lenonbd, pad; struct mbuf *m = NULL; comp_ring_cons = RCQ(sw_cq_cons); bd_prod = RX_BD(bd_prod); bd_cons = RX_BD(bd_cons); cqe = &fp->rcq_chain[comp_ring_cons]; cqe_fp = &cqe->fast_path_cqe; cqe_fp_flags = cqe_fp->type_error_flags; cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE; BLOGD(sc, DBG_RX, "fp[%02d] Rx hw_cq_cons=%d hw_sw_cons=%d " "BD prod=%d cons=%d CQE type=0x%x err=0x%x " "status=0x%x rss_hash=0x%x vlan=0x%x len=%u lenonbd=%u\n", fp->index, hw_cq_cons, sw_cq_cons, bd_prod, bd_cons, CQE_TYPE(cqe_fp_flags), cqe_fp_flags, cqe_fp->status_flags, le32toh(cqe_fp->rss_hash_result), le16toh(cqe_fp->vlan_tag), le16toh(cqe_fp->pkt_len_or_gro_seg_len), le16toh(cqe_fp->len_on_bd)); /* is this a slowpath msg? */ if (__predict_false(CQE_TYPE_SLOW(cqe_fp_type))) { bxe_sp_event(sc, fp, cqe); goto next_cqe; } rx_buf = &fp->rx_mbuf_chain[bd_cons]; if (!CQE_TYPE_FAST(cqe_fp_type)) { struct bxe_sw_tpa_info *tpa_info; uint16_t frag_size, pages; uint8_t queue; if (CQE_TYPE_START(cqe_fp_type)) { bxe_tpa_start(sc, fp, cqe_fp->queue_index, bd_cons, bd_prod, cqe_fp); m = NULL; /* packet not ready yet */ goto next_rx; } KASSERT(CQE_TYPE_STOP(cqe_fp_type), ("CQE type is not STOP! (0x%x)\n", cqe_fp_type)); queue = cqe->end_agg_cqe.queue_index; tpa_info = &fp->rx_tpa_info[queue]; BLOGD(sc, DBG_LRO, "fp[%02d].tpa[%02d] TPA STOP\n", fp->index, queue); frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) - tpa_info->len_on_bd); pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; bxe_tpa_stop(sc, fp, tpa_info, queue, pages, &cqe->end_agg_cqe, comp_ring_cons); bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data); goto next_cqe; } /* non TPA */ /* is this an error packet? */ if (__predict_false(cqe_fp_flags & ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG)) { BLOGE(sc, "flags 0x%x rx packet %u\n", cqe_fp_flags, sw_cq_cons); fp->eth_q_stats.rx_soft_errors++; goto next_rx; } len = le16toh(cqe_fp->pkt_len_or_gro_seg_len); lenonbd = le16toh(cqe_fp->len_on_bd); pad = cqe_fp->placement_offset; m = rx_buf->m; if (__predict_false(m == NULL)) { BLOGE(sc, "No mbuf in rx chain descriptor %d for fp[%02d]\n", bd_cons, fp->index); goto next_rx; } /* XXX double copy if packet length under a threshold */ /* * If all the buffer descriptors are filled with mbufs then fill in * the current consumer index with a new BD. 
Else if a maximum Rx * buffer limit is imposed then fill in the next producer index. */ rc = bxe_alloc_rx_bd_mbuf(fp, bd_cons, (sc->max_rx_bufs != RX_BD_USABLE) ? bd_prod : bd_cons); if (rc != 0) { /* we simply reuse the received mbuf and don't post it to the stack */ m = NULL; BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n", fp->index, rc); fp->eth_q_stats.rx_soft_errors++; if (sc->max_rx_bufs != RX_BD_USABLE) { /* copy this consumer index to the producer index */ memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf, sizeof(struct bxe_sw_rx_bd)); memset(rx_buf, 0, sizeof(struct bxe_sw_rx_bd)); } goto next_rx; } /* current mbuf was detached from the bd */ fp->eth_q_stats.mbuf_alloc_rx--; /* we allocated a replacement mbuf, fixup the current one */ m_adj(m, pad); m->m_pkthdr.len = m->m_len = len; if ((len > 60) && (len > lenonbd)) { fp->eth_q_stats.rx_bxe_service_rxsgl++; rc = bxe_service_rxsgl(fp, len, lenonbd, m, cqe_fp); if (rc) break; fp->eth_q_stats.rx_jumbo_sge_pkts++; } else if (lenonbd < len) { fp->eth_q_stats.rx_erroneous_jumbo_sge_pkts++; } /* assign packet to this interface interface */ if_setrcvif(m, ifp); /* assume no hardware checksum has complated */ m->m_pkthdr.csum_flags = 0; /* validate checksum if offload enabled */ if (if_getcapenable(ifp) & IFCAP_RXCSUM) { /* check for a valid IP frame */ if (!(cqe->fast_path_cqe.status_flags & ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG)) { m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if (__predict_false(cqe_fp_flags & ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG)) { fp->eth_q_stats.rx_hw_csum_errors++; } else { fp->eth_q_stats.rx_ofld_frames_csum_ip++; m->m_pkthdr.csum_flags |= CSUM_IP_VALID; } } /* check for a valid TCP/UDP frame */ if (!(cqe->fast_path_cqe.status_flags & ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) { if (__predict_false(cqe_fp_flags & ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) { fp->eth_q_stats.rx_hw_csum_errors++; } else { fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++; m->m_pkthdr.csum_data = 0xFFFF; m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); } } } /* if there is a VLAN tag then flag that info */ if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_INNER_VLAN_EXIST) { m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag; m->m_flags |= M_VLANTAG; } #if __FreeBSD_version >= 800000 /* specify what RSS queue was used for this flow */ m->m_pkthdr.flowid = fp->index; BXE_SET_FLOWID(m); #endif next_rx: bd_cons = RX_BD_NEXT(bd_cons); bd_prod = RX_BD_NEXT(bd_prod); bd_prod_fw = RX_BD_NEXT(bd_prod_fw); /* pass the frame to the stack */ if (__predict_true(m != NULL)) { if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); rx_pkts++; if_input(ifp, m); } next_cqe: sw_cq_prod = RCQ_NEXT(sw_cq_prod); sw_cq_cons = RCQ_NEXT(sw_cq_cons); /* limit spinning on the queue */ if (rc != 0) break; if (rx_pkts == sc->rx_budget) { fp->eth_q_stats.rx_budget_reached++; break; } } /* while work to do */ fp->rx_bd_cons = bd_cons; fp->rx_bd_prod = bd_prod_fw; fp->rx_cq_cons = sw_cq_cons; fp->rx_cq_prod = sw_cq_prod; /* Update producers */ bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod); fp->eth_q_stats.rx_pkts += rx_pkts; fp->eth_q_stats.rx_calls++; BXE_FP_RX_UNLOCK(fp); return (sw_cq_cons != hw_cq_cons); } static uint16_t bxe_free_tx_pkt(struct bxe_softc *sc, struct bxe_fastpath *fp, uint16_t idx) { struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx]; struct eth_tx_start_bd *tx_start_bd; uint16_t bd_idx = TX_BD(tx_buf->first_bd); uint16_t new_cons; int nbd; /* unmap the mbuf from non-paged memory */ 
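/*
 * The checksum-offload logic in bxe_rxeof() above distills to: if the
 * completion says the IP/L4 checksum was actually validated and not flagged
 * bad, mark the mbuf accordingly; otherwise leave it for the stack.  A
 * condensed sketch, folding the relevant status and error bits into a single
 * word for brevity; all constants are hypothetical stand-ins for the
 * ETH_FAST_PATH_RX_CQE_* and CSUM_* definitions.  Not compiled.
 */
#if 0	/* illustrative sketch, not compiled */
#define CQE_IP_NOT_VALIDATED	0x01	/* hardware skipped IP csum check */
#define CQE_IP_BAD_XSUM		0x02
#define CQE_L4_NOT_VALIDATED	0x04	/* hardware skipped TCP/UDP check */
#define CQE_L4_BAD_XSUM		0x08

#define SW_CSUM_IP_CHECKED	0x01
#define SW_CSUM_IP_VALID	0x02
#define SW_CSUM_L4_VALID	0x04

static uint32_t
rx_csum_flags(uint32_t cqe_bits)
{
	uint32_t flags = 0;

	if (!(cqe_bits & CQE_IP_NOT_VALIDATED)) {
		flags |= SW_CSUM_IP_CHECKED;
		if (!(cqe_bits & CQE_IP_BAD_XSUM))
			flags |= SW_CSUM_IP_VALID;
	}

	if (!(cqe_bits & CQE_L4_NOT_VALIDATED) &&
	    !(cqe_bits & CQE_L4_BAD_XSUM))
		flags |= SW_CSUM_L4_VALID;

	return (flags);
}
#endif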
bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); tx_start_bd = &fp->tx_chain[bd_idx].start_bd; nbd = le16toh(tx_start_bd->nbd) - 1; new_cons = (tx_buf->first_bd + nbd); /* free the mbuf */ if (__predict_true(tx_buf->m != NULL)) { m_freem(tx_buf->m); fp->eth_q_stats.mbuf_alloc_tx--; } else { fp->eth_q_stats.tx_chain_lost_mbuf++; } tx_buf->m = NULL; tx_buf->first_bd = 0; return (new_cons); } /* transmit timeout watchdog */ static int bxe_watchdog(struct bxe_softc *sc, struct bxe_fastpath *fp) { BXE_FP_TX_LOCK(fp); if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) { BXE_FP_TX_UNLOCK(fp); return (0); } BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index); BXE_FP_TX_UNLOCK(fp); BXE_SET_ERROR_BIT(sc, BXE_ERR_TXQ_STUCK); taskqueue_enqueue_timeout(taskqueue_thread, &sc->sp_err_timeout_task, hz/10); return (-1); } /* processes transmit completions */ static uint8_t bxe_txeof(struct bxe_softc *sc, struct bxe_fastpath *fp) { if_t ifp = sc->ifp; uint16_t bd_cons, hw_cons, sw_cons, pkt_cons; uint16_t tx_bd_avail; BXE_FP_TX_LOCK_ASSERT(fp); bd_cons = fp->tx_bd_cons; hw_cons = le16toh(*fp->tx_cons_sb); sw_cons = fp->tx_pkt_cons; while (sw_cons != hw_cons) { pkt_cons = TX_BD(sw_cons); BLOGD(sc, DBG_TX, "TX: fp[%d]: hw_cons=%u sw_cons=%u pkt_cons=%u\n", fp->index, hw_cons, sw_cons, pkt_cons); bd_cons = bxe_free_tx_pkt(sc, fp, pkt_cons); sw_cons++; } fp->tx_pkt_cons = sw_cons; fp->tx_bd_cons = bd_cons; BLOGD(sc, DBG_TX, "TX done: fp[%d]: hw_cons=%u sw_cons=%u sw_prod=%u\n", fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod); mb(); tx_bd_avail = bxe_tx_avail(sc, fp); if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); } else { if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); } if (fp->tx_pkt_prod != fp->tx_pkt_cons) { /* reset the watchdog timer if there are pending transmits */ fp->watchdog_timer = BXE_TX_TIMEOUT; return (TRUE); } else { /* clear watchdog when there are no pending transmits */ fp->watchdog_timer = 0; return (FALSE); } } static void bxe_drain_tx_queues(struct bxe_softc *sc) { struct bxe_fastpath *fp; int i, count; /* wait until all TX fastpath tasks have completed */ for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; count = 1000; while (bxe_has_tx_work(fp)) { BXE_FP_TX_LOCK(fp); bxe_txeof(sc, fp); BXE_FP_TX_UNLOCK(fp); if (count == 0) { BLOGE(sc, "Timeout waiting for fp[%d] " "transmits to complete!\n", i); bxe_panic(sc, ("tx drain failure\n")); return; } count--; DELAY(1000); rmb(); } } return; } static int bxe_del_all_macs(struct bxe_softc *sc, struct ecore_vlan_mac_obj *mac_obj, int mac_type, uint8_t wait_for_comp) { unsigned long ramrod_flags = 0, vlan_mac_flags = 0; int rc; /* wait for completion of requested */ if (wait_for_comp) { bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); } /* Set the mac type of addresses we want to clear */ bxe_set_bit(mac_type, &vlan_mac_flags); rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags); if (rc < 0) { BLOGE(sc, "Failed to delete MACs (%d) mac_type %d wait_for_comp 0x%x\n", rc, mac_type, wait_for_comp); } return (rc); } static int bxe_fill_accept_flags(struct bxe_softc *sc, uint32_t rx_mode, unsigned long *rx_accept_flags, unsigned long *tx_accept_flags) { /* Clear the flags first */ *rx_accept_flags = 0; *tx_accept_flags = 0; switch (rx_mode) { case BXE_RX_MODE_NONE: /* * 'drop all' supersedes any accept flags that may have been * passed to the function. 
*/ break; case BXE_RX_MODE_NORMAL: bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); bxe_set_bit(ECORE_ACCEPT_MULTICAST, rx_accept_flags); bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); /* internal switching mode */ bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); bxe_set_bit(ECORE_ACCEPT_MULTICAST, tx_accept_flags); bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); break; case BXE_RX_MODE_ALLMULTI: bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags); bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); /* internal switching mode */ bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags); bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); break; case BXE_RX_MODE_PROMISC: /* * According to deffinition of SI mode, iface in promisc mode * should receive matched and unmatched (in resolution of port) * unicast packets. */ bxe_set_bit(ECORE_ACCEPT_UNMATCHED, rx_accept_flags); bxe_set_bit(ECORE_ACCEPT_UNICAST, rx_accept_flags); bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, rx_accept_flags); bxe_set_bit(ECORE_ACCEPT_BROADCAST, rx_accept_flags); /* internal switching mode */ bxe_set_bit(ECORE_ACCEPT_ALL_MULTICAST, tx_accept_flags); bxe_set_bit(ECORE_ACCEPT_BROADCAST, tx_accept_flags); if (IS_MF_SI(sc)) { bxe_set_bit(ECORE_ACCEPT_ALL_UNICAST, tx_accept_flags); } else { bxe_set_bit(ECORE_ACCEPT_UNICAST, tx_accept_flags); } break; default: BLOGE(sc, "Unknown rx_mode (0x%x)\n", rx_mode); return (-1); } /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */ if (rx_mode != BXE_RX_MODE_NONE) { bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, rx_accept_flags); bxe_set_bit(ECORE_ACCEPT_ANY_VLAN, tx_accept_flags); } return (0); } static int bxe_set_q_rx_mode(struct bxe_softc *sc, uint8_t cl_id, unsigned long rx_mode_flags, unsigned long rx_accept_flags, unsigned long tx_accept_flags, unsigned long ramrod_flags) { struct ecore_rx_mode_ramrod_params ramrod_param; int rc; memset(&ramrod_param, 0, sizeof(ramrod_param)); /* Prepare ramrod parameters */ ramrod_param.cid = 0; ramrod_param.cl_id = cl_id; ramrod_param.rx_mode_obj = &sc->rx_mode_obj; ramrod_param.func_id = SC_FUNC(sc); ramrod_param.pstate = &sc->sp_state; ramrod_param.state = ECORE_FILTER_RX_MODE_PENDING; ramrod_param.rdata = BXE_SP(sc, rx_mode_rdata); ramrod_param.rdata_mapping = BXE_SP_MAPPING(sc, rx_mode_rdata); bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); ramrod_param.ramrod_flags = ramrod_flags; ramrod_param.rx_mode_flags = rx_mode_flags; ramrod_param.rx_accept_flags = rx_accept_flags; ramrod_param.tx_accept_flags = tx_accept_flags; rc = ecore_config_rx_mode(sc, &ramrod_param); if (rc < 0) { BLOGE(sc, "Set rx_mode %d cli_id 0x%x rx_mode_flags 0x%x " "rx_accept_flags 0x%x tx_accept_flags 0x%x " "ramrod_flags 0x%x rc %d failed\n", sc->rx_mode, cl_id, (uint32_t)rx_mode_flags, (uint32_t)rx_accept_flags, (uint32_t)tx_accept_flags, (uint32_t)ramrod_flags, rc); return (rc); } return (0); } static int bxe_set_storm_rx_mode(struct bxe_softc *sc) { unsigned long rx_mode_flags = 0, ramrod_flags = 0; unsigned long rx_accept_flags = 0, tx_accept_flags = 0; int rc; rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags, &tx_accept_flags); if (rc) { return (rc); } bxe_set_bit(RAMROD_RX, &ramrod_flags); bxe_set_bit(RAMROD_TX, &ramrod_flags); /* XXX ensure all fastpath have same cl_id and/or move it to bxe_softc */ return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags, rx_accept_flags, tx_accept_flags, ramrod_flags)); } 
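/*
 * A condensed view of the bxe_fill_accept_flags() policy above: each host rx
 * mode expands to a set of accept bits for the RX side and the internal
 * switching (TX) side.  The mode and flag values are hypothetical stand-ins
 * for BXE_RX_MODE_* and ECORE_ACCEPT_*, and the multi-function SI
 * promiscuous special case is left out.  Not compiled.
 */
#if 0	/* illustrative sketch, not compiled */
enum rxm { RXM_NONE, RXM_NORMAL, RXM_ALLMULTI, RXM_PROMISC };

#define ACC_UCAST	0x01
#define ACC_MCAST	0x02
#define ACC_ALLMCAST	0x04
#define ACC_BCAST	0x08
#define ACC_UNMATCHED	0x10
#define ACC_ANY_VLAN	0x20

static int
fill_accept_flags(enum rxm mode, uint32_t *rx, uint32_t *tx)
{
	*rx = *tx = 0;

	switch (mode) {
	case RXM_NONE:		/* 'drop all' overrides everything */
		break;
	case RXM_NORMAL:
		*rx = *tx = (ACC_UCAST | ACC_MCAST | ACC_BCAST);
		break;
	case RXM_ALLMULTI:
		*rx = *tx = (ACC_UCAST | ACC_ALLMCAST | ACC_BCAST);
		break;
	case RXM_PROMISC:
		*rx = (ACC_UNMATCHED | ACC_UCAST | ACC_ALLMCAST | ACC_BCAST);
		*tx = (ACC_UCAST | ACC_ALLMCAST | ACC_BCAST);
		break;
	default:
		return (-1);
	}

	if (mode != RXM_NONE) {		/* no VLAN filtering is done */
		*rx |= ACC_ANY_VLAN;
		*tx |= ACC_ANY_VLAN;
	}

	return (0);
}
#endif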
/* returns the "mcp load_code" according to global load_count array */ static int bxe_nic_load_no_mcp(struct bxe_softc *sc) { int path = SC_PATH(sc); int port = SC_PORT(sc); BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n", path, load_count[path][0], load_count[path][1], load_count[path][2]); load_count[path][0]++; load_count[path][1 + port]++; BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n", path, load_count[path][0], load_count[path][1], load_count[path][2]); if (load_count[path][0] == 1) { return (FW_MSG_CODE_DRV_LOAD_COMMON); } else if (load_count[path][1 + port] == 1) { return (FW_MSG_CODE_DRV_LOAD_PORT); } else { return (FW_MSG_CODE_DRV_LOAD_FUNCTION); } } /* returns the "mcp load_code" according to global load_count array */ static int bxe_nic_unload_no_mcp(struct bxe_softc *sc) { int port = SC_PORT(sc); int path = SC_PATH(sc); BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n", path, load_count[path][0], load_count[path][1], load_count[path][2]); load_count[path][0]--; load_count[path][1 + port]--; BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n", path, load_count[path][0], load_count[path][1], load_count[path][2]); if (load_count[path][0] == 0) { return (FW_MSG_CODE_DRV_UNLOAD_COMMON); } else if (load_count[path][1 + port] == 0) { return (FW_MSG_CODE_DRV_UNLOAD_PORT); } else { return (FW_MSG_CODE_DRV_UNLOAD_FUNCTION); } } /* request unload mode from the MCP: COMMON, PORT or FUNCTION */ static uint32_t bxe_send_unload_req(struct bxe_softc *sc, int unload_mode) { uint32_t reset_code = 0; /* Select the UNLOAD request mode */ if (unload_mode == UNLOAD_NORMAL) { reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; } else { reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; } /* Send the request to the MCP */ if (!BXE_NOMCP(sc)) { reset_code = bxe_fw_command(sc, reset_code, 0); } else { reset_code = bxe_nic_unload_no_mcp(sc); } return (reset_code); } /* send UNLOAD_DONE command to the MCP */ static void bxe_send_unload_done(struct bxe_softc *sc, uint8_t keep_link) { uint32_t reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0; /* Report UNLOAD_DONE to MCP */ if (!BXE_NOMCP(sc)) { bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, reset_param); } } static int bxe_func_wait_started(struct bxe_softc *sc) { int tout = 50; if (!sc->port.pmf) { return (0); } /* * (assumption: No Attention from MCP at this stage) * PMF probably in the middle of TX disable/enable transaction * 1. Sync IRS for default SB * 2. Sync SP queue - this guarantees us that attention handling started * 3. Wait, that TX disable/enable transaction completes * * 1+2 guarantee that if DCBX attention was scheduled it already changed * pending bit of transaction from STARTED-->TX_STOPPED, if we already * received completion for the transaction the state is TX_STOPPED. * State will return to STARTED after completion of TX_STOPPED-->STARTED * transaction. */ /* XXX make sure default SB ISR is done */ /* need a way to synchronize an irq (intr_mtx?) */ /* XXX flush any work queues */ while (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED && tout--) { DELAY(20000); } if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) { /* * Failed to complete the transaction in a "good way" * Force both transactions with CLR bit. */ struct ecore_func_state_params func_params = { NULL }; BLOGE(sc, "Unexpected function state! 
" "Forcing STARTED-->TX_STOPPED-->STARTED\n"); func_params.f_obj = &sc->func_obj; bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); /* STARTED-->TX_STOPPED */ func_params.cmd = ECORE_F_CMD_TX_STOP; ecore_func_state_change(sc, &func_params); /* TX_STOPPED-->STARTED */ func_params.cmd = ECORE_F_CMD_TX_START; return (ecore_func_state_change(sc, &func_params)); } return (0); } static int bxe_stop_queue(struct bxe_softc *sc, int index) { struct bxe_fastpath *fp = &sc->fp[index]; struct ecore_queue_state_params q_params = { NULL }; int rc; BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index); q_params.q_obj = &sc->sp_objs[fp->index].q_obj; /* We want to wait for completion in this context */ bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); /* Stop the primary connection: */ /* ...halt the connection */ q_params.cmd = ECORE_Q_CMD_HALT; rc = ecore_queue_state_change(sc, &q_params); if (rc) { return (rc); } /* ...terminate the connection */ q_params.cmd = ECORE_Q_CMD_TERMINATE; memset(&q_params.params.terminate, 0, sizeof(q_params.params.terminate)); q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX; rc = ecore_queue_state_change(sc, &q_params); if (rc) { return (rc); } /* ...delete cfc entry */ q_params.cmd = ECORE_Q_CMD_CFC_DEL; memset(&q_params.params.cfc_del, 0, sizeof(q_params.params.cfc_del)); q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX; return (ecore_queue_state_change(sc, &q_params)); } /* wait for the outstanding SP commands */ static inline uint8_t bxe_wait_sp_comp(struct bxe_softc *sc, unsigned long mask) { unsigned long tmp; int tout = 5000; /* wait for 5 secs tops */ while (tout--) { mb(); if (!(atomic_load_acq_long(&sc->sp_state) & mask)) { return (TRUE); } DELAY(1000); } mb(); tmp = atomic_load_acq_long(&sc->sp_state); if (tmp & mask) { BLOGE(sc, "Filtering completion timed out: " "sp_state 0x%lx, mask 0x%lx\n", tmp, mask); return (FALSE); } return (FALSE); } static int bxe_func_stop(struct bxe_softc *sc) { struct ecore_func_state_params func_params = { NULL }; int rc; /* prepare parameters for function state transitions */ bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); func_params.f_obj = &sc->func_obj; func_params.cmd = ECORE_F_CMD_STOP; /* * Try to stop the function the 'good way'. If it fails (in case * of a parity error during bxe_chip_cleanup()) and we are * not in a debug mode, perform a state transaction in order to * enable further HW_RESET transaction. */ rc = ecore_func_state_change(sc, &func_params); if (rc) { BLOGE(sc, "FUNC_STOP ramrod failed. " "Running a dry transaction (%d)\n", rc); bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); return (ecore_func_state_change(sc, &func_params)); } return (0); } static int bxe_reset_hw(struct bxe_softc *sc, uint32_t load_code) { struct ecore_func_state_params func_params = { NULL }; /* Prepare parameters for function state transitions */ bxe_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); func_params.f_obj = &sc->func_obj; func_params.cmd = ECORE_F_CMD_HW_RESET; func_params.params.hw_init.load_phase = load_code; return (ecore_func_state_change(sc, &func_params)); } static void bxe_int_disable_sync(struct bxe_softc *sc, int disable_hw) { if (disable_hw) { /* prevent the HW from sending interrupts */ bxe_int_disable(sc); } /* XXX need a way to synchronize ALL irqs (intr_mtx?) 
*/ /* make sure all ISRs are done */ /* XXX make sure sp_task is not running */ /* cancel and flush work queues */ } static void bxe_chip_cleanup(struct bxe_softc *sc, uint32_t unload_mode, uint8_t keep_link) { int port = SC_PORT(sc); struct ecore_mcast_ramrod_params rparam = { NULL }; uint32_t reset_code; int i, rc = 0; bxe_drain_tx_queues(sc); /* give HW time to discard old tx messages */ DELAY(1000); /* Clean all ETH MACs */ rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE); if (rc < 0) { BLOGE(sc, "Failed to delete all ETH MACs (%d)\n", rc); } /* Clean up UC list */ rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE); if (rc < 0) { BLOGE(sc, "Failed to delete UC MACs list (%d)\n", rc); } /* Disable LLH */ if (!CHIP_IS_E1(sc)) { REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0); } /* Set "drop all" to stop Rx */ /* * We need to take the BXE_MCAST_LOCK() here in order to prevent * a race between the completion code and this code. */ BXE_MCAST_LOCK(sc); if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) { bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state); } else { bxe_set_storm_rx_mode(sc); } /* Clean up multicast configuration */ rparam.mcast_obj = &sc->mcast_obj; rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); if (rc < 0) { BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc); } BXE_MCAST_UNLOCK(sc); // XXX bxe_iov_chip_cleanup(sc); /* * Send the UNLOAD_REQUEST to the MCP. This will return if * this function should perform FUNCTION, PORT, or COMMON HW * reset. */ reset_code = bxe_send_unload_req(sc, unload_mode); /* * (assumption: No Attention from MCP at this stage) * PMF probably in the middle of TX disable/enable transaction */ rc = bxe_func_wait_started(sc); if (rc) { BLOGE(sc, "bxe_func_wait_started failed (%d)\n", rc); } /* * Close multi and leading connections * Completions for ramrods are collected in a synchronous way */ for (i = 0; i < sc->num_queues; i++) { if (bxe_stop_queue(sc, i)) { goto unload_error; } } /* * If SP settings didn't get completed so far - something * very wrong has happened. */ if (!bxe_wait_sp_comp(sc, ~0x0UL)) { BLOGE(sc, "Common slow path ramrods got stuck!(%d)\n", rc); } unload_error: rc = bxe_func_stop(sc); if (rc) { BLOGE(sc, "Function stop failed!(%d)\n", rc); } /* disable HW interrupts */ bxe_int_disable_sync(sc, TRUE); /* detach interrupts */ bxe_interrupt_detach(sc); /* Reset the chip */ rc = bxe_reset_hw(sc, reset_code); if (rc) { BLOGE(sc, "Hardware reset failed(%d)\n", rc); } /* Report UNLOAD_DONE to MCP */ bxe_send_unload_done(sc, keep_link); } static void bxe_disable_close_the_gate(struct bxe_softc *sc) { uint32_t val; int port = SC_PORT(sc); BLOGD(sc, DBG_LOAD, "Disabling 'close the gates'\n"); if (CHIP_IS_E1(sc)) { uint32_t addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : MISC_REG_AEU_MASK_ATTN_FUNC_0; val = REG_RD(sc, addr); val &= ~(0x300); REG_WR(sc, addr, val); } else { val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK); val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK | MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK); REG_WR(sc, MISC_REG_AEU_GENERAL_MASK, val); } } /* * Cleans the objects that have internal lists without sending * ramrods. Should be run when interrupts are disabled. */ static void bxe_squeeze_objects(struct bxe_softc *sc) { unsigned long ramrod_flags = 0, vlan_mac_flags = 0; struct ecore_mcast_ramrod_params rparam = { NULL }; struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj; int rc; /* Cleanup MACs' object first... 
*/ /* Wait for completion of requested */ bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); /* Perform a dry cleanup */ bxe_set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags); /* Clean ETH primary MAC */ bxe_set_bit(ECORE_ETH_MAC, &vlan_mac_flags); rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags, &ramrod_flags); if (rc != 0) { BLOGE(sc, "Failed to clean ETH MACs (%d)\n", rc); } /* Cleanup UC list */ vlan_mac_flags = 0; bxe_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags); rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags); if (rc != 0) { BLOGE(sc, "Failed to clean UC list MACs (%d)\n", rc); } /* Now clean mcast object... */ rparam.mcast_obj = &sc->mcast_obj; bxe_set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags); /* Add a DEL command... */ rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); if (rc < 0) { BLOGE(sc, "Failed to send DEL MCAST command (%d)\n", rc); } /* now wait until all pending commands are cleared */ rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); while (rc != 0) { if (rc < 0) { BLOGE(sc, "Failed to clean MCAST object (%d)\n", rc); return; } rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); } } /* stop the controller */ static __noinline int bxe_nic_unload(struct bxe_softc *sc, uint32_t unload_mode, uint8_t keep_link) { uint8_t global = FALSE; uint32_t val; int i; BXE_CORE_LOCK_ASSERT(sc); if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING); for (i = 0; i < sc->num_queues; i++) { struct bxe_fastpath *fp; fp = &sc->fp[i]; fp->watchdog_timer = 0; BXE_FP_TX_LOCK(fp); BXE_FP_TX_UNLOCK(fp); } BLOGD(sc, DBG_LOAD, "Starting NIC unload...\n"); /* mark driver as unloaded in shmem2 */ if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) { val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]); SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)], val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2); } if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE && (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) { if(CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) { /* * We can get here if the driver has been unloaded * during parity error recovery and is either waiting for a * leader to complete or for other functions to unload and * then ifconfig down has been issued. In this case we want to * unload and let other functions to complete a recovery * process. */ sc->recovery_state = BXE_RECOVERY_DONE; sc->is_leader = 0; bxe_release_leader_lock(sc); mb(); BLOGD(sc, DBG_LOAD, "Releasing a leadership...\n"); } BLOGE(sc, "Can't unload in closed or error state recover_state 0x%x" " state = 0x%x\n", sc->recovery_state, sc->state); return (-1); } /* * Nothing to do during unload if previous bxe_nic_load() * did not completed successfully - all resourses are released. */ if ((sc->state == BXE_STATE_CLOSED) || (sc->state == BXE_STATE_ERROR)) { return (0); } sc->state = BXE_STATE_CLOSING_WAITING_HALT; mb(); /* stop tx */ bxe_tx_disable(sc); sc->rx_mode = BXE_RX_MODE_NONE; /* XXX set rx mode ??? 
*/ if (IS_PF(sc) && !sc->grcdump_done) { /* set ALWAYS_ALIVE bit in shmem */ sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; bxe_drv_pulse(sc); bxe_stats_handle(sc, STATS_EVENT_STOP); bxe_save_statistics(sc); } /* wait till consumers catch up with producers in all queues */ bxe_drain_tx_queues(sc); /* if VF indicate to PF this function is going down (PF will delete sp * elements and clear initializations */ if (IS_VF(sc)) { ; /* bxe_vfpf_close_vf(sc); */ } else if (unload_mode != UNLOAD_RECOVERY) { /* if this is a normal/close unload need to clean up chip */ if (!sc->grcdump_done) bxe_chip_cleanup(sc, unload_mode, keep_link); } else { /* Send the UNLOAD_REQUEST to the MCP */ bxe_send_unload_req(sc, unload_mode); /* * Prevent transactions to host from the functions on the * engine that doesn't reset global blocks in case of global * attention once gloabl blocks are reset and gates are opened * (the engine which leader will perform the recovery * last). */ if (!CHIP_IS_E1x(sc)) { bxe_pf_disable(sc); } /* disable HW interrupts */ bxe_int_disable_sync(sc, TRUE); /* detach interrupts */ bxe_interrupt_detach(sc); /* Report UNLOAD_DONE to MCP */ bxe_send_unload_done(sc, FALSE); } /* * At this stage no more interrupts will arrive so we may safely clean * the queue'able objects here in case they failed to get cleaned so far. */ if (IS_PF(sc)) { bxe_squeeze_objects(sc); } /* There should be no more pending SP commands at this stage */ sc->sp_state = 0; sc->port.pmf = 0; bxe_free_fp_buffers(sc); if (IS_PF(sc)) { bxe_free_mem(sc); } bxe_free_fw_stats_mem(sc); sc->state = BXE_STATE_CLOSED; /* * Check if there are pending parity attentions. If there are - set * RECOVERY_IN_PROGRESS. */ if (IS_PF(sc) && bxe_chk_parity_attn(sc, &global, FALSE)) { bxe_set_reset_in_progress(sc); /* Set RESET_IS_GLOBAL if needed */ if (global) { bxe_set_reset_global(sc); } } /* * The last driver must disable a "close the gate" if there is no * parity attention or "process kill" pending. */ if (IS_PF(sc) && !bxe_clear_pf_load(sc) && bxe_reset_is_done(sc, SC_PATH(sc))) { bxe_disable_close_the_gate(sc); } BLOGD(sc, DBG_LOAD, "Ended NIC unload\n"); bxe_link_report(sc); return (0); } /* * Called by the OS to set various media options (i.e. link, speed, etc.) when * the user runs "ifconfig bxe media ..." or "ifconfig bxe mediaopt ...". */ static int bxe_ifmedia_update(struct ifnet *ifp) { struct bxe_softc *sc = (struct bxe_softc *)if_getsoftc(ifp); struct ifmedia *ifm; ifm = &sc->ifmedia; /* We only support Ethernet media type. */ if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) { return (EINVAL); } switch (IFM_SUBTYPE(ifm->ifm_media)) { case IFM_AUTO: break; case IFM_10G_CX4: case IFM_10G_SR: case IFM_10G_T: case IFM_10G_TWINAX: default: /* We don't support changing the media type. */ BLOGD(sc, DBG_LOAD, "Invalid media type (%d)\n", IFM_SUBTYPE(ifm->ifm_media)); return (EINVAL); } return (0); } /* * Called by the OS to get the current media status (i.e. link, speed, etc.). */ static void bxe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr) { struct bxe_softc *sc = if_getsoftc(ifp); /* Bug 165447: the 'ifconfig' tool skips printing of the "status: ..." line if the IFM_AVALID flag is *NOT* set. So we need to set this flag unconditionally (irrespective of the admininistrative 'up/down' state of the interface) to ensure that that line is always displayed. */ ifmr->ifm_status = IFM_AVALID; /* Setup the default interface info. */ ifmr->ifm_active = IFM_ETHER; /* Report link down if the driver isn't running. 
*/ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { ifmr->ifm_active |= IFM_NONE; BLOGD(sc, DBG_PHY, "in %s : nic still not loaded fully\n", __func__); BLOGD(sc, DBG_PHY, "in %s : link_up (1) : %d\n", __func__, sc->link_vars.link_up); return; } if (sc->link_vars.link_up) { ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active |= IFM_FDX; } else { ifmr->ifm_active |= IFM_NONE; BLOGD(sc, DBG_PHY, "in %s : setting IFM_NONE\n", __func__); return; } ifmr->ifm_active |= sc->media; return; } static void bxe_handle_chip_tq(void *context, int pending) { struct bxe_softc *sc = (struct bxe_softc *)context; long work = atomic_load_acq_long(&sc->chip_tq_flags); switch (work) { case CHIP_TQ_REINIT: if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) { /* restart the interface */ BLOGD(sc, DBG_LOAD, "Restarting the interface...\n"); bxe_periodic_stop(sc); BXE_CORE_LOCK(sc); bxe_stop_locked(sc); bxe_init_locked(sc); BXE_CORE_UNLOCK(sc); } break; default: break; } } /* * Handles any IOCTL calls from the operating system. * * Returns: * 0 = Success, >0 Failure */ static int bxe_ioctl(if_t ifp, u_long command, caddr_t data) { struct bxe_softc *sc = if_getsoftc(ifp); struct ifreq *ifr = (struct ifreq *)data; int mask = 0; int reinit = 0; int error = 0; int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN); int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING); switch (command) { case SIOCSIFMTU: BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMTU ioctl (mtu=%d)\n", ifr->ifr_mtu); if (sc->mtu == ifr->ifr_mtu) { /* nothing to change */ break; } if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) { BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n", ifr->ifr_mtu, mtu_min, mtu_max); error = EINVAL; break; } atomic_store_rel_int((volatile unsigned int *)&sc->mtu, (unsigned long)ifr->ifr_mtu); /* atomic_store_rel_long((volatile unsigned long *)&if_getmtu(ifp), (unsigned long)ifr->ifr_mtu); XXX - Not sure why it needs to be atomic */ if_setmtu(ifp, ifr->ifr_mtu); reinit = 1; break; case SIOCSIFFLAGS: /* toggle the interface state up or down */ BLOGD(sc, DBG_IOCTL, "Received SIOCSIFFLAGS ioctl\n"); BXE_CORE_LOCK(sc); /* check if the interface is up */ if (if_getflags(ifp) & IFF_UP) { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { /* set the receive mode flags */ bxe_set_rx_mode(sc); } else if(sc->state != BXE_STATE_DISABLED) { bxe_init_locked(sc); } } else { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { bxe_periodic_stop(sc); bxe_stop_locked(sc); } } BXE_CORE_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: /* add/delete multicast addresses */ BLOGD(sc, DBG_IOCTL, "Received SIOCADDMULTI/SIOCDELMULTI ioctl\n"); /* check if the interface is up */ if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { /* set the receive mode flags */ BXE_CORE_LOCK(sc); bxe_set_rx_mode(sc); BXE_CORE_UNLOCK(sc); } break; case SIOCSIFCAP: /* find out which capabilities have changed */ mask = (ifr->ifr_reqcap ^ if_getcapenable(ifp)); BLOGD(sc, DBG_IOCTL, "Received SIOCSIFCAP ioctl (mask=0x%08x)\n", mask); /* toggle the LRO capabilites enable flag */ if (mask & IFCAP_LRO) { if_togglecapenable(ifp, IFCAP_LRO); BLOGD(sc, DBG_IOCTL, "Turning LRO %s\n", (if_getcapenable(ifp) & IFCAP_LRO) ? "ON" : "OFF"); reinit = 1; } /* toggle the TXCSUM checksum capabilites enable flag */ if (mask & IFCAP_TXCSUM) { if_togglecapenable(ifp, IFCAP_TXCSUM); BLOGD(sc, DBG_IOCTL, "Turning TXCSUM %s\n", (if_getcapenable(ifp) & IFCAP_TXCSUM) ? 
"ON" : "OFF"); if (if_getcapenable(ifp) & IFCAP_TXCSUM) { if_sethwassistbits(ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO | CSUM_TCP_IPV6 | CSUM_UDP_IPV6), 0); } else { if_clearhwassist(ifp); /* XXX */ } } /* toggle the RXCSUM checksum capabilities enable flag */ if (mask & IFCAP_RXCSUM) { if_togglecapenable(ifp, IFCAP_RXCSUM); BLOGD(sc, DBG_IOCTL, "Turning RXCSUM %s\n", (if_getcapenable(ifp) & IFCAP_RXCSUM) ? "ON" : "OFF"); if (if_getcapenable(ifp) & IFCAP_RXCSUM) { if_sethwassistbits(ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO | CSUM_TCP_IPV6 | CSUM_UDP_IPV6), 0); } else { if_clearhwassist(ifp); /* XXX */ } } /* toggle TSO4 capabilities enabled flag */ if (mask & IFCAP_TSO4) { if_togglecapenable(ifp, IFCAP_TSO4); BLOGD(sc, DBG_IOCTL, "Turning TSO4 %s\n", (if_getcapenable(ifp) & IFCAP_TSO4) ? "ON" : "OFF"); } /* toggle TSO6 capabilities enabled flag */ if (mask & IFCAP_TSO6) { if_togglecapenable(ifp, IFCAP_TSO6); BLOGD(sc, DBG_IOCTL, "Turning TSO6 %s\n", (if_getcapenable(ifp) & IFCAP_TSO6) ? "ON" : "OFF"); } /* toggle VLAN_HWTSO capabilities enabled flag */ if (mask & IFCAP_VLAN_HWTSO) { if_togglecapenable(ifp, IFCAP_VLAN_HWTSO); BLOGD(sc, DBG_IOCTL, "Turning VLAN_HWTSO %s\n", (if_getcapenable(ifp) & IFCAP_VLAN_HWTSO) ? "ON" : "OFF"); } /* toggle VLAN_HWCSUM capabilities enabled flag */ if (mask & IFCAP_VLAN_HWCSUM) { /* XXX investigate this... */ BLOGE(sc, "Changing VLAN_HWCSUM is not supported!\n"); error = EINVAL; } /* toggle VLAN_MTU capabilities enable flag */ if (mask & IFCAP_VLAN_MTU) { /* XXX investigate this... */ BLOGE(sc, "Changing VLAN_MTU is not supported!\n"); error = EINVAL; } /* toggle VLAN_HWTAGGING capabilities enabled flag */ if (mask & IFCAP_VLAN_HWTAGGING) { /* XXX investigate this... */ BLOGE(sc, "Changing VLAN_HWTAGGING is not supported!\n"); error = EINVAL; } /* toggle VLAN_HWFILTER capabilities enabled flag */ if (mask & IFCAP_VLAN_HWFILTER) { /* XXX investigate this... */ BLOGE(sc, "Changing VLAN_HWFILTER is not supported!\n"); error = EINVAL; } /* XXX not yet... 
* IFCAP_WOL_MAGIC */ break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: /* set/get interface media */ BLOGD(sc, DBG_IOCTL, "Received SIOCSIFMEDIA/SIOCGIFMEDIA ioctl (cmd=%lu)\n", (command & 0xff)); error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); break; default: BLOGD(sc, DBG_IOCTL, "Received Unknown Ioctl (cmd=%lu)\n", (command & 0xff)); error = ether_ioctl(ifp, command, data); break; } if (reinit && (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) { BLOGD(sc, DBG_LOAD | DBG_IOCTL, "Re-initializing hardware from IOCTL change\n"); bxe_periodic_stop(sc); BXE_CORE_LOCK(sc); bxe_stop_locked(sc); bxe_init_locked(sc); BXE_CORE_UNLOCK(sc); } return (error); } static __noinline void bxe_dump_mbuf(struct bxe_softc *sc, struct mbuf *m, uint8_t contents) { char * type; int i = 0; if (!(sc->debug & DBG_MBUF)) { return; } if (m == NULL) { BLOGD(sc, DBG_MBUF, "mbuf: null pointer\n"); return; } while (m) { #if __FreeBSD_version >= 1000000 BLOGD(sc, DBG_MBUF, "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n", i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data); if (m->m_flags & M_PKTHDR) { BLOGD(sc, DBG_MBUF, "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n", i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS, (int)m->m_pkthdr.csum_flags, CSUM_BITS); } #else BLOGD(sc, DBG_MBUF, "%02d: mbuf=%p m_len=%d m_flags=0x%b m_data=%p\n", i, m, m->m_len, m->m_flags, "\20\1M_EXT\2M_PKTHDR\3M_EOR\4M_RDONLY", m->m_data); if (m->m_flags & M_PKTHDR) { BLOGD(sc, DBG_MBUF, "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n", i, m->m_pkthdr.len, m->m_flags, "\20\12M_BCAST\13M_MCAST\14M_FRAG" "\15M_FIRSTFRAG\16M_LASTFRAG\21M_VLANTAG" "\22M_PROMISC\23M_NOFREE", (int)m->m_pkthdr.csum_flags, "\20\1CSUM_IP\2CSUM_TCP\3CSUM_UDP\4CSUM_IP_FRAGS" "\5CSUM_FRAGMENT\6CSUM_TSO\11CSUM_IP_CHECKED" "\12CSUM_IP_VALID\13CSUM_DATA_VALID" "\14CSUM_PSEUDO_HDR"); } #endif /* #if __FreeBSD_version >= 1000000 */ if (m->m_flags & M_EXT) { switch (m->m_ext.ext_type) { case EXT_CLUSTER: type = "EXT_CLUSTER"; break; case EXT_SFBUF: type = "EXT_SFBUF"; break; case EXT_JUMBOP: type = "EXT_JUMBOP"; break; case EXT_JUMBO9: type = "EXT_JUMBO9"; break; case EXT_JUMBO16: type = "EXT_JUMBO16"; break; case EXT_PACKET: type = "EXT_PACKET"; break; case EXT_MBUF: type = "EXT_MBUF"; break; case EXT_NET_DRV: type = "EXT_NET_DRV"; break; case EXT_MOD_TYPE: type = "EXT_MOD_TYPE"; break; case EXT_DISPOSABLE: type = "EXT_DISPOSABLE"; break; case EXT_EXTREF: type = "EXT_EXTREF"; break; default: type = "UNKNOWN"; break; } BLOGD(sc, DBG_MBUF, "%02d: - m_ext: %p ext_size=%d type=%s\n", i, m->m_ext.ext_buf, m->m_ext.ext_size, type); } if (contents) { bxe_dump_mbuf_data(sc, "mbuf data", m, TRUE); } m = m->m_next; i++; } } /* * Checks to ensure the 13 bd sliding window is >= MSS for TSO. * Check that (13 total bds - 3 bds) = 10 bd window >= MSS. * The window: 3 bds are = 1 for headers BD + 2 for parse BD and last BD * The headers comes in a separate bd in FreeBSD so 13-3=10. * Returns: 0 if OK to send, 1 if packet needs further defragmentation */ static int bxe_chktso_window(struct bxe_softc *sc, int nsegs, bus_dma_segment_t *segs, struct mbuf *m) { uint32_t num_wnds, wnd_size, wnd_sum; int32_t frag_idx, wnd_idx; unsigned short lso_mss; int defrag; defrag = 0; wnd_sum = 0; wnd_size = 10; num_wnds = nsegs - wnd_size; lso_mss = htole16(m->m_pkthdr.tso_segsz); /* * Total header lengths Eth+IP+TCP in first FreeBSD mbuf so calculate the * first window sum of data while skipping the first assuming it is the * header in FreeBSD. 
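 *
 * Worked example (illustrative, not from the original comment): with a
 * tso_segsz (MSS) of 1448 and data segments of 100 bytes each following the
 * header segment, the first 10-BD window sums to 1000 < 1448, so the check
 * below returns 1 immediately and the caller must defragment the chain
 * before it can be transmitted.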
*/ for (frag_idx = 1; (frag_idx <= wnd_size); frag_idx++) { wnd_sum += htole16(segs[frag_idx].ds_len); } /* check the first 10 bd window size */ if (wnd_sum < lso_mss) { return (1); } /* run through the windows */ for (wnd_idx = 0; wnd_idx < num_wnds; wnd_idx++, frag_idx++) { /* subtract the first mbuf->m_len of the last wndw(-header) */ wnd_sum -= htole16(segs[wnd_idx+1].ds_len); /* add the next mbuf len to the len of our new window */ wnd_sum += htole16(segs[frag_idx].ds_len); if (wnd_sum < lso_mss) { return (1); } } return (0); } static uint8_t bxe_set_pbd_csum_e2(struct bxe_fastpath *fp, struct mbuf *m, uint32_t *parsing_data) { struct ether_vlan_header *eh = NULL; struct ip *ip4 = NULL; struct ip6_hdr *ip6 = NULL; caddr_t ip = NULL; struct tcphdr *th = NULL; int e_hlen, ip_hlen, l4_off; uint16_t proto; if (m->m_pkthdr.csum_flags == CSUM_IP) { /* no L4 checksum offload needed */ return (0); } /* get the Ethernet header */ eh = mtod(m, struct ether_vlan_header *); /* handle VLAN encapsulation if present */ if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); proto = ntohs(eh->evl_proto); } else { e_hlen = ETHER_HDR_LEN; proto = ntohs(eh->evl_encap_proto); } switch (proto) { case ETHERTYPE_IP: /* get the IP header, if mbuf len < 20 then header in next mbuf */ ip4 = (m->m_len < sizeof(struct ip)) ? (struct ip *)m->m_next->m_data : (struct ip *)(m->m_data + e_hlen); /* ip_hl is number of 32-bit words */ ip_hlen = (ip4->ip_hl << 2); ip = (caddr_t)ip4; break; case ETHERTYPE_IPV6: /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */ ip6 = (m->m_len < sizeof(struct ip6_hdr)) ? (struct ip6_hdr *)m->m_next->m_data : (struct ip6_hdr *)(m->m_data + e_hlen); /* XXX cannot support offload with IPv6 extensions */ ip_hlen = sizeof(struct ip6_hdr); ip = (caddr_t)ip6; break; default: /* We can't offload in this case... */ /* XXX error stat ??? */ return (0); } /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */ l4_off = (e_hlen + ip_hlen); *parsing_data |= (((l4_off >> 1) << ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) & ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W); if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_TSO | CSUM_TCP_IPV6)) { fp->eth_q_stats.tx_ofld_frames_csum_tcp++; th = (struct tcphdr *)(ip + ip_hlen); /* th_off is number of 32-bit words */ *parsing_data |= ((th->th_off << ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW); return (l4_off + (th->th_off << 2)); /* entire header length */ } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_UDP_IPV6)) { fp->eth_q_stats.tx_ofld_frames_csum_udp++; return (l4_off + sizeof(struct udphdr)); /* entire header length */ } else { /* XXX error stat ??? 
*/ return (0); } } static uint8_t bxe_set_pbd_csum(struct bxe_fastpath *fp, struct mbuf *m, struct eth_tx_parse_bd_e1x *pbd) { struct ether_vlan_header *eh = NULL; struct ip *ip4 = NULL; struct ip6_hdr *ip6 = NULL; caddr_t ip = NULL; struct tcphdr *th = NULL; struct udphdr *uh = NULL; int e_hlen, ip_hlen; uint16_t proto; uint8_t hlen; uint16_t tmp_csum; uint32_t *tmp_uh; /* get the Ethernet header */ eh = mtod(m, struct ether_vlan_header *); /* handle VLAN encapsulation if present */ if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { e_hlen = (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); proto = ntohs(eh->evl_proto); } else { e_hlen = ETHER_HDR_LEN; proto = ntohs(eh->evl_encap_proto); } switch (proto) { case ETHERTYPE_IP: /* get the IP header, if mbuf len < 20 then header in next mbuf */ ip4 = (m->m_len < sizeof(struct ip)) ? (struct ip *)m->m_next->m_data : (struct ip *)(m->m_data + e_hlen); /* ip_hl is number of 32-bit words */ ip_hlen = (ip4->ip_hl << 1); ip = (caddr_t)ip4; break; case ETHERTYPE_IPV6: /* get the IPv6 header, if mbuf len < 40 then header in next mbuf */ ip6 = (m->m_len < sizeof(struct ip6_hdr)) ? (struct ip6_hdr *)m->m_next->m_data : (struct ip6_hdr *)(m->m_data + e_hlen); /* XXX cannot support offload with IPv6 extensions */ ip_hlen = (sizeof(struct ip6_hdr) >> 1); ip = (caddr_t)ip6; break; default: /* We can't offload in this case... */ /* XXX error stat ??? */ return (0); } hlen = (e_hlen >> 1); /* note that rest of global_data is indirectly zeroed here */ if (m->m_flags & M_VLANTAG) { pbd->global_data = htole16(hlen | (1 << ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT)); } else { pbd->global_data = htole16(hlen); } pbd->ip_hlen_w = ip_hlen; hlen += pbd->ip_hlen_w; /* XXX assuming L4 header is contiguous to IPv4/IPv6 in the same mbuf */ if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_TSO | CSUM_TCP_IPV6)) { th = (struct tcphdr *)(ip + (ip_hlen << 1)); /* th_off is number of 32-bit words */ hlen += (uint16_t)(th->th_off << 1); } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_UDP_IPV6)) { uh = (struct udphdr *)(ip + (ip_hlen << 1)); hlen += (sizeof(struct udphdr) / 2); } else { /* valid case as only CSUM_IP was set */ return (0); } pbd->total_hlen_w = htole16(hlen); if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_TSO | CSUM_TCP_IPV6)) { fp->eth_q_stats.tx_ofld_frames_csum_tcp++; pbd->tcp_pseudo_csum = ntohs(th->th_sum); } else if (m->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_UDP_IPV6)) { fp->eth_q_stats.tx_ofld_frames_csum_udp++; /* * Everest1 (i.e. 57710, 57711, 57711E) does not natively support UDP * checksums and does not know anything about the UDP header and where * the checksum field is located. It only knows about TCP. Therefore * we "lie" to the hardware for outgoing UDP packets w/ checksum * offload. Since the checksum field offset for TCP is 16 bytes and * for UDP it is 6 bytes we pass a pointer to the hardware that is 10 * bytes less than the start of the UDP header. This allows the * hardware to write the checksum in the correct spot. But the * hardware will compute a checksum which includes the last 10 bytes * of the IP header. To correct this we tweak the stack computed * pseudo checksum by folding in the calculation of the inverse * checksum for those final 10 bytes of the IP header. This allows * the correct checksum to be computed by the hardware. 
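 *
 * Illustrative detail (inferred from the code below, not original text): the
 * 10-byte back-off is the difference between the TCP checksum offset
 * (16 bytes into the TCP header) and the UDP checksum offset (6 bytes into
 * the UDP header). The in_pseudo()/in_addword() pair below pre-subtracts
 * those 10 IP-header bytes (two 32-bit words plus one 16-bit word) from the
 * stack's pseudo checksum so the hardware's inclusion of them cancels out.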
*/ /* set pointer 10 bytes before UDP header */ tmp_uh = (uint32_t *)((uint8_t *)uh - 10); /* calculate a pseudo header checksum over the first 10 bytes */ tmp_csum = in_pseudo(*tmp_uh, *(tmp_uh + 1), *(uint16_t *)(tmp_uh + 2)); pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum)); } return (hlen * 2); /* entire header length, number of bytes */ } static void bxe_set_pbd_lso_e2(struct mbuf *m, uint32_t *parsing_data) { *parsing_data |= ((m->m_pkthdr.tso_segsz << ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & ETH_TX_PARSE_BD_E2_LSO_MSS); /* XXX test for IPv6 with extension header... */ } static void bxe_set_pbd_lso(struct mbuf *m, struct eth_tx_parse_bd_e1x *pbd) { struct ether_vlan_header *eh = NULL; struct ip *ip = NULL; struct tcphdr *th = NULL; int e_hlen; /* get the Ethernet header */ eh = mtod(m, struct ether_vlan_header *); /* handle VLAN encapsulation if present */ e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ? (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) : ETHER_HDR_LEN; /* get the IP and TCP header, with LSO entire header in first mbuf */ /* XXX assuming IPv4 */ ip = (struct ip *)(m->m_data + e_hlen); th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz); pbd->tcp_send_seq = ntohl(th->th_seq); pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff); #if 1 /* XXX IPv4 */ pbd->ip_id = ntohs(ip->ip_id); pbd->tcp_pseudo_csum = ntohs(in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htons(IPPROTO_TCP))); #else /* XXX IPv6 */ pbd->tcp_pseudo_csum = ntohs(in_pseudo(&ip6->ip6_src, &ip6->ip6_dst, htons(IPPROTO_TCP))); #endif pbd->global_data |= htole16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN); } /* * Encapsulte an mbuf cluster into the tx bd chain and makes the memory * visible to the controller. * * If an mbuf is submitted to this routine and cannot be given to the * controller (e.g. it has too many fragments) then the function may free * the mbuf and return to the caller. * * Returns: * 0 = Success, !0 = Failure * Note the side effect that an mbuf may be freed if it causes a problem. 
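 *
 * Typical usage (illustrative sketch, mirroring the dispatch routines that
 * follow): callers check the mbuf pointer after a failure to decide whether
 * they still own it:
 *
 *     if (__predict_false(bxe_tx_encap(fp, &m))) {
 *         if (m != NULL) {
 *             requeue the mbuf and set IFF_DRV_OACTIVE (queue full)
 *         } else {
 *             the mbuf was consumed/freed here and must not be touched
 *         }
 *     }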
*/ static int bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head) { bus_dma_segment_t segs[32]; struct mbuf *m0; struct bxe_sw_tx_bd *tx_buf; struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; /* struct eth_tx_parse_2nd_bd *pbd2 = NULL; */ struct eth_tx_bd *tx_data_bd; struct eth_tx_bd *tx_total_pkt_size_bd; struct eth_tx_start_bd *tx_start_bd; uint16_t bd_prod, pkt_prod, total_pkt_size; uint8_t mac_type; int defragged, error, nsegs, rc, nbds, vlan_off, ovlan; struct bxe_softc *sc; uint16_t tx_bd_avail; struct ether_vlan_header *eh; uint32_t pbd_e2_parsing_data = 0; uint8_t hlen = 0; int tmp_bd; int i; sc = fp->sc; #if __FreeBSD_version >= 800000 M_ASSERTPKTHDR(*m_head); #endif /* #if __FreeBSD_version >= 800000 */ m0 = *m_head; rc = defragged = nbds = ovlan = vlan_off = total_pkt_size = 0; tx_start_bd = NULL; tx_data_bd = NULL; tx_total_pkt_size_bd = NULL; /* get the H/W pointer for packets and BDs */ pkt_prod = fp->tx_pkt_prod; bd_prod = fp->tx_bd_prod; mac_type = UNICAST_ADDRESS; /* map the mbuf into the next open DMAable memory */ tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)]; error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag, tx_buf->m_map, m0, segs, &nsegs, BUS_DMA_NOWAIT); /* mapping errors */ if(__predict_false(error != 0)) { fp->eth_q_stats.tx_dma_mapping_failure++; if (error == ENOMEM) { /* resource issue, try again later */ rc = ENOMEM; } else if (error == EFBIG) { /* possibly recoverable with defragmentation */ fp->eth_q_stats.mbuf_defrag_attempts++; m0 = m_defrag(*m_head, M_NOWAIT); if (m0 == NULL) { fp->eth_q_stats.mbuf_defrag_failures++; rc = ENOBUFS; } else { /* defrag successful, try mapping again */ *m_head = m0; error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag, tx_buf->m_map, m0, segs, &nsegs, BUS_DMA_NOWAIT); if (error) { fp->eth_q_stats.tx_dma_mapping_failure++; rc = error; } } } else { /* unknown, unrecoverable mapping error */ BLOGE(sc, "Unknown TX mapping error rc=%d\n", error); bxe_dump_mbuf(sc, m0, FALSE); rc = error; } goto bxe_tx_encap_continue; } tx_bd_avail = bxe_tx_avail(sc, fp); /* make sure there is enough room in the send queue */ if (__predict_false(tx_bd_avail < (nsegs + 2))) { /* Recoverable, try again later. */ fp->eth_q_stats.tx_hw_queue_full++; bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); rc = ENOMEM; goto bxe_tx_encap_continue; } /* capture the current H/W TX chain high watermark */ if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth < (TX_BD_USABLE - tx_bd_avail))) { fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail); } /* make sure it fits in the packet window */ if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) { /* * The mbuf may be to big for the controller to handle. If the frame * is a TSO frame we'll need to do an additional check. */ if (m0->m_pkthdr.csum_flags & CSUM_TSO) { if (bxe_chktso_window(sc, nsegs, segs, m0) == 0) { goto bxe_tx_encap_continue; /* OK to send */ } else { fp->eth_q_stats.tx_window_violation_tso++; } } else { fp->eth_q_stats.tx_window_violation_std++; } /* lets try to defragment this mbuf and remap it */ fp->eth_q_stats.mbuf_defrag_attempts++; bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); m0 = m_defrag(*m_head, M_NOWAIT); if (m0 == NULL) { fp->eth_q_stats.mbuf_defrag_failures++; /* Ugh, just drop the frame... 
:( */ rc = ENOBUFS; } else { /* defrag successful, try mapping again */ *m_head = m0; error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag, tx_buf->m_map, m0, segs, &nsegs, BUS_DMA_NOWAIT); if (error) { fp->eth_q_stats.tx_dma_mapping_failure++; /* No sense in trying to defrag/copy chain, drop it. :( */ rc = error; } else { /* if the chain is still too long then drop it */ if(m0->m_pkthdr.csum_flags & CSUM_TSO) { /* * in case TSO is enabled nsegs should be checked against * BXE_TSO_MAX_SEGMENTS */ if (__predict_false(nsegs > BXE_TSO_MAX_SEGMENTS)) { bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); fp->eth_q_stats.nsegs_path1_errors++; rc = ENODEV; } } else { if (__predict_false(nsegs > BXE_MAX_SEGMENTS)) { bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map); fp->eth_q_stats.nsegs_path2_errors++; rc = ENODEV; } } } } } bxe_tx_encap_continue: /* Check for errors */ if (rc) { if (rc == ENOMEM) { /* recoverable try again later */ } else { fp->eth_q_stats.tx_soft_errors++; fp->eth_q_stats.mbuf_alloc_tx--; m_freem(*m_head); *m_head = NULL; } return (rc); } /* set flag according to packet type (UNICAST_ADDRESS is default) */ if (m0->m_flags & M_BCAST) { mac_type = BROADCAST_ADDRESS; } else if (m0->m_flags & M_MCAST) { mac_type = MULTICAST_ADDRESS; } /* store the mbuf into the mbuf ring */ tx_buf->m = m0; tx_buf->first_bd = fp->tx_bd_prod; tx_buf->flags = 0; /* prepare the first transmit (start) BD for the mbuf */ tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd; BLOGD(sc, DBG_TX, "sending pkt_prod=%u tx_buf=%p next_idx=%u bd=%u tx_start_bd=%p\n", pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd); tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr)); tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr)); tx_start_bd->nbytes = htole16(segs[0].ds_len); total_pkt_size += tx_start_bd->nbytes; tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); /* all frames have at least Start BD + Parsing BD */ nbds = nsegs + 1; tx_start_bd->nbd = htole16(nbds); if (m0->m_flags & M_VLANTAG) { tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag); tx_start_bd->bd_flags.as_bitfield |= (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); } else { /* vf tx, start bd must hold the ethertype for fw to enforce it */ if (IS_VF(sc)) { /* map ethernet header to find type and header length */ eh = mtod(m0, struct ether_vlan_header *); tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto; } else { /* used by FW for packet accounting */ tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod); } } /* * add a parsing BD from the chain. 
The parsing BD is always added * though it is only used for TSO and chksum */ bd_prod = TX_BD_NEXT(bd_prod); if (m0->m_pkthdr.csum_flags) { if (m0->m_pkthdr.csum_flags & CSUM_IP) { fp->eth_q_stats.tx_ofld_frames_csum_ip++; tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM; } if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) { tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 | ETH_TX_BD_FLAGS_L4_CSUM); } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) { tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 | ETH_TX_BD_FLAGS_IS_UDP | ETH_TX_BD_FLAGS_L4_CSUM); } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) || (m0->m_pkthdr.csum_flags & CSUM_TSO)) { tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM; } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) { tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM | ETH_TX_BD_FLAGS_IS_UDP); } } if (!CHIP_IS_E1x(sc)) { pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2; memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); if (m0->m_pkthdr.csum_flags) { hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data); } SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type); } else { uint16_t global_data = 0; pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x; memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); if (m0->m_pkthdr.csum_flags) { hlen = bxe_set_pbd_csum(fp, m0, pbd_e1x); } SET_FLAG(global_data, ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type); pbd_e1x->global_data |= htole16(global_data); } /* setup the parsing BD with TSO specific info */ if (m0->m_pkthdr.csum_flags & CSUM_TSO) { fp->eth_q_stats.tx_ofld_frames_lso++; tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO; if (__predict_false(tx_start_bd->nbytes > hlen)) { fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++; /* split the first BD into header/data making the fw job easy */ nbds++; tx_start_bd->nbd = htole16(nbds); tx_start_bd->nbytes = htole16(hlen); bd_prod = TX_BD_NEXT(bd_prod); /* new transmit BD after the tx_parse_bd */ tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd; tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen)); tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen)); tx_data_bd->nbytes = htole16(segs[0].ds_len - hlen); if (tx_total_pkt_size_bd == NULL) { tx_total_pkt_size_bd = tx_data_bd; } BLOGD(sc, DBG_TX, "TSO split header size is %d (%x:%x) nbds %d\n", le16toh(tx_start_bd->nbytes), le32toh(tx_start_bd->addr_hi), le32toh(tx_start_bd->addr_lo), nbds); } if (!CHIP_IS_E1x(sc)) { bxe_set_pbd_lso_e2(m0, &pbd_e2_parsing_data); } else { bxe_set_pbd_lso(m0, pbd_e1x); } } if (pbd_e2_parsing_data) { pbd_e2->parsing_data = htole32(pbd_e2_parsing_data); } /* prepare remaining BDs, start tx bd contains first seg/frag */ for (i = 1; i < nsegs ; i++) { bd_prod = TX_BD_NEXT(bd_prod); tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd; tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr)); tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr)); tx_data_bd->nbytes = htole16(segs[i].ds_len); if (tx_total_pkt_size_bd == NULL) { tx_total_pkt_size_bd = tx_data_bd; } total_pkt_size += tx_data_bd->nbytes; } BLOGD(sc, DBG_TX, "last bd %p\n", tx_data_bd); if (tx_total_pkt_size_bd != NULL) { tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size; } if (__predict_false(sc->debug & DBG_TX)) { tmp_bd = tx_buf->first_bd; for (i = 0; i < nbds; i++) { if (i == 0) { BLOGD(sc, DBG_TX, "TX Strt: %p bd=%d nbd=%d vlan=0x%x " "bd_flags=0x%x hdr_nbds=%d\n", tx_start_bd, tmp_bd, le16toh(tx_start_bd->nbd), 
le16toh(tx_start_bd->vlan_or_ethertype), tx_start_bd->bd_flags.as_bitfield, (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS)); } else if (i == 1) { if (pbd_e1x) { BLOGD(sc, DBG_TX, "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u " "ip_id=%u lso_mss=%u tcp_flags=0x%x csum=0x%x " "tcp_seq=%u total_hlen_w=%u\n", pbd_e1x, tmp_bd, pbd_e1x->global_data, pbd_e1x->ip_hlen_w, pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags, pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq, le16toh(pbd_e1x->total_hlen_w)); } else { /* if (pbd_e2) */ BLOGD(sc, DBG_TX, "-> Parse: %p bd=%d dst=%02x:%02x:%02x " "src=%02x:%02x:%02x parsing_data=0x%x\n", pbd_e2, tmp_bd, pbd_e2->data.mac_addr.dst_hi, pbd_e2->data.mac_addr.dst_mid, pbd_e2->data.mac_addr.dst_lo, pbd_e2->data.mac_addr.src_hi, pbd_e2->data.mac_addr.src_mid, pbd_e2->data.mac_addr.src_lo, pbd_e2->parsing_data); } } if (i != 1) { /* skip parse db as it doesn't hold data */ tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd; BLOGD(sc, DBG_TX, "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo: 0x%x\n", tx_data_bd, tmp_bd, le16toh(tx_data_bd->nbytes), le32toh(tx_data_bd->addr_hi), le32toh(tx_data_bd->addr_lo)); } tmp_bd = TX_BD_NEXT(tmp_bd); } } BLOGD(sc, DBG_TX, "doorbell: nbds=%d bd=%u\n", nbds, bd_prod); /* update TX BD producer index value for next TX */ bd_prod = TX_BD_NEXT(bd_prod); /* * If the chain of tx_bd's describing this frame is adjacent to or spans * an eth_tx_next_bd element then we need to increment the nbds value. */ if (TX_BD_IDX(bd_prod) < nbds) { nbds++; } /* don't allow reordering of writes for nbd and packets */ mb(); fp->tx_db.data.prod += nbds; /* producer points to the next free tx_bd at this point */ fp->tx_pkt_prod++; fp->tx_bd_prod = bd_prod; DOORBELL(sc, fp->index, fp->tx_db.raw); fp->eth_q_stats.tx_pkts++; /* Prevent speculative reads from getting ahead of the status block. */ bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, BUS_SPACE_BARRIER_READ); /* Prevent speculative reads from getting ahead of the doorbell. */ bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle, 0, 0, BUS_SPACE_BARRIER_READ); return (0); } static void bxe_tx_start_locked(struct bxe_softc *sc, if_t ifp, struct bxe_fastpath *fp) { struct mbuf *m = NULL; int tx_count = 0; uint16_t tx_bd_avail; BXE_FP_TX_LOCK_ASSERT(fp); /* keep adding entries while there are frames to send */ while (!if_sendq_empty(ifp)) { /* * check for any frames to send * dequeue can still be NULL even if queue is not empty */ m = if_dequeue(ifp); if (__predict_false(m == NULL)) { break; } /* the mbuf now belongs to us */ fp->eth_q_stats.mbuf_alloc_tx++; /* * Put the frame into the transmit ring. If we don't have room, * place the mbuf back at the head of the TX queue, set the * OACTIVE flag, and wait for the NIC to drain the chain. */ if (__predict_false(bxe_tx_encap(fp, &m))) { fp->eth_q_stats.tx_encap_failures++; if (m != NULL) { /* mark the TX queue as full and return the frame */ if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); if_sendq_prepend(ifp, m); fp->eth_q_stats.mbuf_alloc_tx--; fp->eth_q_stats.tx_queue_xoff++; } /* stop looking for more work */ break; } /* the frame was enqueued successfully */ tx_count++; /* send a copy of the frame to any BPF listeners. 
*/ if_etherbpfmtap(ifp, m); tx_bd_avail = bxe_tx_avail(sc, fp); /* handle any completions if we're running low */ if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */ bxe_txeof(sc, fp); if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) { break; } } } /* all TX packets were dequeued and/or the tx ring is full */ if (tx_count > 0) { /* reset the TX watchdog timeout timer */ fp->watchdog_timer = BXE_TX_TIMEOUT; } } /* Legacy (non-RSS) dispatch routine */ static void bxe_tx_start(if_t ifp) { struct bxe_softc *sc; struct bxe_fastpath *fp; sc = if_getsoftc(ifp); if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { BLOGW(sc, "Interface not running, ignoring transmit request\n"); return; } if (!sc->link_vars.link_up) { BLOGW(sc, "Interface link is down, ignoring transmit request\n"); return; } fp = &sc->fp[0]; if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) { fp->eth_q_stats.tx_queue_full_return++; return; } BXE_FP_TX_LOCK(fp); bxe_tx_start_locked(sc, ifp, fp); BXE_FP_TX_UNLOCK(fp); } #if __FreeBSD_version >= 901504 static int bxe_tx_mq_start_locked(struct bxe_softc *sc, if_t ifp, struct bxe_fastpath *fp, struct mbuf *m) { struct buf_ring *tx_br = fp->tx_br; struct mbuf *next; int depth, rc, tx_count; uint16_t tx_bd_avail; rc = tx_count = 0; BXE_FP_TX_LOCK_ASSERT(fp); if (sc->state != BXE_STATE_OPEN) { fp->eth_q_stats.bxe_tx_mq_sc_state_failures++; return ENETDOWN; } if (!tx_br) { BLOGE(sc, "Multiqueue TX and no buf_ring!\n"); return (EINVAL); } if (m != NULL) { rc = drbr_enqueue(ifp, tx_br, m); if (rc != 0) { fp->eth_q_stats.tx_soft_errors++; goto bxe_tx_mq_start_locked_exit; } } if (!sc->link_vars.link_up || !(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { fp->eth_q_stats.tx_request_link_down_failures++; goto bxe_tx_mq_start_locked_exit; } /* fetch the depth of the driver queue */ depth = drbr_inuse_drv(ifp, tx_br); if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) { fp->eth_q_stats.tx_max_drbr_queue_depth = depth; } /* keep adding entries while there are frames to send */ while ((next = drbr_peek(ifp, tx_br)) != NULL) { /* handle any completions if we're running low */ tx_bd_avail = bxe_tx_avail(sc, fp); if (tx_bd_avail < BXE_TX_CLEANUP_THRESHOLD) { /* bxe_txeof will set IFF_DRV_OACTIVE appropriately */ bxe_txeof(sc, fp); tx_bd_avail = bxe_tx_avail(sc, fp); if (tx_bd_avail < (BXE_TSO_MAX_SEGMENTS + 1)) { fp->eth_q_stats.bd_avail_too_less_failures++; m_freem(next); drbr_advance(ifp, tx_br); rc = ENOBUFS; break; } } /* the mbuf now belongs to us */ fp->eth_q_stats.mbuf_alloc_tx++; /* * Put the frame into the transmit ring. If we don't have room, * place the mbuf back at the head of the TX queue, set the * OACTIVE flag, and wait for the NIC to drain the chain. 
*/ rc = bxe_tx_encap(fp, &next); if (__predict_false(rc != 0)) { fp->eth_q_stats.tx_encap_failures++; if (next != NULL) { /* mark the TX queue as full and save the frame */ if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); drbr_putback(ifp, tx_br, next); fp->eth_q_stats.mbuf_alloc_tx--; fp->eth_q_stats.tx_frames_deferred++; } else drbr_advance(ifp, tx_br); /* stop looking for more work */ break; } /* the transmit frame was enqueued successfully */ tx_count++; /* send a copy of the frame to any BPF listeners */ if_etherbpfmtap(ifp, next); drbr_advance(ifp, tx_br); } /* all TX packets were dequeued and/or the tx ring is full */ if (tx_count > 0) { /* reset the TX watchdog timeout timer */ fp->watchdog_timer = BXE_TX_TIMEOUT; } bxe_tx_mq_start_locked_exit: /* If we didn't drain the drbr, enqueue a task in the future to do it. */ if (!drbr_empty(ifp, tx_br)) { fp->eth_q_stats.tx_mq_not_empty++; taskqueue_enqueue_timeout(fp->tq, &fp->tx_timeout_task, 1); } return (rc); } static void bxe_tx_mq_start_deferred(void *arg, int pending) { struct bxe_fastpath *fp = (struct bxe_fastpath *)arg; struct bxe_softc *sc = fp->sc; if_t ifp = sc->ifp; BXE_FP_TX_LOCK(fp); bxe_tx_mq_start_locked(sc, ifp, fp, NULL); BXE_FP_TX_UNLOCK(fp); } /* Multiqueue (TSS) dispatch routine. */ static int bxe_tx_mq_start(struct ifnet *ifp, struct mbuf *m) { struct bxe_softc *sc = if_getsoftc(ifp); struct bxe_fastpath *fp; int fp_index, rc; fp_index = 0; /* default is the first queue */ /* check if flowid is set */ if (BXE_VALID_FLOWID(m)) fp_index = (m->m_pkthdr.flowid % sc->num_queues); fp = &sc->fp[fp_index]; if (sc->state != BXE_STATE_OPEN) { fp->eth_q_stats.bxe_tx_mq_sc_state_failures++; return ENETDOWN; } if (BXE_FP_TX_TRYLOCK(fp)) { rc = bxe_tx_mq_start_locked(sc, ifp, fp, m); BXE_FP_TX_UNLOCK(fp); } else { rc = drbr_enqueue(ifp, fp->tx_br, m); taskqueue_enqueue(fp->tq, &fp->tx_task); } return (rc); } static void bxe_mq_flush(struct ifnet *ifp) { struct bxe_softc *sc = if_getsoftc(ifp); struct bxe_fastpath *fp; struct mbuf *m; int i; for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; if (fp->state != BXE_FP_STATE_IRQ) { BLOGD(sc, DBG_LOAD, "Not clearing fp[%02d] buf_ring (state=%d)\n", fp->index, fp->state); continue; } if (fp->tx_br != NULL) { BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index); BXE_FP_TX_LOCK(fp); while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) { m_freem(m); } BXE_FP_TX_UNLOCK(fp); } } if_qflush(ifp); } #endif /* FreeBSD_version >= 901504 */ static uint16_t bxe_cid_ilt_lines(struct bxe_softc *sc) { if (IS_SRIOV(sc)) { return ((BXE_FIRST_VF_CID + BXE_VF_CIDS) / ILT_PAGE_CIDS); } return (L2_ILT_LINES(sc)); } static void bxe_ilt_set_info(struct bxe_softc *sc) { struct ilt_client_info *ilt_client; struct ecore_ilt *ilt = sc->ilt; uint16_t line = 0; ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc)); BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line); /* CDU */ ilt_client = &ilt->clients[ILT_CLIENT_CDU]; ilt_client->client_num = ILT_CLIENT_CDU; ilt_client->page_size = CDU_ILT_PAGE_SZ; ilt_client->flags = ILT_CLIENT_SKIP_MEM; ilt_client->start = line; line += bxe_cid_ilt_lines(sc); if (CNIC_SUPPORT(sc)) { line += CNIC_ILT_LINES; } ilt_client->end = (line - 1); BLOGD(sc, DBG_LOAD, "ilt client[CDU]: start %d, end %d, " "psz 0x%x, flags 0x%x, hw psz %d\n", ilt_client->start, ilt_client->end, ilt_client->page_size, ilt_client->flags, ilog2(ilt_client->page_size >> 12)); /* QM */ if (QM_INIT(sc->qm_cid_count)) { ilt_client = &ilt->clients[ILT_CLIENT_QM]; ilt_client->client_num = 
ILT_CLIENT_QM; ilt_client->page_size = QM_ILT_PAGE_SZ; ilt_client->flags = 0; ilt_client->start = line; /* 4 bytes for each cid */ line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4, QM_ILT_PAGE_SZ); ilt_client->end = (line - 1); BLOGD(sc, DBG_LOAD, "ilt client[QM]: start %d, end %d, " "psz 0x%x, flags 0x%x, hw psz %d\n", ilt_client->start, ilt_client->end, ilt_client->page_size, ilt_client->flags, ilog2(ilt_client->page_size >> 12)); } if (CNIC_SUPPORT(sc)) { /* SRC */ ilt_client = &ilt->clients[ILT_CLIENT_SRC]; ilt_client->client_num = ILT_CLIENT_SRC; ilt_client->page_size = SRC_ILT_PAGE_SZ; ilt_client->flags = 0; ilt_client->start = line; line += SRC_ILT_LINES; ilt_client->end = (line - 1); BLOGD(sc, DBG_LOAD, "ilt client[SRC]: start %d, end %d, " "psz 0x%x, flags 0x%x, hw psz %d\n", ilt_client->start, ilt_client->end, ilt_client->page_size, ilt_client->flags, ilog2(ilt_client->page_size >> 12)); /* TM */ ilt_client = &ilt->clients[ILT_CLIENT_TM]; ilt_client->client_num = ILT_CLIENT_TM; ilt_client->page_size = TM_ILT_PAGE_SZ; ilt_client->flags = 0; ilt_client->start = line; line += TM_ILT_LINES; ilt_client->end = (line - 1); BLOGD(sc, DBG_LOAD, "ilt client[TM]: start %d, end %d, " "psz 0x%x, flags 0x%x, hw psz %d\n", ilt_client->start, ilt_client->end, ilt_client->page_size, ilt_client->flags, ilog2(ilt_client->page_size >> 12)); } KASSERT((line <= ILT_MAX_LINES), ("Invalid number of ILT lines!")); } static void bxe_set_fp_rx_buf_size(struct bxe_softc *sc) { int i; uint32_t rx_buf_size; rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu); for (i = 0; i < sc->num_queues; i++) { if(rx_buf_size <= MCLBYTES){ sc->fp[i].rx_buf_size = rx_buf_size; sc->fp[i].mbuf_alloc_size = MCLBYTES; }else if (rx_buf_size <= MJUMPAGESIZE){ sc->fp[i].rx_buf_size = rx_buf_size; sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE; }else if (rx_buf_size <= (MJUMPAGESIZE + MCLBYTES)){ sc->fp[i].rx_buf_size = MCLBYTES; sc->fp[i].mbuf_alloc_size = MCLBYTES; }else if (rx_buf_size <= (2 * MJUMPAGESIZE)){ sc->fp[i].rx_buf_size = MJUMPAGESIZE; sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE; }else { sc->fp[i].rx_buf_size = MCLBYTES; sc->fp[i].mbuf_alloc_size = MCLBYTES; } } } static int bxe_alloc_ilt_mem(struct bxe_softc *sc) { int rc = 0; if ((sc->ilt = (struct ecore_ilt *)malloc(sizeof(struct ecore_ilt), M_BXE_ILT, (M_NOWAIT | M_ZERO))) == NULL) { rc = 1; } return (rc); } static int bxe_alloc_ilt_lines_mem(struct bxe_softc *sc) { int rc = 0; if ((sc->ilt->lines = (struct ilt_line *)malloc((sizeof(struct ilt_line) * ILT_MAX_LINES), M_BXE_ILT, (M_NOWAIT | M_ZERO))) == NULL) { rc = 1; } return (rc); } static void bxe_free_ilt_mem(struct bxe_softc *sc) { if (sc->ilt != NULL) { free(sc->ilt, M_BXE_ILT); sc->ilt = NULL; } } static void bxe_free_ilt_lines_mem(struct bxe_softc *sc) { if (sc->ilt->lines != NULL) { free(sc->ilt->lines, M_BXE_ILT); sc->ilt->lines = NULL; } } static void bxe_free_mem(struct bxe_softc *sc) { int i; for (i = 0; i < L2_ILT_LINES(sc); i++) { bxe_dma_free(sc, &sc->context[i].vcxt_dma); sc->context[i].vcxt = NULL; sc->context[i].size = 0; } ecore_ilt_mem_op(sc, ILT_MEMOP_FREE); bxe_free_ilt_lines_mem(sc); } static int bxe_alloc_mem(struct bxe_softc *sc) { int context_size; int allocated; int i; /* * Allocate memory for CDU context: * This memory is allocated separately and not in the generic ILT * functions because CDU differs in few aspects: * 1. There can be multiple entities allocating memory for context - * regular L2, CNIC, and SRIOV drivers. 
Each separately controls * its own ILT lines. * 2. Since CDU page-size is not a single 4KB page (which is the case * for the other ILT clients), to be efficient we want to support * allocation of sub-page-size in the last entry. * 3. Context pointers are used by the driver to pass to FW / update * the context (for the other ILT clients the pointers are used just to * free the memory during unload). */ context_size = (sizeof(union cdu_context) * BXE_L2_CID_COUNT(sc)); for (i = 0, allocated = 0; allocated < context_size; i++) { sc->context[i].size = min(CDU_ILT_PAGE_SZ, (context_size - allocated)); if (bxe_dma_alloc(sc, sc->context[i].size, &sc->context[i].vcxt_dma, "cdu context") != 0) { bxe_free_mem(sc); return (-1); } sc->context[i].vcxt = (union cdu_context *)sc->context[i].vcxt_dma.vaddr; allocated += sc->context[i].size; } bxe_alloc_ilt_lines_mem(sc); BLOGD(sc, DBG_LOAD, "ilt=%p start_line=%u lines=%p\n", sc->ilt, sc->ilt->start_line, sc->ilt->lines); { for (i = 0; i < 4; i++) { BLOGD(sc, DBG_LOAD, "c%d page_size=%u start=%u end=%u num=%u flags=0x%x\n", i, sc->ilt->clients[i].page_size, sc->ilt->clients[i].start, sc->ilt->clients[i].end, sc->ilt->clients[i].client_num, sc->ilt->clients[i].flags); } } if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) { BLOGE(sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed\n"); bxe_free_mem(sc); return (-1); } return (0); } static void bxe_free_rx_bd_chain(struct bxe_fastpath *fp) { struct bxe_softc *sc; int i; sc = fp->sc; if (fp->rx_mbuf_tag == NULL) { return; } /* free all mbufs and unload all maps */ for (i = 0; i < RX_BD_TOTAL; i++) { if (fp->rx_mbuf_chain[i].m_map != NULL) { bus_dmamap_sync(fp->rx_mbuf_tag, fp->rx_mbuf_chain[i].m_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_chain[i].m_map); } if (fp->rx_mbuf_chain[i].m != NULL) { m_freem(fp->rx_mbuf_chain[i].m); fp->rx_mbuf_chain[i].m = NULL; fp->eth_q_stats.mbuf_alloc_rx--; } } } static void bxe_free_tpa_pool(struct bxe_fastpath *fp) { struct bxe_softc *sc; int i, max_agg_queues; sc = fp->sc; if (fp->rx_mbuf_tag == NULL) { return; } max_agg_queues = MAX_AGG_QS(sc); /* release all mbufs and unload all DMA maps in the TPA pool */ for (i = 0; i < max_agg_queues; i++) { if (fp->rx_tpa_info[i].bd.m_map != NULL) { bus_dmamap_sync(fp->rx_mbuf_tag, fp->rx_tpa_info[i].bd.m_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_tpa_info[i].bd.m_map); } if (fp->rx_tpa_info[i].bd.m != NULL) { m_freem(fp->rx_tpa_info[i].bd.m); fp->rx_tpa_info[i].bd.m = NULL; fp->eth_q_stats.mbuf_alloc_tpa--; } } } static void bxe_free_sge_chain(struct bxe_fastpath *fp) { struct bxe_softc *sc; int i; sc = fp->sc; if (fp->rx_sge_mbuf_tag == NULL) { return; } /* rree all mbufs and unload all maps */ for (i = 0; i < RX_SGE_TOTAL; i++) { if (fp->rx_sge_mbuf_chain[i].m_map != NULL) { bus_dmamap_sync(fp->rx_sge_mbuf_tag, fp->rx_sge_mbuf_chain[i].m_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(fp->rx_sge_mbuf_tag, fp->rx_sge_mbuf_chain[i].m_map); } if (fp->rx_sge_mbuf_chain[i].m != NULL) { m_freem(fp->rx_sge_mbuf_chain[i].m); fp->rx_sge_mbuf_chain[i].m = NULL; fp->eth_q_stats.mbuf_alloc_sge--; } } } static void bxe_free_fp_buffers(struct bxe_softc *sc) { struct bxe_fastpath *fp; int i; for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; #if __FreeBSD_version >= 901504 if (fp->tx_br != NULL) { /* just in case bxe_mq_flush() wasn't called */ if (mtx_initialized(&fp->tx_mtx)) { struct mbuf *m; BXE_FP_TX_LOCK(fp); while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) m_freem(m); BXE_FP_TX_UNLOCK(fp); 
} } #endif /* free all RX buffers */ bxe_free_rx_bd_chain(fp); bxe_free_tpa_pool(fp); bxe_free_sge_chain(fp); if (fp->eth_q_stats.mbuf_alloc_rx != 0) { BLOGE(sc, "failed to claim all rx mbufs (%d left)\n", fp->eth_q_stats.mbuf_alloc_rx); } if (fp->eth_q_stats.mbuf_alloc_sge != 0) { BLOGE(sc, "failed to claim all sge mbufs (%d left)\n", fp->eth_q_stats.mbuf_alloc_sge); } if (fp->eth_q_stats.mbuf_alloc_tpa != 0) { BLOGE(sc, "failed to claim all tpa mbufs (%d left)\n", fp->eth_q_stats.mbuf_alloc_tpa); } if (fp->eth_q_stats.mbuf_alloc_tx != 0) { BLOGE(sc, "failed to release tx mbufs (%d left)\n", fp->eth_q_stats.mbuf_alloc_tx); } /* XXX verify all mbufs were reclaimed */ } } static int bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp, uint16_t prev_index, uint16_t index) { struct bxe_sw_rx_bd *rx_buf; struct eth_rx_bd *rx_bd; bus_dma_segment_t segs[1]; bus_dmamap_t map; struct mbuf *m; int nsegs, rc; rc = 0; /* allocate the new RX BD mbuf */ m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size); if (__predict_false(m == NULL)) { fp->eth_q_stats.mbuf_rx_bd_alloc_failed++; return (ENOBUFS); } fp->eth_q_stats.mbuf_alloc_rx++; /* initialize the mbuf buffer length */ m->m_pkthdr.len = m->m_len = fp->rx_buf_size; /* map the mbuf into non-paged pool */ rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (__predict_false(rc != 0)) { fp->eth_q_stats.mbuf_rx_bd_mapping_failed++; m_freem(m); fp->eth_q_stats.mbuf_alloc_rx--; return (rc); } /* all mbufs must map to a single segment */ KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); /* release any existing RX BD mbuf mappings */ if (prev_index != index) { rx_buf = &fp->rx_mbuf_chain[prev_index]; if (rx_buf->m_map != NULL) { bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); } /* * We only get here from bxe_rxeof() when the maximum number * of rx buffers is less than RX_BD_USABLE. bxe_rxeof() already * holds the mbuf in the prev_index so it's OK to NULL it out * here without concern of a memory leak. */ fp->rx_mbuf_chain[prev_index].m = NULL; } rx_buf = &fp->rx_mbuf_chain[index]; if (rx_buf->m_map != NULL) { bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map); } /* save the mbuf and mapping info for a future packet */ map = (prev_index != index) ?
fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map; rx_buf->m_map = fp->rx_mbuf_spare_map; fp->rx_mbuf_spare_map = map; bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map, BUS_DMASYNC_PREREAD); rx_buf->m = m; rx_bd = &fp->rx_chain[index]; rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr)); rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr)); return (rc); } static int bxe_alloc_rx_tpa_mbuf(struct bxe_fastpath *fp, int queue) { struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue]; bus_dma_segment_t segs[1]; bus_dmamap_t map; struct mbuf *m; int nsegs; int rc = 0; /* allocate the new TPA mbuf */ m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size); if (__predict_false(m == NULL)) { fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++; return (ENOBUFS); } fp->eth_q_stats.mbuf_alloc_tpa++; /* initialize the mbuf buffer length */ m->m_pkthdr.len = m->m_len = fp->rx_buf_size; /* map the mbuf into non-paged pool */ rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag, fp->rx_tpa_info_mbuf_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (__predict_false(rc != 0)) { fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++; m_free(m); fp->eth_q_stats.mbuf_alloc_tpa--; return (rc); } /* all mbufs must map to a single segment */ KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); /* release any existing TPA mbuf mapping */ if (tpa_info->bd.m_map != NULL) { bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map); } /* save the mbuf and mapping info for the TPA mbuf */ map = tpa_info->bd.m_map; tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map; fp->rx_tpa_info_mbuf_spare_map = map; bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map, BUS_DMASYNC_PREREAD); tpa_info->bd.m = m; tpa_info->seg = segs[0]; return (rc); } /* * Allocate an mbuf and assign it to the receive scatter gather chain. The * caller must take care to save a copy of the existing mbuf in the SG mbuf * chain. 
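 *
 * As with the RX BD and TPA paths above, the new mbuf is loaded into the
 * per-queue spare DMA map first; only after bus_dmamap_load_mbuf_sg()
 * succeeds is the spare map swapped into the ring slot, so a failed
 * allocation or mapping leaves the existing mbuf and mapping untouched.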
*/ static int bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp, uint16_t index) { struct bxe_sw_rx_bd *sge_buf; struct eth_rx_sge *sge; bus_dma_segment_t segs[1]; bus_dmamap_t map; struct mbuf *m; int nsegs; int rc = 0; /* allocate a new SGE mbuf */ m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE); if (__predict_false(m == NULL)) { fp->eth_q_stats.mbuf_rx_sge_alloc_failed++; return (ENOMEM); } fp->eth_q_stats.mbuf_alloc_sge++; /* initialize the mbuf buffer length */ m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE; /* map the SGE mbuf into non-paged pool */ rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag, fp->rx_sge_mbuf_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (__predict_false(rc != 0)) { fp->eth_q_stats.mbuf_rx_sge_mapping_failed++; m_freem(m); fp->eth_q_stats.mbuf_alloc_sge--; return (rc); } /* all mbufs must map to a single segment */ KASSERT((nsegs == 1), ("Too many segments, %d returned!", nsegs)); sge_buf = &fp->rx_sge_mbuf_chain[index]; /* release any existing SGE mbuf mapping */ if (sge_buf->m_map != NULL) { bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map); } /* save the mbuf and mapping info for a future packet */ map = sge_buf->m_map; sge_buf->m_map = fp->rx_sge_mbuf_spare_map; fp->rx_sge_mbuf_spare_map = map; bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map, BUS_DMASYNC_PREREAD); sge_buf->m = m; sge = &fp->rx_sge_chain[index]; sge->addr_hi = htole32(U64_HI(segs[0].ds_addr)); sge->addr_lo = htole32(U64_LO(segs[0].ds_addr)); return (rc); } static __noinline int bxe_alloc_fp_buffers(struct bxe_softc *sc) { struct bxe_fastpath *fp; int i, j, rc = 0; int ring_prod, cqe_ring_prod; int max_agg_queues; for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; ring_prod = cqe_ring_prod = 0; fp->rx_bd_cons = 0; fp->rx_cq_cons = 0; /* allocate buffers for the RX BDs in RX BD chain */ for (j = 0; j < sc->max_rx_bufs; j++) { rc = bxe_alloc_rx_bd_mbuf(fp, ring_prod, ring_prod); if (rc != 0) { BLOGE(sc, "mbuf alloc fail for fp[%02d] rx chain (%d)\n", i, rc); goto bxe_alloc_fp_buffers_error; } ring_prod = RX_BD_NEXT(ring_prod); cqe_ring_prod = RCQ_NEXT(cqe_ring_prod); } fp->rx_bd_prod = ring_prod; fp->rx_cq_prod = cqe_ring_prod; fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0; max_agg_queues = MAX_AGG_QS(sc); fp->tpa_enable = TRUE; /* fill the TPA pool */ for (j = 0; j < max_agg_queues; j++) { rc = bxe_alloc_rx_tpa_mbuf(fp, j); if (rc != 0) { BLOGE(sc, "mbuf alloc fail for fp[%02d] TPA queue %d\n", i, j); fp->tpa_enable = FALSE; goto bxe_alloc_fp_buffers_error; } fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP; } if (fp->tpa_enable) { /* fill the RX SGE chain */ ring_prod = 0; for (j = 0; j < RX_SGE_USABLE; j++) { rc = bxe_alloc_rx_sge_mbuf(fp, ring_prod); if (rc != 0) { BLOGE(sc, "mbuf alloc fail for fp[%02d] SGE %d\n", i, ring_prod); fp->tpa_enable = FALSE; ring_prod = 0; goto bxe_alloc_fp_buffers_error; } ring_prod = RX_SGE_NEXT(ring_prod); } fp->rx_sge_prod = ring_prod; } } return (0); bxe_alloc_fp_buffers_error: /* unwind what was already allocated */ bxe_free_rx_bd_chain(fp); bxe_free_tpa_pool(fp); bxe_free_sge_chain(fp); return (ENOBUFS); } static void bxe_free_fw_stats_mem(struct bxe_softc *sc) { bxe_dma_free(sc, &sc->fw_stats_dma); sc->fw_stats_num = 0; sc->fw_stats_req_size = 0; sc->fw_stats_req = NULL; sc->fw_stats_req_mapping = 0; sc->fw_stats_data_size = 0; sc->fw_stats_data = NULL; sc->fw_stats_data_mapping = 0; } static int bxe_alloc_fw_stats_mem(struct bxe_softc *sc) { uint8_t 
num_queue_stats; int num_groups; /* number of queues for statistics is number of eth queues */ num_queue_stats = BXE_NUM_ETH_QUEUES(sc); /* * Total number of FW statistics requests = * 1 for port stats + 1 for PF stats + num of queues */ sc->fw_stats_num = (2 + num_queue_stats); /* * Request is built from stats_query_header and an array of * stats_query_cmd_group each of which contains STATS_QUERY_CMD_COUNT * rules. The real number of requests is configured in the * stats_query_header. */ num_groups = ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) + ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0)); BLOGD(sc, DBG_LOAD, "stats fw_stats_num %d num_groups %d\n", sc->fw_stats_num, num_groups); sc->fw_stats_req_size = (sizeof(struct stats_query_header) + (num_groups * sizeof(struct stats_query_cmd_group))); /* * Data for statistics requests + stats_counter. * stats_counter holds per-STORM counters that are incremented when * STORM has finished with the current request. Memory for FCoE * offloaded statistics is counted anyway, even if they will not be sent. * VF stats are not accounted for here as the data of VF stats is stored * in memory allocated by the VF, not here. */ sc->fw_stats_data_size = (sizeof(struct stats_counter) + sizeof(struct per_port_stats) + sizeof(struct per_pf_stats) + /* sizeof(struct fcoe_statistics_params) + */ (sizeof(struct per_queue_stats) * num_queue_stats)); if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size), &sc->fw_stats_dma, "fw stats") != 0) { bxe_free_fw_stats_mem(sc); return (-1); } /* set up the shortcuts */ sc->fw_stats_req = (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr; sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr; sc->fw_stats_data = (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr + sc->fw_stats_req_size); sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr + sc->fw_stats_req_size); BLOGD(sc, DBG_LOAD, "statistics request base address set to %#jx\n", (uintmax_t)sc->fw_stats_req_mapping); BLOGD(sc, DBG_LOAD, "statistics data base address set to %#jx\n", (uintmax_t)sc->fw_stats_data_mapping); return (0); } /* * Bits map: * 0-7 - Engine0 load counter. * 8-15 - Engine1 load counter. * 16 - Engine0 RESET_IN_PROGRESS bit. * 17 - Engine1 RESET_IN_PROGRESS bit. * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active * function on the engine * 19 - Engine1 ONE_IS_LOADED. * 20 - Chip reset flow bit. When set, a non-leader must wait for both engines' * leaders to complete (check for both RESET_IN_PROGRESS bits and not * for just the one belonging to its engine).
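 *
 * A minimal decode sketch of this register layout (illustrative only and
 * not compiled; the masks mirror the BXE_PATH*_LOAD_CNT_* and
 * BXE_PATH*_RST_IN_PROG_BIT definitions below, and the helper name is
 * hypothetical):
 */
#if 0
static void
bxe_decode_recovery_reg(uint32_t val)
{
    /* per-engine load counters live in the two low bytes */
    int path0_load = (val & 0x000000ff) >> 0;
    int path1_load = (val & 0x0000ff00) >> 8;
    /* per-engine RESET_IN_PROGRESS flags */
    int path0_rst = (val & 0x00010000) != 0;
    int path1_rst = (val & 0x00020000) != 0;

    printf("load0=%d load1=%d rst0=%d rst1=%d\n",
        path0_load, path1_load, path0_rst, path1_rst);
}
#endif
/*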
*/ #define BXE_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1 #define BXE_PATH0_LOAD_CNT_MASK 0x000000ff #define BXE_PATH0_LOAD_CNT_SHIFT 0 #define BXE_PATH1_LOAD_CNT_MASK 0x0000ff00 #define BXE_PATH1_LOAD_CNT_SHIFT 8 #define BXE_PATH0_RST_IN_PROG_BIT 0x00010000 #define BXE_PATH1_RST_IN_PROG_BIT 0x00020000 #define BXE_GLOBAL_RESET_BIT 0x00040000 /* set the GLOBAL_RESET bit, should be run under rtnl lock */ static void bxe_set_reset_global(struct bxe_softc *sc) { uint32_t val; bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); REG_WR(sc, BXE_RECOVERY_GLOB_REG, val | BXE_GLOBAL_RESET_BIT); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); } /* clear the GLOBAL_RESET bit, should be run under rtnl lock */ static void bxe_clear_reset_global(struct bxe_softc *sc) { uint32_t val; bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); REG_WR(sc, BXE_RECOVERY_GLOB_REG, val & (~BXE_GLOBAL_RESET_BIT)); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); } /* checks the GLOBAL_RESET bit, should be run under rtnl lock */ static uint8_t bxe_reset_is_global(struct bxe_softc *sc) { uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); BLOGD(sc, DBG_LOAD, "GLOB_REG=0x%08x\n", val); return (val & BXE_GLOBAL_RESET_BIT) ? TRUE : FALSE; } /* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */ static void bxe_set_reset_done(struct bxe_softc *sc) { uint32_t val; uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT : BXE_PATH0_RST_IN_PROG_BIT; bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); /* Clear the bit */ val &= ~bit; REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); } /* set RESET_IN_PROGRESS for the engine, should be run under rtnl lock */ static void bxe_set_reset_in_progress(struct bxe_softc *sc) { uint32_t val; uint32_t bit = SC_PATH(sc) ? BXE_PATH1_RST_IN_PROG_BIT : BXE_PATH0_RST_IN_PROG_BIT; bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); /* Set the bit */ val |= bit; REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); } /* check RESET_IN_PROGRESS bit for an engine, should be run under rtnl lock */ static uint8_t bxe_reset_is_done(struct bxe_softc *sc, int engine) { uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); uint32_t bit = engine ? BXE_PATH1_RST_IN_PROG_BIT : BXE_PATH0_RST_IN_PROG_BIT; /* return false if bit is set */ return (val & bit) ? FALSE : TRUE; } /* get the load status for an engine, should be run under rtnl lock */ static uint8_t bxe_get_load_status(struct bxe_softc *sc, int engine) { uint32_t mask = engine ? BXE_PATH1_LOAD_CNT_MASK : BXE_PATH0_LOAD_CNT_MASK; uint32_t shift = engine ? BXE_PATH1_LOAD_CNT_SHIFT : BXE_PATH0_LOAD_CNT_SHIFT; uint32_t val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val); val = ((val & mask) >> shift); BLOGD(sc, DBG_LOAD, "Load mask engine %d = 0x%08x\n", engine, val); return (val != 0); } /* set pf load mark */ /* XXX needs to be under rtnl lock */ static void bxe_set_pf_load(struct bxe_softc *sc) { uint32_t val; uint32_t val1; uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK : BXE_PATH0_LOAD_CNT_MASK; uint32_t shift = SC_PATH(sc) ? 
BXE_PATH1_LOAD_CNT_SHIFT : BXE_PATH0_LOAD_CNT_SHIFT; bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); BLOGD(sc, DBG_LOAD, "Old value for GLOB_REG=0x%08x\n", val); /* get the current counter value */ val1 = ((val & mask) >> shift); /* set bit of this PF */ val1 |= (1 << SC_ABS_FUNC(sc)); /* clear the old value */ val &= ~mask; /* set the new one */ val |= ((val1 << shift) & mask); REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); } /* clear pf load mark */ /* XXX needs to be under rtnl lock */ static uint8_t bxe_clear_pf_load(struct bxe_softc *sc) { uint32_t val1, val; uint32_t mask = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_MASK : BXE_PATH0_LOAD_CNT_MASK; uint32_t shift = SC_PATH(sc) ? BXE_PATH1_LOAD_CNT_SHIFT : BXE_PATH0_LOAD_CNT_SHIFT; bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); val = REG_RD(sc, BXE_RECOVERY_GLOB_REG); BLOGD(sc, DBG_LOAD, "Old GEN_REG_VAL=0x%08x\n", val); /* get the current counter value */ val1 = (val & mask) >> shift; /* clear bit of that PF */ val1 &= ~(1 << SC_ABS_FUNC(sc)); /* clear the old value */ val &= ~mask; /* set the new one */ val |= ((val1 << shift) & mask); REG_WR(sc, BXE_RECOVERY_GLOB_REG, val); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG); return (val1 != 0); } /* send load request to mcp and analyze response */ static int bxe_nic_load_request(struct bxe_softc *sc, uint32_t *load_code) { /* init fw_seq */ sc->fw_seq = (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) & DRV_MSG_SEQ_NUMBER_MASK); BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq); /* get the current FW pulse sequence */ sc->fw_drv_pulse_wr_seq = (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_pulse_mb) & DRV_PULSE_SEQ_MASK); BLOGD(sc, DBG_LOAD, "initial drv_pulse 0x%04x\n", sc->fw_drv_pulse_wr_seq); /* load request */ (*load_code) = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ, DRV_MSG_CODE_LOAD_REQ_WITH_LFA); /* if the MCP fails to respond we must abort */ if (!(*load_code)) { BLOGE(sc, "MCP response failure!\n"); return (-1); } /* if MCP refused then must abort */ if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) { BLOGE(sc, "MCP refused load request\n"); return (-1); } return (0); } /* * Check whether another PF has already loaded FW to chip. In virtualized * environments a pf from another VM may have already initialized the device * including loading FW. */ static int bxe_nic_load_analyze_req(struct bxe_softc *sc, uint32_t load_code) { uint32_t my_fw, loaded_fw; /* is another pf loaded on this engine?
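 *
 * The check below packs the driver's FW version into a dword as
 * major | (minor << 8) | (revision << 16) | (engineering << 24); for a
 * hypothetical FW 7.13.11.0 that comes out to 0x000b0d07, and it must
 * equal the value read back from XSEM_REG_PRAM or the load is aborted.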
*/ if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { /* build my FW version dword */ my_fw = (BCM_5710_FW_MAJOR_VERSION + (BCM_5710_FW_MINOR_VERSION << 8 ) + (BCM_5710_FW_REVISION_VERSION << 16) + (BCM_5710_FW_ENGINEERING_VERSION << 24)); /* read loaded FW from chip */ loaded_fw = REG_RD(sc, XSEM_REG_PRAM); BLOGD(sc, DBG_LOAD, "loaded FW 0x%08x / my FW 0x%08x\n", loaded_fw, my_fw); /* abort nic load if version mismatch */ if (my_fw != loaded_fw) { BLOGE(sc, "FW 0x%08x already loaded (mine is 0x%08x)", loaded_fw, my_fw); return (-1); } } return (0); } /* mark PMF if applicable */ static void bxe_nic_load_pmf(struct bxe_softc *sc, uint32_t load_code) { uint32_t ncsi_oem_data_addr; if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) || (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) || (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) { /* * Barrier here for ordering between the writing to sc->port.pmf here * and reading it from the periodic task. */ sc->port.pmf = 1; mb(); } else { sc->port.pmf = 0; } BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf); /* XXX needed? */ if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) { if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) { ncsi_oem_data_addr = SHMEM2_RD(sc, ncsi_oem_data_addr); if (ncsi_oem_data_addr) { REG_WR(sc, (ncsi_oem_data_addr + offsetof(struct glob_ncsi_oem_data, driver_version)), 0); } } } } static void bxe_read_mf_cfg(struct bxe_softc *sc) { int n = (CHIP_IS_MODE_4_PORT(sc) ? 2 : 1); int abs_func; int vn; if (BXE_NOMCP(sc)) { return; /* what should be the default bvalue in this case */ } /* * The formula for computing the absolute function number is... * For 2 port configuration (4 functions per port): * abs_func = 2 * vn + SC_PORT + SC_PATH * For 4 port configuration (2 functions per port): * abs_func = 4 * vn + 2 * SC_PORT + SC_PATH */ for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) { abs_func = (n * (2 * vn + SC_PORT(sc)) + SC_PATH(sc)); if (abs_func >= E1H_FUNC_MAX) { break; } sc->devinfo.mf_info.mf_config[vn] = MFCFG_RD(sc, func_mf_config[abs_func].config); } if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) { BLOGD(sc, DBG_LOAD, "mf_cfg function disabled\n"); sc->flags |= BXE_MF_FUNC_DIS; } else { BLOGD(sc, DBG_LOAD, "mf_cfg function enabled\n"); sc->flags &= ~BXE_MF_FUNC_DIS; } } /* acquire split MCP access lock register */ static int bxe_acquire_alr(struct bxe_softc *sc) { uint32_t j, val; for (j = 0; j < 1000; j++) { val = (1UL << 31); REG_WR(sc, GRCBASE_MCP + 0x9c, val); val = REG_RD(sc, GRCBASE_MCP + 0x9c); if (val & (1L << 31)) break; DELAY(5000); } if (!(val & (1L << 31))) { BLOGE(sc, "Cannot acquire MCP access lock register\n"); return (-1); } return (0); } /* release split MCP access lock register */ static void bxe_release_alr(struct bxe_softc *sc) { REG_WR(sc, GRCBASE_MCP + 0x9c, 0); } static void bxe_fan_failure(struct bxe_softc *sc) { int port = SC_PORT(sc); uint32_t ext_phy_config; /* mark the failure */ ext_phy_config = SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config); ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK; ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE; SHMEM_WR(sc, dev_info.port_hw_config[port].external_phy_config, ext_phy_config); /* log the failure */ BLOGW(sc, "Fan Failure has caused the driver to shutdown " "the card to prevent permanent damage. 
" "Please contact OEM Support for assistance\n"); /* XXX */ #if 1 bxe_panic(sc, ("Schedule task to handle fan failure\n")); #else /* * Schedule device reset (unload) * This is due to some boards consuming sufficient power when driver is * up to overheat if fan fails. */ bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state); schedule_delayed_work(&sc->sp_rtnl_task, 0); #endif } /* this function is called upon a link interrupt */ static void bxe_link_attn(struct bxe_softc *sc) { uint32_t pause_enabled = 0; struct host_port_stats *pstats; int cmng_fns; struct bxe_fastpath *fp; int i; /* Make sure that we are synced with the current statistics */ bxe_stats_handle(sc, STATS_EVENT_STOP); BLOGD(sc, DBG_LOAD, "link_vars phy_flags : %x\n", sc->link_vars.phy_flags); elink_link_update(&sc->link_params, &sc->link_vars); if (sc->link_vars.link_up) { /* dropless flow control */ if (!CHIP_IS_E1(sc) && sc->dropless_fc) { pause_enabled = 0; if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { pause_enabled = 1; } REG_WR(sc, (BAR_USTRORM_INTMEM + USTORM_ETH_PAUSE_ENABLED_OFFSET(SC_PORT(sc))), pause_enabled); } if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) { pstats = BXE_SP(sc, port_stats); /* reset old mac stats */ memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx)); } if (sc->state == BXE_STATE_OPEN) { bxe_stats_handle(sc, STATS_EVENT_LINK_UP); /* Restart tx when the link comes back. */ FOR_EACH_ETH_QUEUE(sc, i) { fp = &sc->fp[i]; taskqueue_enqueue(fp->tq, &fp->tx_task); } } } if (sc->link_vars.link_up && sc->link_vars.line_speed) { cmng_fns = bxe_get_cmng_fns_mode(sc); if (cmng_fns != CMNG_FNS_NONE) { bxe_cmng_fns_init(sc, FALSE, cmng_fns); storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); } else { /* rate shaping and fairness are disabled */ BLOGD(sc, DBG_LOAD, "single function mode without fairness\n"); } } bxe_link_report_locked(sc); if (IS_MF(sc)) { ; // XXX bxe_link_sync_notify(sc); } } static void bxe_attn_int_asserted(struct bxe_softc *sc, uint32_t asserted) { int port = SC_PORT(sc); uint32_t aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 : MISC_REG_AEU_MASK_ATTN_FUNC_0; uint32_t nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 : NIG_REG_MASK_INTERRUPT_PORT0; uint32_t aeu_mask; uint32_t nig_mask = 0; uint32_t reg_addr; uint32_t igu_acked; uint32_t cnt; if (sc->attn_state & asserted) { BLOGE(sc, "IGU ERROR attn=0x%08x\n", asserted); } bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); aeu_mask = REG_RD(sc, aeu_addr); BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly asserted 0x%08x\n", aeu_mask, asserted); aeu_mask &= ~(asserted & 0x3ff); BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask); REG_WR(sc, aeu_addr, aeu_mask); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state); sc->attn_state |= asserted; BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state); if (asserted & ATTN_HARD_WIRED_MASK) { if (asserted & ATTN_NIG_FOR_FUNC) { bxe_acquire_phy_lock(sc); /* save nig interrupt mask */ nig_mask = REG_RD(sc, nig_int_mask_addr); /* If nig_mask is not set, no need to call the update function */ if (nig_mask) { REG_WR(sc, nig_int_mask_addr, 0); bxe_link_attn(sc); } /* handle unicore attn? 
*/ } if (asserted & ATTN_SW_TIMER_4_FUNC) { BLOGD(sc, DBG_INTR, "ATTN_SW_TIMER_4_FUNC!\n"); } if (asserted & GPIO_2_FUNC) { BLOGD(sc, DBG_INTR, "GPIO_2_FUNC!\n"); } if (asserted & GPIO_3_FUNC) { BLOGD(sc, DBG_INTR, "GPIO_3_FUNC!\n"); } if (asserted & GPIO_4_FUNC) { BLOGD(sc, DBG_INTR, "GPIO_4_FUNC!\n"); } if (port == 0) { if (asserted & ATTN_GENERAL_ATTN_1) { BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_1!\n"); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0); } if (asserted & ATTN_GENERAL_ATTN_2) { BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_2!\n"); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0); } if (asserted & ATTN_GENERAL_ATTN_3) { BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_3!\n"); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0); } } else { if (asserted & ATTN_GENERAL_ATTN_4) { BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_4!\n"); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0); } if (asserted & ATTN_GENERAL_ATTN_5) { BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_5!\n"); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0); } if (asserted & ATTN_GENERAL_ATTN_6) { BLOGD(sc, DBG_INTR, "ATTN_GENERAL_ATTN_6!\n"); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0); } } } /* hardwired */ if (sc->devinfo.int_block == INT_BLOCK_HC) { reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_SET); } else { reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8); } BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n", asserted, (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); REG_WR(sc, reg_addr, asserted); /* now set back the mask */ if (asserted & ATTN_NIG_FOR_FUNC) { /* * Verify that IGU ack through BAR was written before restoring * NIG mask. This loop should exit after 2-3 iterations max. */ if (sc->devinfo.int_block != INT_BLOCK_HC) { cnt = 0; do { igu_acked = REG_RD(sc, IGU_REG_ATTENTION_ACK_BITS); } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) && (++cnt < MAX_IGU_ATTN_ACK_TO)); if (!igu_acked) { BLOGE(sc, "Failed to verify IGU ack on time\n"); } mb(); } REG_WR(sc, nig_int_mask_addr, nig_mask); bxe_release_phy_lock(sc); } } static void bxe_print_next_block(struct bxe_softc *sc, int idx, const char *blk) { BLOGI(sc, "%s%s", idx ? 
", " : "", blk); } static int bxe_check_blocks_with_parity0(struct bxe_softc *sc, uint32_t sig, int par_num, uint8_t print) { uint32_t cur_bit = 0; int i = 0; for (i = 0; sig; i++) { cur_bit = ((uint32_t)0x1 << i); if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "BRB"); break; case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "PARSER"); break; case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "TSDM"); break; case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "SEARCHER"); break; case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "TCM"); break; case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "TSEMI"); break; case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "XPB"); break; } /* Clear the bit */ sig &= ~cur_bit; } } return (par_num); } static int bxe_check_blocks_with_parity1(struct bxe_softc *sc, uint32_t sig, int par_num, uint8_t *global, uint8_t print) { int i = 0; uint32_t cur_bit = 0; for (i = 0; sig; i++) { cur_bit = ((uint32_t)0x1 << i); if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "PBF"); break; case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "QM"); break; case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "TM"); break; case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "XSDM"); break; case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "XCM"); break; case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "XSEMI"); break; case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "DOORBELLQ"); break; case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "NIG"); break; case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "VAUX PCI CORE"); *global = TRUE; break; case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "DEBUG"); break; case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "USDM"); break; case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "UCM"); break; case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "USEMI"); break; case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "UPB"); break; case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "CSDM"); break; case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "CCM"); break; } /* Clear the bit */ sig &= ~cur_bit; } } return (par_num); } static int bxe_check_blocks_with_parity2(struct bxe_softc *sc, uint32_t sig, int par_num, uint8_t print) { uint32_t cur_bit = 0; int i = 0; for (i = 0; sig; i++) { cur_bit = ((uint32_t)0x1 << i); if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "CSEMI"); break; case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR: 
if (print) bxe_print_next_block(sc, par_num++, "PXP"); break; case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "PXPPCICLOCKCLIENT"); break; case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "CFC"); break; case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "CDU"); break; case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "DMAE"); break; case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "IGU"); break; case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "MISC"); break; } /* Clear the bit */ sig &= ~cur_bit; } } return (par_num); } static int bxe_check_blocks_with_parity3(struct bxe_softc *sc, uint32_t sig, int par_num, uint8_t *global, uint8_t print) { uint32_t cur_bit = 0; int i = 0; for (i = 0; sig; i++) { cur_bit = ((uint32_t)0x1 << i); if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY: if (print) bxe_print_next_block(sc, par_num++, "MCP ROM"); *global = TRUE; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY: if (print) bxe_print_next_block(sc, par_num++, "MCP UMP RX"); *global = TRUE; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY: if (print) bxe_print_next_block(sc, par_num++, "MCP UMP TX"); *global = TRUE; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: if (print) bxe_print_next_block(sc, par_num++, "MCP SCPAD"); *global = TRUE; break; } /* Clear the bit */ sig &= ~cur_bit; } } return (par_num); } static int bxe_check_blocks_with_parity4(struct bxe_softc *sc, uint32_t sig, int par_num, uint8_t print) { uint32_t cur_bit = 0; int i = 0; for (i = 0; sig; i++) { cur_bit = ((uint32_t)0x1 << i); if (sig & cur_bit) { switch (cur_bit) { case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "PGLUE_B"); break; case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR: if (print) bxe_print_next_block(sc, par_num++, "ATC"); break; } /* Clear the bit */ sig &= ~cur_bit; } } return (par_num); } static uint8_t bxe_parity_attn(struct bxe_softc *sc, uint8_t *global, uint8_t print, uint32_t *sig) { int par_num = 0; if ((sig[0] & HW_PRTY_ASSERT_SET_0) || (sig[1] & HW_PRTY_ASSERT_SET_1) || (sig[2] & HW_PRTY_ASSERT_SET_2) || (sig[3] & HW_PRTY_ASSERT_SET_3) || (sig[4] & HW_PRTY_ASSERT_SET_4)) { BLOGE(sc, "Parity error: HW block parity attention:\n" "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n", (uint32_t)(sig[0] & HW_PRTY_ASSERT_SET_0), (uint32_t)(sig[1] & HW_PRTY_ASSERT_SET_1), (uint32_t)(sig[2] & HW_PRTY_ASSERT_SET_2), (uint32_t)(sig[3] & HW_PRTY_ASSERT_SET_3), (uint32_t)(sig[4] & HW_PRTY_ASSERT_SET_4)); if (print) BLOGI(sc, "Parity errors detected in blocks: "); par_num = bxe_check_blocks_with_parity0(sc, sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print); par_num = bxe_check_blocks_with_parity1(sc, sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print); par_num = bxe_check_blocks_with_parity2(sc, sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print); par_num = bxe_check_blocks_with_parity3(sc, sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print); par_num = bxe_check_blocks_with_parity4(sc, sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print); if (print) BLOGI(sc, "\n"); if( *global == TRUE ) { BXE_SET_ERROR_BIT(sc, BXE_ERR_GLOBAL); } return (TRUE); } return (FALSE); } static uint8_t bxe_chk_parity_attn(struct bxe_softc *sc, uint8_t *global, uint8_t print) { 
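    /*
     * Snapshot the per-port AEU after-invert registers and test them against
     * the HW_PRTY_ASSERT_SET_* masks; MCP parity bits are only considered if
     * they are currently enabled in the AEU enable register.
     */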
struct attn_route attn = { {0} }; int port = SC_PORT(sc); if(sc->state != BXE_STATE_OPEN) return FALSE; attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); /* * Since MCP attentions can't be disabled inside the block, we need to * read AEU registers to see whether they're currently disabled */ attn.sig[3] &= ((REG_RD(sc, (!port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0 : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0)) & MISC_AEU_ENABLE_MCP_PRTY_BITS) | ~MISC_AEU_ENABLE_MCP_PRTY_BITS); if (!CHIP_IS_E1x(sc)) attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); return (bxe_parity_attn(sc, global, print, attn.sig)); } static void bxe_attn_int_deasserted4(struct bxe_softc *sc, uint32_t attn) { uint32_t val; boolean_t err_flg = FALSE; if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) { val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR); BLOGE(sc, "PGLUE hw attention 0x%08x\n", val); err_flg = TRUE; if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR) BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR) BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN) BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN) BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN) BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN) BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN) BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n"); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW) BLOGE(sc, "PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n"); } if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) { val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR); BLOGE(sc, "ATC hw attention 0x%08x\n", val); err_flg = TRUE; if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR) BLOGE(sc, "ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n"); if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND) BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n"); if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS) BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n"); if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT) BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n"); if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR) BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n"); if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU) BLOGE(sc, "ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n"); } if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) { BLOGE(sc, "FATAL parity attention set4 0x%08x\n", (uint32_t)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR))); err_flg = TRUE; } if (err_flg) { BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC); taskqueue_enqueue_timeout(taskqueue_thread, &sc->sp_err_timeout_task, hz/10); } } static void bxe_e1h_disable(struct bxe_softc *sc) { int port = SC_PORT(sc); 
bxe_tx_disable(sc); REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 0); } static void bxe_e1h_enable(struct bxe_softc *sc) { int port = SC_PORT(sc); REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1); // XXX bxe_tx_enable(sc); } /* * called due to MCP event (on pmf): * reread new bandwidth configuration * configure FW * notify others function about the change */ static void bxe_config_mf_bw(struct bxe_softc *sc) { if (sc->link_vars.link_up) { bxe_cmng_fns_init(sc, TRUE, CMNG_FNS_MINMAX); // XXX bxe_link_sync_notify(sc); } storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); } static void bxe_set_mf_bw(struct bxe_softc *sc) { bxe_config_mf_bw(sc); bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW_ACK, 0); } static void bxe_handle_eee_event(struct bxe_softc *sc) { BLOGD(sc, DBG_INTR, "EEE - LLDP event\n"); bxe_fw_command(sc, DRV_MSG_CODE_EEE_RESULTS_ACK, 0); } #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3 static void bxe_drv_info_ether_stat(struct bxe_softc *sc) { struct eth_stats_info *ether_stat = &sc->sp->drv_info_to_mcp.ether_stat; strlcpy(ether_stat->version, BXE_DRIVER_VERSION, ETH_STAT_INFO_VERSION_LEN); /* XXX (+ MAC_PAD) taken from other driver... verify this is right */ sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj, DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, ether_stat->mac_local + MAC_PAD, MAC_PAD, ETH_ALEN); ether_stat->mtu_size = sc->mtu; ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) { ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK; } // XXX ether_stat->feature_flags |= ???; ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0; ether_stat->txq_size = sc->tx_ring_size; ether_stat->rxq_size = sc->rx_ring_size; } static void bxe_handle_drv_info_req(struct bxe_softc *sc) { enum drv_info_opcode op_code; uint32_t drv_info_ctl = SHMEM2_RD(sc, drv_info_control); /* if drv_info version supported by MFW doesn't match - send NACK */ if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) { bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); return; } op_code = ((drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >> DRV_INFO_CONTROL_OP_CODE_SHIFT); memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp)); switch (op_code) { case ETH_STATS_OPCODE: bxe_drv_info_ether_stat(sc); break; case FCOE_STATS_OPCODE: case ISCSI_STATS_OPCODE: default: /* if op code isn't supported - send NACK */ bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_NACK, 0); return; } /* * If we got drv_info attn from MFW then these fields are defined in * shmem2 for sure */ SHMEM2_WR(sc, drv_info_host_addr_lo, U64_LO(BXE_SP_MAPPING(sc, drv_info_to_mcp))); SHMEM2_WR(sc, drv_info_host_addr_hi, U64_HI(BXE_SP_MAPPING(sc, drv_info_to_mcp))); bxe_fw_command(sc, DRV_MSG_CODE_DRV_INFO_ACK, 0); } static void bxe_dcc_event(struct bxe_softc *sc, uint32_t dcc_event) { BLOGD(sc, DBG_INTR, "dcc_event 0x%08x\n", dcc_event); if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { /* * This is the only place besides the function initialization * where the sc->flags can change so it is done without any * locks */ if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) { BLOGD(sc, DBG_INTR, "mf_cfg function disabled\n"); sc->flags |= BXE_MF_FUNC_DIS; bxe_e1h_disable(sc); } else { BLOGD(sc, DBG_INTR, "mf_cfg function enabled\n"); sc->flags &= ~BXE_MF_FUNC_DIS; bxe_e1h_enable(sc); } dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; } if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { bxe_config_mf_bw(sc); dcc_event &= 
~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; } /* Report results to MCP */ if (dcc_event) bxe_fw_command(sc, DRV_MSG_CODE_DCC_FAILURE, 0); else bxe_fw_command(sc, DRV_MSG_CODE_DCC_OK, 0); } static void bxe_pmf_update(struct bxe_softc *sc) { int port = SC_PORT(sc); uint32_t val; sc->port.pmf = 1; BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf); /* * We need the mb() to ensure the ordering between the writing to * sc->port.pmf here and reading it from the bxe_periodic_task(). */ mb(); /* queue a periodic task */ // XXX schedule task... // XXX bxe_dcbx_pmf_update(sc); /* enable nig attention */ val = (0xff0f | (1 << (SC_VN(sc) + 4))); if (sc->devinfo.int_block == INT_BLOCK_HC) { REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, val); REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, val); } else if (!CHIP_IS_E1x(sc)) { REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val); REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val); } bxe_stats_handle(sc, STATS_EVENT_PMF); } static int bxe_mc_assert(struct bxe_softc *sc) { char last_idx; int i, rc = 0; uint32_t row0, row1, row2, row3; /* XSTORM */ last_idx = REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET); if (last_idx) BLOGE(sc, "XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); /* print the asserts */ for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { row0 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i)); row1 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 4); row2 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 8); row3 = REG_RD(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_OFFSET(i) + 12); if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { BLOGE(sc, "XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", i, row3, row2, row1, row0); rc++; } else { break; } } /* TSTORM */ last_idx = REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET); if (last_idx) { BLOGE(sc, "TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); } /* print the asserts */ for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { row0 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i)); row1 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 4); row2 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 8); row3 = REG_RD(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_OFFSET(i) + 12); if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { BLOGE(sc, "TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", i, row3, row2, row1, row0); rc++; } else { break; } } /* CSTORM */ last_idx = REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET); if (last_idx) { BLOGE(sc, "CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); } /* print the asserts */ for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { row0 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i)); row1 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 4); row2 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 8); row3 = REG_RD(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_OFFSET(i) + 12); if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { BLOGE(sc, "CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", i, row3, row2, row1, row0); rc++; } else { break; } } /* USTORM */ last_idx = REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET); if (last_idx) { BLOGE(sc, "USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx); } /* print the asserts */ for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) { row0 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i)); row1 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 4); row2 = REG_RD(sc, 
BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 8); row3 = REG_RD(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_OFFSET(i) + 12); if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) { BLOGE(sc, "USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n", i, row3, row2, row1, row0); rc++; } else { break; } } return (rc); } static void bxe_attn_int_deasserted3(struct bxe_softc *sc, uint32_t attn) { int func = SC_FUNC(sc); uint32_t val; if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) { if (attn & BXE_PMF_LINK_ASSERT(sc)) { REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); bxe_read_mf_cfg(sc); sc->devinfo.mf_info.mf_config[SC_VN(sc)] = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); val = SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_status); if (val & DRV_STATUS_DCC_EVENT_MASK) bxe_dcc_event(sc, (val & DRV_STATUS_DCC_EVENT_MASK)); if (val & DRV_STATUS_SET_MF_BW) bxe_set_mf_bw(sc); if (val & DRV_STATUS_DRV_INFO_REQ) bxe_handle_drv_info_req(sc); if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF)) bxe_pmf_update(sc); if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS) bxe_handle_eee_event(sc); if (sc->link_vars.periodic_flags & ELINK_PERIODIC_FLAGS_LINK_EVENT) { /* sync with link */ bxe_acquire_phy_lock(sc); sc->link_vars.periodic_flags &= ~ELINK_PERIODIC_FLAGS_LINK_EVENT; bxe_release_phy_lock(sc); if (IS_MF(sc)) ; // XXX bxe_link_sync_notify(sc); bxe_link_report(sc); } /* * Always call it here: bxe_link_report() will * prevent the link indication duplication. */ bxe_link_status_update(sc); } else if (attn & BXE_MC_ASSERT_BITS) { BLOGE(sc, "MC assert!\n"); bxe_mc_assert(sc); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_8, 0); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_7, 0); bxe_int_disable(sc); BXE_SET_ERROR_BIT(sc, BXE_ERR_MC_ASSERT); taskqueue_enqueue_timeout(taskqueue_thread, &sc->sp_err_timeout_task, hz/10); } else if (attn & BXE_MCP_ASSERT) { BLOGE(sc, "MCP assert!\n"); REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0); BXE_SET_ERROR_BIT(sc, BXE_ERR_MCP_ASSERT); taskqueue_enqueue_timeout(taskqueue_thread, &sc->sp_err_timeout_task, hz/10); bxe_int_disable(sc); /*avoid repetive assert alert */ } else { BLOGE(sc, "Unknown HW assert! (attn 0x%08x)\n", attn); } } if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) { BLOGE(sc, "LATCHED attention 0x%08x (masked)\n", attn); if (attn & BXE_GRC_TIMEOUT) { val = CHIP_IS_E1(sc) ? 0 : REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN); BLOGE(sc, "GRC time-out 0x%08x\n", val); } if (attn & BXE_GRC_RSV) { val = CHIP_IS_E1(sc) ? 
0 : REG_RD(sc, MISC_REG_GRC_RSV_ATTN); BLOGE(sc, "GRC reserved 0x%08x\n", val); } REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff); } } static void bxe_attn_int_deasserted2(struct bxe_softc *sc, uint32_t attn) { int port = SC_PORT(sc); int reg_offset; uint32_t val0, mask0, val1, mask1; uint32_t val; boolean_t err_flg = FALSE; if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) { val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR); BLOGE(sc, "CFC hw attention 0x%08x\n", val); /* CFC error attention */ if (val & 0x2) { BLOGE(sc, "FATAL error from CFC\n"); err_flg = TRUE; } } if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) { val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0); BLOGE(sc, "PXP hw attention-0 0x%08x\n", val); /* RQ_USDMDP_FIFO_OVERFLOW */ if (val & 0x18000) { BLOGE(sc, "FATAL error from PXP\n"); err_flg = TRUE; } if (!CHIP_IS_E1x(sc)) { val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1); BLOGE(sc, "PXP hw attention-1 0x%08x\n", val); err_flg = TRUE; } } #define PXP2_EOP_ERROR_BIT PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR #define AEU_PXP2_HW_INT_BIT AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT if (attn & AEU_PXP2_HW_INT_BIT) { /* CQ47854 workaround do not panic on * PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR */ if (!CHIP_IS_E1x(sc)) { mask0 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_0); val1 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_1); mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1); val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0); /* * If the only PXP2_EOP_ERROR_BIT is set in * STS0 and STS1 - clear it * * probably we lose additional attentions between * STS0 and STS_CLR0, in this case user will not * be notified about them */ if (val0 & mask0 & PXP2_EOP_ERROR_BIT && !(val1 & mask1)) val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0); /* print the register, since no one can restore it */ BLOGE(sc, "PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x\n", val0); /* * if PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR * then notify */ if (val0 & PXP2_EOP_ERROR_BIT) { BLOGE(sc, "PXP2_WR_PGLUE_EOP_ERROR\n"); err_flg = TRUE; /* * if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is * set then clear attention from PXP2 block without panic */ if (((val0 & mask0) == PXP2_EOP_ERROR_BIT) && ((val1 & mask1) == 0)) attn &= ~AEU_PXP2_HW_INT_BIT; } } } if (attn & HW_INTERRUT_ASSERT_SET_2) { reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 : MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2); val = REG_RD(sc, reg_offset); val &= ~(attn & HW_INTERRUT_ASSERT_SET_2); REG_WR(sc, reg_offset, val); BLOGE(sc, "FATAL HW block attention set2 0x%x\n", (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_2)); err_flg = TRUE; bxe_panic(sc, ("HW block attention set2\n")); } if(err_flg) { BXE_SET_ERROR_BIT(sc, BXE_ERR_GLOBAL); taskqueue_enqueue_timeout(taskqueue_thread, &sc->sp_err_timeout_task, hz/10); } } static void bxe_attn_int_deasserted1(struct bxe_softc *sc, uint32_t attn) { int port = SC_PORT(sc); int reg_offset; uint32_t val; boolean_t err_flg = FALSE; if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) { val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR); BLOGE(sc, "DB hw attention 0x%08x\n", val); /* DORQ discard attention */ if (val & 0x2) { BLOGE(sc, "FATAL error from DORQ\n"); err_flg = TRUE; } } if (attn & HW_INTERRUT_ASSERT_SET_1) { reg_offset = (port ? 
MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 : MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1); val = REG_RD(sc, reg_offset); val &= ~(attn & HW_INTERRUT_ASSERT_SET_1); REG_WR(sc, reg_offset, val); BLOGE(sc, "FATAL HW block attention set1 0x%08x\n", (uint32_t)(attn & HW_INTERRUT_ASSERT_SET_1)); err_flg = TRUE; bxe_panic(sc, ("HW block attention set1\n")); } if(err_flg) { BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC); taskqueue_enqueue_timeout(taskqueue_thread, &sc->sp_err_timeout_task, hz/10); } } static void bxe_attn_int_deasserted0(struct bxe_softc *sc, uint32_t attn) { int port = SC_PORT(sc); int reg_offset; uint32_t val; reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) { val = REG_RD(sc, reg_offset); val &= ~AEU_INPUTS_ATTN_BITS_SPIO5; REG_WR(sc, reg_offset, val); BLOGW(sc, "SPIO5 hw attention\n"); /* Fan failure attention */ elink_hw_reset_phy(&sc->link_params); bxe_fan_failure(sc); } if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) { bxe_acquire_phy_lock(sc); elink_handle_module_detect_int(&sc->link_params); bxe_release_phy_lock(sc); } if (attn & HW_INTERRUT_ASSERT_SET_0) { val = REG_RD(sc, reg_offset); val &= ~(attn & HW_INTERRUT_ASSERT_SET_0); REG_WR(sc, reg_offset, val); BXE_SET_ERROR_BIT(sc, BXE_ERR_MISC); taskqueue_enqueue_timeout(taskqueue_thread, &sc->sp_err_timeout_task, hz/10); bxe_panic(sc, ("FATAL HW block attention set0 0x%lx\n", (attn & HW_INTERRUT_ASSERT_SET_0))); } } static void bxe_attn_int_deasserted(struct bxe_softc *sc, uint32_t deasserted) { struct attn_route attn; struct attn_route *group_mask; int port = SC_PORT(sc); int index; uint32_t reg_addr; uint32_t val; uint32_t aeu_mask; uint8_t global = FALSE; /* * Need to take HW lock because MCP or other port might also * try to handle this event. */ bxe_acquire_alr(sc); if (bxe_chk_parity_attn(sc, &global, TRUE)) { /* XXX * In case of parity errors don't handle attentions so that * other function would "see" parity errors. */ // XXX schedule a recovery task... 
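        /*
         * Instead, latch BXE_ERR_PARITY, hand recovery off to the deferred
         * error task and bail out before the AEU after-invert registers are
         * read below.
         */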
/* disable HW interrupts */ bxe_int_disable(sc); BXE_SET_ERROR_BIT(sc, BXE_ERR_PARITY); taskqueue_enqueue_timeout(taskqueue_thread, &sc->sp_err_timeout_task, hz/10); bxe_release_alr(sc); return; } attn.sig[0] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); attn.sig[1] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); if (!CHIP_IS_E1x(sc)) { attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); } else { attn.sig[4] = 0; } BLOGD(sc, DBG_INTR, "attn: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]); for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { if (deasserted & (1 << index)) { group_mask = &sc->attn_group[index]; BLOGD(sc, DBG_INTR, "group[%d]: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", index, group_mask->sig[0], group_mask->sig[1], group_mask->sig[2], group_mask->sig[3], group_mask->sig[4]); bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]); bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]); bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]); bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]); bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]); } } bxe_release_alr(sc); if (sc->devinfo.int_block == INT_BLOCK_HC) { reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR); } else { reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8); } val = ~deasserted; BLOGD(sc, DBG_INTR, "about to mask 0x%08x at %s addr 0x%08x\n", val, (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr); REG_WR(sc, reg_addr, val); if (~sc->attn_state & deasserted) { BLOGE(sc, "IGU error\n"); } reg_addr = port ? 
MISC_REG_AEU_MASK_ATTN_FUNC_1 : MISC_REG_AEU_MASK_ATTN_FUNC_0; bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); aeu_mask = REG_RD(sc, reg_addr); BLOGD(sc, DBG_INTR, "aeu_mask 0x%08x newly deasserted 0x%08x\n", aeu_mask, deasserted); aeu_mask |= (deasserted & 0x3ff); BLOGD(sc, DBG_INTR, "new mask 0x%08x\n", aeu_mask); REG_WR(sc, reg_addr, aeu_mask); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port); BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state); sc->attn_state &= ~deasserted; BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state); } static void bxe_attn_int(struct bxe_softc *sc) { /* read local copy of bits */ uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits); uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack); uint32_t attn_state = sc->attn_state; /* look for changed bits */ uint32_t asserted = attn_bits & ~attn_ack & ~attn_state; uint32_t deasserted = ~attn_bits & attn_ack & attn_state; BLOGD(sc, DBG_INTR, "attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x\n", attn_bits, attn_ack, asserted, deasserted); if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) { BLOGE(sc, "BAD attention state\n"); } /* handle bits that were raised */ if (asserted) { bxe_attn_int_asserted(sc, asserted); } if (deasserted) { bxe_attn_int_deasserted(sc, deasserted); } } static uint16_t bxe_update_dsb_idx(struct bxe_softc *sc) { struct host_sp_status_block *def_sb = sc->def_sb; uint16_t rc = 0; mb(); /* status block is written to by the chip */ if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) { sc->def_att_idx = def_sb->atten_status_block.attn_bits_index; rc |= BXE_DEF_SB_ATT_IDX; } if (sc->def_idx != def_sb->sp_sb.running_index) { sc->def_idx = def_sb->sp_sb.running_index; rc |= BXE_DEF_SB_IDX; } mb(); return (rc); } static inline struct ecore_queue_sp_obj * bxe_cid_to_q_obj(struct bxe_softc *sc, uint32_t cid) { BLOGD(sc, DBG_SP, "retrieving fp from cid %d\n", cid); return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj); } static void bxe_handle_mcast_eqe(struct bxe_softc *sc) { struct ecore_mcast_ramrod_params rparam; int rc; memset(&rparam, 0, sizeof(rparam)); rparam.mcast_obj = &sc->mcast_obj; BXE_MCAST_LOCK(sc); /* clear pending state for the last command */ sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw); /* if there are pending mcast commands - send them */ if (sc->mcast_obj.check_pending(&sc->mcast_obj)) { rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); if (rc < 0) { BLOGD(sc, DBG_SP, "ERROR: Failed to send pending mcast commands (%d)\n", rc); } } BXE_MCAST_UNLOCK(sc); } static void bxe_handle_classification_eqe(struct bxe_softc *sc, union event_ring_elem *elem) { unsigned long ramrod_flags = 0; int rc = 0; uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK; struct ecore_vlan_mac_obj *vlan_mac_obj; /* always push next commands out, don't wait here */ bit_set(&ramrod_flags, RAMROD_CONT); switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) { case ECORE_FILTER_MAC_PENDING: BLOGD(sc, DBG_SP, "Got SETUP_MAC completions\n"); vlan_mac_obj = &sc->sp_objs[cid].mac_obj; break; case ECORE_FILTER_MCAST_PENDING: BLOGD(sc, DBG_SP, "Got SETUP_MCAST completions\n"); /* * This is only relevant for 57710 where multicast MACs are * configured as unicast MACs using the same ramrod. 
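 *
 * (The switch above dispatches on the upper bits of the little-endian echo
 * field, shifted down by BXE_SWCID_SHIFT; the low BXE_SWCID_MASK bits carry
 * the client ID used to pick the per-queue MAC object.)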
*/ bxe_handle_mcast_eqe(sc); return; default: BLOGE(sc, "Unsupported classification command: %d\n", elem->message.data.eth_event.echo); return; } rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags); if (rc < 0) { BLOGE(sc, "Failed to schedule new commands (%d)\n", rc); } else if (rc > 0) { BLOGD(sc, DBG_SP, "Scheduled next pending commands...\n"); } } static void bxe_handle_rx_mode_eqe(struct bxe_softc *sc, union event_ring_elem *elem) { bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state); /* send rx_mode command again if was requested */ if (bxe_test_and_clear_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state)) { bxe_set_storm_rx_mode(sc); } } static void bxe_update_eq_prod(struct bxe_softc *sc, uint16_t prod) { storm_memset_eq_prod(sc, prod, SC_FUNC(sc)); wmb(); /* keep prod updates ordered */ } static void bxe_eq_int(struct bxe_softc *sc) { uint16_t hw_cons, sw_cons, sw_prod; union event_ring_elem *elem; uint8_t echo; uint32_t cid; uint8_t opcode; int spqe_cnt = 0; struct ecore_queue_sp_obj *q_obj; struct ecore_func_sp_obj *f_obj = &sc->func_obj; struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw; hw_cons = le16toh(*sc->eq_cons_sb); /* * The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256. * when we get to the next-page we need to adjust so the loop * condition below will be met. The next element is the size of a * regular element and hence incrementing by 1 */ if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE) { hw_cons++; } /* * This function may never run in parallel with itself for a * specific sc and no need for a read memory barrier here. */ sw_cons = sc->eq_cons; sw_prod = sc->eq_prod; BLOGD(sc, DBG_SP,"EQ: hw_cons=%u sw_cons=%u eq_spq_left=0x%lx\n", hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left)); for (; sw_cons != hw_cons; sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { elem = &sc->eq[EQ_DESC(sw_cons)]; /* elem CID originates from FW, actually LE */ cid = SW_CID(elem->message.data.cfc_del_event.cid); opcode = elem->message.opcode; /* handle eq element */ switch (opcode) { case EVENT_RING_OPCODE_STAT_QUERY: BLOGD(sc, DBG_SP, "got statistics completion event %d\n", sc->stats_comp++); /* nothing to do with stats comp */ goto next_spqe; case EVENT_RING_OPCODE_CFC_DEL: /* handle according to cid range */ /* we may want to verify here that the sc state is HALTING */ BLOGD(sc, DBG_SP, "got delete ramrod for MULTI[%d]\n", cid); q_obj = bxe_cid_to_q_obj(sc, cid); if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) { break; } goto next_spqe; case EVENT_RING_OPCODE_STOP_TRAFFIC: BLOGD(sc, DBG_SP, "got STOP TRAFFIC\n"); if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) { break; } // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_PAUSED); goto next_spqe; case EVENT_RING_OPCODE_START_TRAFFIC: BLOGD(sc, DBG_SP, "got START TRAFFIC\n"); if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) { break; } // XXX bxe_dcbx_set_params(sc, BXE_DCBX_STATE_TX_RELEASED); goto next_spqe; case EVENT_RING_OPCODE_FUNCTION_UPDATE: echo = elem->message.data.function_update_event.echo; if (echo == SWITCH_UPDATE) { BLOGD(sc, DBG_SP, "got FUNC_SWITCH_UPDATE ramrod\n"); if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_SWITCH_UPDATE)) { break; } } else { BLOGD(sc, DBG_SP, "AFEX: ramrod completed FUNCTION_UPDATE\n"); } goto next_spqe; case EVENT_RING_OPCODE_FORWARD_SETUP: q_obj = &bxe_fwd_sp_obj(sc, q_obj); if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_SETUP_TX_ONLY)) { break; } goto next_spqe; case EVENT_RING_OPCODE_FUNCTION_START: 
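        /* FUNCTION_START ramrod completed; let the ecore function state machine advance */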
BLOGD(sc, DBG_SP, "got FUNC_START ramrod\n"); if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) { break; } goto next_spqe; case EVENT_RING_OPCODE_FUNCTION_STOP: BLOGD(sc, DBG_SP, "got FUNC_STOP ramrod\n"); if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) { break; } goto next_spqe; } switch (opcode | sc->state) { case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPEN): case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BXE_STATE_OPENING_WAITING_PORT): cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK; BLOGD(sc, DBG_SP, "got RSS_UPDATE ramrod. CID %d\n", cid); rss_raw->clear_pending(rss_raw); break; case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_OPEN): case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_DIAG): case (EVENT_RING_OPCODE_SET_MAC | BXE_STATE_CLOSING_WAITING_HALT): case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_OPEN): case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_DIAG): case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BXE_STATE_CLOSING_WAITING_HALT): BLOGD(sc, DBG_SP, "got (un)set mac ramrod\n"); bxe_handle_classification_eqe(sc, elem); break; case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_OPEN): case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_DIAG): case (EVENT_RING_OPCODE_MULTICAST_RULES | BXE_STATE_CLOSING_WAITING_HALT): BLOGD(sc, DBG_SP, "got mcast ramrod\n"); bxe_handle_mcast_eqe(sc); break; case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_OPEN): case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_DIAG): case (EVENT_RING_OPCODE_FILTERS_RULES | BXE_STATE_CLOSING_WAITING_HALT): BLOGD(sc, DBG_SP, "got rx_mode ramrod\n"); bxe_handle_rx_mode_eqe(sc, elem); break; default: /* unknown event log error and continue */ BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n", elem->message.opcode, sc->state); } next_spqe: spqe_cnt++; } /* for */ mb(); atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt); sc->eq_cons = sw_cons; sc->eq_prod = sw_prod; /* make sure that above mem writes were issued towards the memory */ wmb(); /* update producer */ bxe_update_eq_prod(sc, sc->eq_prod); } static void bxe_handle_sp_tq(void *context, int pending) { struct bxe_softc *sc = (struct bxe_softc *)context; uint16_t status; BLOGD(sc, DBG_SP, "---> SP TASK <---\n"); /* what work needs to be performed? */ status = bxe_update_dsb_idx(sc); BLOGD(sc, DBG_SP, "dsb status 0x%04x\n", status); /* HW attentions */ if (status & BXE_DEF_SB_ATT_IDX) { BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n"); bxe_attn_int(sc); status &= ~BXE_DEF_SB_ATT_IDX; } /* SP events: STAT_QUERY and others */ if (status & BXE_DEF_SB_IDX) { /* handle EQ completions */ BLOGD(sc, DBG_SP, "---> EQ INTR <---\n"); bxe_eq_int(sc); bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, le16toh(sc->def_idx), IGU_INT_NOP, 1); status &= ~BXE_DEF_SB_IDX; } /* if status is non zero then something went wrong */ if (__predict_false(status)) { BLOGE(sc, "Got an unknown SP interrupt! (0x%04x)\n", status); } /* ack status block only if something was actually handled */ bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID, le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1); /* * Must be called after the EQ processing (since eq leads to sriov * ramrod completion flows). * This flow may have been scheduled by the arrival of a ramrod * completion, or by the sriov code rescheduling itself. 
*/ // XXX bxe_iov_sp_task(sc); } static void bxe_handle_fp_tq(void *context, int pending) { struct bxe_fastpath *fp = (struct bxe_fastpath *)context; struct bxe_softc *sc = fp->sc; uint8_t more_tx = FALSE; uint8_t more_rx = FALSE; BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index); /* XXX * IFF_DRV_RUNNING state can't be checked here since we process * slowpath events on a client queue during setup. Instead * we need to add a "process/continue" flag here that the driver * can use to tell the task here not to do anything. */ #if 0 if (!(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) { return; } #endif /* update the fastpath index */ bxe_update_fp_sb_idx(fp); /* XXX add loop here if ever support multiple tx CoS */ /* fp->txdata[cos] */ if (bxe_has_tx_work(fp)) { BXE_FP_TX_LOCK(fp); more_tx = bxe_txeof(sc, fp); BXE_FP_TX_UNLOCK(fp); } if (bxe_has_rx_work(fp)) { more_rx = bxe_rxeof(sc, fp); } if (more_rx /*|| more_tx*/) { /* still more work to do */ taskqueue_enqueue(fp->tq, &fp->tq_task); return; } bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1); } static void bxe_task_fp(struct bxe_fastpath *fp) { struct bxe_softc *sc = fp->sc; uint8_t more_tx = FALSE; uint8_t more_rx = FALSE; BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index); /* update the fastpath index */ bxe_update_fp_sb_idx(fp); /* XXX add loop here if ever support multiple tx CoS */ /* fp->txdata[cos] */ if (bxe_has_tx_work(fp)) { BXE_FP_TX_LOCK(fp); more_tx = bxe_txeof(sc, fp); BXE_FP_TX_UNLOCK(fp); } if (bxe_has_rx_work(fp)) { more_rx = bxe_rxeof(sc, fp); } if (more_rx /*|| more_tx*/) { /* still more work to do, bail out if this ISR and process later */ taskqueue_enqueue(fp->tq, &fp->tq_task); return; } /* * Here we write the fastpath index taken before doing any tx or rx work. * It is very well possible other hw events occurred up to this point and * they were actually processed accordingly above. Since we're going to * write an older fastpath index, an interrupt is coming which we might * not do any work in. */ bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1); } /* * Legacy interrupt entry point. * * Verifies that the controller generated the interrupt and * then calls a separate routine to handle the various * interrupt causes: link, RX, and TX. 
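 * With a single shared vector, the status word returned by bxe_ack_int()
 * is used to demultiplex slowpath work (bit 0, handed to the sp taskqueue)
 * from per-queue fastpath work.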
*/ static void bxe_intr_legacy(void *xsc) { struct bxe_softc *sc = (struct bxe_softc *)xsc; struct bxe_fastpath *fp; uint16_t status, mask; int i; BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n"); /* * 0 for ustorm, 1 for cstorm * the bits returned from ack_int() are 0-15 * bit 0 = attention status block * bit 1 = fast path status block * a mask of 0x2 or more = tx/rx event * a mask of 1 = slow path event */ status = bxe_ack_int(sc); /* the interrupt is not for us */ if (__predict_false(status == 0)) { BLOGD(sc, DBG_INTR, "Not our interrupt!\n"); return; } BLOGD(sc, DBG_INTR, "Interrupt status 0x%04x\n", status); FOR_EACH_ETH_QUEUE(sc, i) { fp = &sc->fp[i]; mask = (0x2 << (fp->index + CNIC_SUPPORT(sc))); if (status & mask) { /* acknowledge and disable further fastpath interrupts */ bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); bxe_task_fp(fp); status &= ~mask; } } if (__predict_false(status & 0x1)) { /* acknowledge and disable further slowpath interrupts */ bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); /* schedule slowpath handler */ taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task); status &= ~0x1; } if (__predict_false(status)) { BLOGW(sc, "Unexpected fastpath status (0x%08x)!\n", status); } } /* slowpath interrupt entry point */ static void bxe_intr_sp(void *xsc) { struct bxe_softc *sc = (struct bxe_softc *)xsc; BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n"); /* acknowledge and disable further slowpath interrupts */ bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); /* schedule slowpath handler */ taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task); } /* fastpath interrupt entry point */ static void bxe_intr_fp(void *xfp) { struct bxe_fastpath *fp = (struct bxe_fastpath *)xfp; struct bxe_softc *sc = fp->sc; BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index); BLOGD(sc, DBG_INTR, "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n", curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id); /* acknowledge and disable further fastpath interrupts */ bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); bxe_task_fp(fp); } /* Release all interrupts allocated by the driver. */ static void bxe_interrupt_free(struct bxe_softc *sc) { int i; switch (sc->interrupt_mode) { case INTR_MODE_INTX: BLOGD(sc, DBG_LOAD, "Releasing legacy INTx vector\n"); if (sc->intr[0].resource != NULL) { bus_release_resource(sc->dev, SYS_RES_IRQ, sc->intr[0].rid, sc->intr[0].resource); } break; case INTR_MODE_MSI: for (i = 0; i < sc->intr_count; i++) { BLOGD(sc, DBG_LOAD, "Releasing MSI vector %d\n", i); if (sc->intr[i].resource && sc->intr[i].rid) { bus_release_resource(sc->dev, SYS_RES_IRQ, sc->intr[i].rid, sc->intr[i].resource); } } pci_release_msi(sc->dev); break; case INTR_MODE_MSIX: for (i = 0; i < sc->intr_count; i++) { BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i); if (sc->intr[i].resource && sc->intr[i].rid) { bus_release_resource(sc->dev, SYS_RES_IRQ, sc->intr[i].rid, sc->intr[i].resource); } } pci_release_msi(sc->dev); break; default: /* nothing to do as initial allocation failed */ break; } } /* * This function determines and allocates the appropriate * interrupt based on system capabilites and user request. * * The user may force a particular interrupt mode, specify * the number of receive queues, specify the method for * distribuitng received frames to receive queues, or use * the default settings which will automatically select the * best supported combination. In addition, the OS may or * may not support certain combinations of these settings. 
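 * The fallback order implemented below is MSI-X (one slowpath vector plus
 * one vector per receive queue), then a single MSI vector, then a shared
 * legacy INTx line.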
* This routine attempts to reconcile the settings requested * by the user with the capabilites available from the system * to select the optimal combination of features. * * Returns: * 0 = Success, !0 = Failure. */ static int bxe_interrupt_alloc(struct bxe_softc *sc) { int msix_count = 0; int msi_count = 0; int num_requested = 0; int num_allocated = 0; int rid, i, j; int rc; /* get the number of available MSI/MSI-X interrupts from the OS */ if (sc->interrupt_mode > 0) { if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) { msix_count = pci_msix_count(sc->dev); } if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) { msi_count = pci_msi_count(sc->dev); } BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n", msi_count, msix_count); } do { /* try allocating MSI-X interrupt resources (at least 2) */ if (sc->interrupt_mode != INTR_MODE_MSIX) { break; } if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) || (msix_count < 2)) { sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ break; } /* ask for the necessary number of MSI-X vectors */ num_requested = min((sc->num_queues + 1), msix_count); BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested); num_allocated = num_requested; if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) { BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc); sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ break; } if (num_allocated < 2) { /* possible? */ BLOGE(sc, "MSI-X allocation less than 2!\n"); sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ pci_release_msi(sc->dev); break; } BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n", num_requested, num_allocated); /* best effort so use the number of vectors allocated to us */ sc->intr_count = num_allocated; sc->num_queues = num_allocated - 1; rid = 1; /* initial resource identifier */ /* allocate the MSI-X vectors */ for (i = 0; i < num_allocated; i++) { sc->intr[i].rid = (rid + i); if ((sc->intr[i].resource = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->intr[i].rid, RF_ACTIVE)) == NULL) { BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n", i, (rid + i)); for (j = (i - 1); j >= 0; j--) { bus_release_resource(sc->dev, SYS_RES_IRQ, sc->intr[j].rid, sc->intr[j].resource); } sc->intr_count = 0; sc->num_queues = 0; sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */ pci_release_msi(sc->dev); break; } BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i)); } } while (0); do { /* try allocating MSI vector resources (at least 2) */ if (sc->interrupt_mode != INTR_MODE_MSI) { break; } if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) || (msi_count < 1)) { sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ break; } /* ask for a single MSI vector */ num_requested = 1; BLOGD(sc, DBG_LOAD, "Requesting %d MSI vectors\n", num_requested); num_allocated = num_requested; if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) { BLOGE(sc, "MSI alloc failed (%d)!\n", rc); sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ break; } if (num_allocated != 1) { /* possible? 
*/ BLOGE(sc, "MSI allocation is not 1!\n"); sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ pci_release_msi(sc->dev); break; } BLOGI(sc, "MSI vectors Requested %d and Allocated %d\n", num_requested, num_allocated); /* best effort so use the number of vectors allocated to us */ sc->intr_count = num_allocated; sc->num_queues = num_allocated; rid = 1; /* initial resource identifier */ sc->intr[0].rid = rid; if ((sc->intr[0].resource = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->intr[0].rid, RF_ACTIVE)) == NULL) { BLOGE(sc, "Failed to map MSI[0] (rid=%d)!\n", rid); sc->intr_count = 0; sc->num_queues = 0; sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */ pci_release_msi(sc->dev); break; } BLOGD(sc, DBG_LOAD, "Mapped MSI[0] (rid=%d)\n", rid); } while (0); do { /* try allocating INTx vector resources */ if (sc->interrupt_mode != INTR_MODE_INTX) { break; } BLOGD(sc, DBG_LOAD, "Requesting legacy INTx interrupt\n"); /* only one vector for INTx */ sc->intr_count = 1; sc->num_queues = 1; rid = 0; /* initial resource identifier */ sc->intr[0].rid = rid; if ((sc->intr[0].resource = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->intr[0].rid, (RF_ACTIVE | RF_SHAREABLE))) == NULL) { BLOGE(sc, "Failed to map INTx (rid=%d)!\n", rid); sc->intr_count = 0; sc->num_queues = 0; sc->interrupt_mode = -1; /* Failed! */ break; } BLOGD(sc, DBG_LOAD, "Mapped INTx (rid=%d)\n", rid); } while (0); if (sc->interrupt_mode == -1) { BLOGE(sc, "Interrupt Allocation: FAILED!!!\n"); rc = 1; } else { BLOGD(sc, DBG_LOAD, "Interrupt Allocation: interrupt_mode=%d, num_queues=%d\n", sc->interrupt_mode, sc->num_queues); rc = 0; } return (rc); } static void bxe_interrupt_detach(struct bxe_softc *sc) { struct bxe_fastpath *fp; int i; /* release interrupt resources */ for (i = 0; i < sc->intr_count; i++) { if (sc->intr[i].resource && sc->intr[i].tag) { BLOGD(sc, DBG_LOAD, "Disabling interrupt vector %d\n", i); bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag); } } for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; if (fp->tq) { taskqueue_drain(fp->tq, &fp->tq_task); taskqueue_drain(fp->tq, &fp->tx_task); while (taskqueue_cancel_timeout(fp->tq, &fp->tx_timeout_task, NULL)) taskqueue_drain_timeout(fp->tq, &fp->tx_timeout_task); } for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; if (fp->tq != NULL) { taskqueue_free(fp->tq); fp->tq = NULL; } } } if (sc->sp_tq) { taskqueue_drain(sc->sp_tq, &sc->sp_tq_task); taskqueue_free(sc->sp_tq); sc->sp_tq = NULL; } } /* * Enables interrupts and attach to the ISR. * * When using multiple MSI/MSI-X vectors the first vector * is used for slowpath operations while all remaining * vectors are used for fastpath operations. If only a * single MSI/MSI-X vector is used (SINGLE_ISR) then the * ISR must look for both slowpath and fastpath completions. 
*/ static int bxe_interrupt_attach(struct bxe_softc *sc) { struct bxe_fastpath *fp; int rc = 0; int i; snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name), "bxe%d_sp_tq", sc->unit); TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc); sc->sp_tq = taskqueue_create(sc->sp_tq_name, M_NOWAIT, taskqueue_thread_enqueue, &sc->sp_tq); taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */ "%s", sc->sp_tq_name); for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; snprintf(fp->tq_name, sizeof(fp->tq_name), "bxe%d_fp%d_tq", sc->unit, i); - TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp); + NET_TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp); TASK_INIT(&fp->tx_task, 0, bxe_tx_mq_start_deferred, fp); fp->tq = taskqueue_create(fp->tq_name, M_NOWAIT, taskqueue_thread_enqueue, &fp->tq); TIMEOUT_TASK_INIT(fp->tq, &fp->tx_timeout_task, 0, bxe_tx_mq_start_deferred, fp); taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */ "%s", fp->tq_name); } /* setup interrupt handlers */ if (sc->interrupt_mode == INTR_MODE_MSIX) { BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n"); /* * Setup the interrupt handler. Note that we pass the driver instance * to the interrupt handler for the slowpath. */ if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, (INTR_TYPE_NET | INTR_MPSAFE), NULL, bxe_intr_sp, sc, &sc->intr[0].tag)) != 0) { BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc); goto bxe_interrupt_attach_exit; } bus_describe_intr(sc->dev, sc->intr[0].resource, sc->intr[0].tag, "sp"); /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */ /* initialize the fastpath vectors (note the first was used for sp) */ for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1)); /* * Setup the interrupt handler. Note that we pass the * fastpath context to the interrupt handler in this * case. */ if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource, (INTR_TYPE_NET | INTR_MPSAFE), NULL, bxe_intr_fp, fp, &sc->intr[i + 1].tag)) != 0) { BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n", (i + 1), rc); goto bxe_interrupt_attach_exit; } bus_describe_intr(sc->dev, sc->intr[i + 1].resource, sc->intr[i + 1].tag, "fp%02d", i); /* bind the fastpath instance to a cpu */ if (sc->num_queues > 1) { bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i); } fp->state = BXE_FP_STATE_IRQ; } } else if (sc->interrupt_mode == INTR_MODE_MSI) { BLOGD(sc, DBG_LOAD, "Enabling MSI[0] vector\n"); /* * Setup the interrupt handler. Note that we pass the * driver instance to the interrupt handler which * will handle both the slowpath and fastpath. */ if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, (INTR_TYPE_NET | INTR_MPSAFE), NULL, bxe_intr_legacy, sc, &sc->intr[0].tag)) != 0) { BLOGE(sc, "Failed to allocate MSI[0] vector (%d)\n", rc); goto bxe_interrupt_attach_exit; } } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */ BLOGD(sc, DBG_LOAD, "Enabling INTx interrupts\n"); /* * Setup the interrupt handler. Note that we pass the * driver instance to the interrupt handler which * will handle both the slowpath and fastpath. 
*/ if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource, (INTR_TYPE_NET | INTR_MPSAFE), NULL, bxe_intr_legacy, sc, &sc->intr[0].tag)) != 0) { BLOGE(sc, "Failed to allocate INTx interrupt (%d)\n", rc); goto bxe_interrupt_attach_exit; } } bxe_interrupt_attach_exit: return (rc); } static int bxe_init_hw_common_chip(struct bxe_softc *sc); static int bxe_init_hw_common(struct bxe_softc *sc); static int bxe_init_hw_port(struct bxe_softc *sc); static int bxe_init_hw_func(struct bxe_softc *sc); static void bxe_reset_common(struct bxe_softc *sc); static void bxe_reset_port(struct bxe_softc *sc); static void bxe_reset_func(struct bxe_softc *sc); static int bxe_gunzip_init(struct bxe_softc *sc); static void bxe_gunzip_end(struct bxe_softc *sc); static int bxe_init_firmware(struct bxe_softc *sc); static void bxe_release_firmware(struct bxe_softc *sc); static struct ecore_func_sp_drv_ops bxe_func_sp_drv = { .init_hw_cmn_chip = bxe_init_hw_common_chip, .init_hw_cmn = bxe_init_hw_common, .init_hw_port = bxe_init_hw_port, .init_hw_func = bxe_init_hw_func, .reset_hw_cmn = bxe_reset_common, .reset_hw_port = bxe_reset_port, .reset_hw_func = bxe_reset_func, .gunzip_init = bxe_gunzip_init, .gunzip_end = bxe_gunzip_end, .init_fw = bxe_init_firmware, .release_fw = bxe_release_firmware, }; static void bxe_init_func_obj(struct bxe_softc *sc) { sc->dmae_ready = 0; ecore_init_func_obj(sc, &sc->func_obj, BXE_SP(sc, func_rdata), BXE_SP_MAPPING(sc, func_rdata), BXE_SP(sc, func_afex_rdata), BXE_SP_MAPPING(sc, func_afex_rdata), &bxe_func_sp_drv); } static int bxe_init_hw(struct bxe_softc *sc, uint32_t load_code) { struct ecore_func_state_params func_params = { NULL }; int rc; /* prepare the parameters for function state transitions */ bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT); func_params.f_obj = &sc->func_obj; func_params.cmd = ECORE_F_CMD_HW_INIT; func_params.params.hw_init.load_phase = load_code; /* * Via a plethora of function pointers, we will eventually reach * bxe_init_hw_common(), bxe_init_hw_port(), or bxe_init_hw_func(). 
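 * The dispatch goes through ecore_func_state_change() and the
 * bxe_func_sp_drv ops table registered by bxe_init_func_obj(), keyed on
 * the load_phase set above.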
*/ rc = ecore_func_state_change(sc, &func_params); return (rc); } static void bxe_fill(struct bxe_softc *sc, uint32_t addr, int fill, uint32_t len) { uint32_t i; if (!(len % 4) && !(addr % 4)) { for (i = 0; i < len; i += 4) { REG_WR(sc, (addr + i), fill); } } else { for (i = 0; i < len; i++) { REG_WR8(sc, (addr + i), fill); } } } /* writes FP SP data to FW - data_size in dwords */ static void bxe_wr_fp_sb_data(struct bxe_softc *sc, int fw_sb_id, uint32_t *sb_data_p, uint32_t data_size) { int index; for (index = 0; index < data_size; index++) { REG_WR(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) + (sizeof(uint32_t) * index)), *(sb_data_p + index)); } } static void bxe_zero_fp_sb(struct bxe_softc *sc, int fw_sb_id) { struct hc_status_block_data_e2 sb_data_e2; struct hc_status_block_data_e1x sb_data_e1x; uint32_t *sb_data_p; uint32_t data_size = 0; if (!CHIP_IS_E1x(sc)) { memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); sb_data_e2.common.state = SB_DISABLED; sb_data_e2.common.p_func.vf_valid = FALSE; sb_data_p = (uint32_t *)&sb_data_e2; data_size = (sizeof(struct hc_status_block_data_e2) / sizeof(uint32_t)); } else { memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x)); sb_data_e1x.common.state = SB_DISABLED; sb_data_e1x.common.p_func.vf_valid = FALSE; sb_data_p = (uint32_t *)&sb_data_e1x; data_size = (sizeof(struct hc_status_block_data_e1x) / sizeof(uint32_t)); } bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size); bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id)), 0, CSTORM_STATUS_BLOCK_SIZE); bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id)), 0, CSTORM_SYNC_BLOCK_SIZE); } static void bxe_wr_sp_sb_data(struct bxe_softc *sc, struct hc_sp_status_block_data *sp_sb_data) { int i; for (i = 0; i < (sizeof(struct hc_sp_status_block_data) / sizeof(uint32_t)); i++) { REG_WR(sc, (BAR_CSTRORM_INTMEM + CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(SC_FUNC(sc)) + (i * sizeof(uint32_t))), *((uint32_t *)sp_sb_data + i)); } } static void bxe_zero_sp_sb(struct bxe_softc *sc) { struct hc_sp_status_block_data sp_sb_data; memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); sp_sb_data.state = SB_DISABLED; sp_sb_data.p_func.vf_valid = FALSE; bxe_wr_sp_sb_data(sc, &sp_sb_data); bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SP_STATUS_BLOCK_OFFSET(SC_FUNC(sc))), 0, CSTORM_SP_STATUS_BLOCK_SIZE); bxe_fill(sc, (BAR_CSTRORM_INTMEM + CSTORM_SP_SYNC_BLOCK_OFFSET(SC_FUNC(sc))), 0, CSTORM_SP_SYNC_BLOCK_SIZE); } static void bxe_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm, int igu_sb_id, int igu_seg_id) { hc_sm->igu_sb_id = igu_sb_id; hc_sm->igu_seg_id = igu_seg_id; hc_sm->timer_value = 0xFF; hc_sm->time_to_expire = 0xFFFFFFFF; } static void bxe_map_sb_state_machines(struct hc_index_data *index_data) { /* zero out state machine indices */ /* rx indices */ index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; /* tx indices */ index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID; index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID; index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID; index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID; /* map indices */ /* rx indices */ index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |= (SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT); /* tx indices */ index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |= (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |= (SM_TX_ID << 
HC_INDEX_DATA_SM_ID_SHIFT); index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |= (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |= (SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT); } static void bxe_init_sb(struct bxe_softc *sc, bus_addr_t busaddr, int vfid, uint8_t vf_valid, int fw_sb_id, int igu_sb_id) { struct hc_status_block_data_e2 sb_data_e2; struct hc_status_block_data_e1x sb_data_e1x; struct hc_status_block_sm *hc_sm_p; uint32_t *sb_data_p; int igu_seg_id; int data_size; if (CHIP_INT_MODE_IS_BC(sc)) { igu_seg_id = HC_SEG_ACCESS_NORM; } else { igu_seg_id = IGU_SEG_ACCESS_NORM; } bxe_zero_fp_sb(sc, fw_sb_id); if (!CHIP_IS_E1x(sc)) { memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2)); sb_data_e2.common.state = SB_ENABLED; sb_data_e2.common.p_func.pf_id = SC_FUNC(sc); sb_data_e2.common.p_func.vf_id = vfid; sb_data_e2.common.p_func.vf_valid = vf_valid; sb_data_e2.common.p_func.vnic_id = SC_VN(sc); sb_data_e2.common.same_igu_sb_1b = TRUE; sb_data_e2.common.host_sb_addr.hi = U64_HI(busaddr); sb_data_e2.common.host_sb_addr.lo = U64_LO(busaddr); hc_sm_p = sb_data_e2.common.state_machine; sb_data_p = (uint32_t *)&sb_data_e2; data_size = (sizeof(struct hc_status_block_data_e2) / sizeof(uint32_t)); bxe_map_sb_state_machines(sb_data_e2.index_data); } else { memset(&sb_data_e1x, 0, sizeof(struct hc_status_block_data_e1x)); sb_data_e1x.common.state = SB_ENABLED; sb_data_e1x.common.p_func.pf_id = SC_FUNC(sc); sb_data_e1x.common.p_func.vf_id = 0xff; sb_data_e1x.common.p_func.vf_valid = FALSE; sb_data_e1x.common.p_func.vnic_id = SC_VN(sc); sb_data_e1x.common.same_igu_sb_1b = TRUE; sb_data_e1x.common.host_sb_addr.hi = U64_HI(busaddr); sb_data_e1x.common.host_sb_addr.lo = U64_LO(busaddr); hc_sm_p = sb_data_e1x.common.state_machine; sb_data_p = (uint32_t *)&sb_data_e1x; data_size = (sizeof(struct hc_status_block_data_e1x) / sizeof(uint32_t)); bxe_map_sb_state_machines(sb_data_e1x.index_data); } bxe_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID], igu_sb_id, igu_seg_id); bxe_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID], igu_sb_id, igu_seg_id); BLOGD(sc, DBG_LOAD, "Init FW SB %d\n", fw_sb_id); /* write indices to HW - PCI guarantees endianity of regpairs */ bxe_wr_fp_sb_data(sc, fw_sb_id, sb_data_p, data_size); } static inline uint8_t bxe_fp_qzone_id(struct bxe_fastpath *fp) { if (CHIP_IS_E1x(fp->sc)) { return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H); } else { return (fp->cl_id); } } static inline uint32_t bxe_rx_ustorm_prods_offset(struct bxe_softc *sc, struct bxe_fastpath *fp) { uint32_t offset = BAR_USTRORM_INTMEM; if (!CHIP_IS_E1x(sc)) { offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); } else { offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id); } return (offset); } static void bxe_init_eth_fp(struct bxe_softc *sc, int idx) { struct bxe_fastpath *fp = &sc->fp[idx]; uint32_t cids[ECORE_MULTI_TX_COS] = { 0 }; unsigned long q_type = 0; int cos; fp->sc = sc; fp->index = idx; fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc)); fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc)); fp->cl_id = (CHIP_IS_E1x(sc)) ? 
(SC_L_ID(sc) + idx) : /* want client ID same as IGU SB ID for non-E1 */ fp->igu_sb_id; fp->cl_qzone_id = bxe_fp_qzone_id(fp); /* setup sb indices */ if (!CHIP_IS_E1x(sc)) { fp->sb_index_values = fp->status_block.e2_sb->sb.index_values; fp->sb_running_index = fp->status_block.e2_sb->sb.running_index; } else { fp->sb_index_values = fp->status_block.e1x_sb->sb.index_values; fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index; } /* init shortcut */ fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp); fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS]; /* * XXX If multiple CoS is ever supported then each fastpath structure * will need to maintain tx producer/consumer/dma/etc values *per* CoS. */ for (cos = 0; cos < sc->max_cos; cos++) { cids[cos] = idx; } fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0]; /* nothing more for a VF to do */ if (IS_VF(sc)) { return; } bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE, fp->fw_sb_id, fp->igu_sb_id); bxe_update_fp_sb_idx(fp); /* Configure Queue State object */ bit_set(&q_type, ECORE_Q_TYPE_HAS_RX); bit_set(&q_type, ECORE_Q_TYPE_HAS_TX); ecore_init_queue_obj(sc, &sc->sp_objs[idx].q_obj, fp->cl_id, cids, sc->max_cos, SC_FUNC(sc), BXE_SP(sc, q_rdata), BXE_SP_MAPPING(sc, q_rdata), q_type); /* configure classification DBs */ ecore_init_mac_obj(sc, &sc->sp_objs[idx].mac_obj, fp->cl_id, idx, SC_FUNC(sc), BXE_SP(sc, mac_rdata), BXE_SP_MAPPING(sc, mac_rdata), ECORE_FILTER_MAC_PENDING, &sc->sp_state, ECORE_OBJ_TYPE_RX_TX, &sc->macs_pool); BLOGD(sc, DBG_LOAD, "fp[%d]: sb=%p cl_id=%d fw_sb=%d igu_sb=%d\n", idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id); } static inline void bxe_update_rx_prod(struct bxe_softc *sc, struct bxe_fastpath *fp, uint16_t rx_bd_prod, uint16_t rx_cq_prod, uint16_t rx_sge_prod) { struct ustorm_eth_rx_producers rx_prods = { 0 }; uint32_t i; /* update producers */ rx_prods.bd_prod = rx_bd_prod; rx_prods.cqe_prod = rx_cq_prod; rx_prods.sge_prod = rx_sge_prod; /* * Make sure that the BD and SGE data is updated before updating the * producers since FW might read the BD/SGE right after the producer * is updated. * This is only applicable for weak-ordered memory model archs such * as IA-64. The following barrier is also mandatory since FW will * assumes BDs must have buffers. */ wmb(); for (i = 0; i < (sizeof(rx_prods) / 4); i++) { REG_WR(sc, (fp->ustorm_rx_prods_offset + (i * 4)), ((uint32_t *)&rx_prods)[i]); } wmb(); /* keep prod updates ordered */ BLOGD(sc, DBG_RX, "RX fp[%d]: wrote prods bd_prod=%u cqe_prod=%u sge_prod=%u\n", fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod); } static void bxe_init_rx_rings(struct bxe_softc *sc) { struct bxe_fastpath *fp; int i; for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; fp->rx_bd_cons = 0; /* * Activate the BD ring... 
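         * (i.e. publish the initial BD/CQE/SGE producers via
         * bxe_update_rx_prod() below)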
* Warning, this will generate an interrupt (to the TSTORM) * so this can only be done after the chip is initialized */ bxe_update_rx_prod(sc, fp, fp->rx_bd_prod, fp->rx_cq_prod, fp->rx_sge_prod); if (i != 0) { continue; } if (CHIP_IS_E1(sc)) { REG_WR(sc, (BAR_USTRORM_INTMEM + USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc))), U64_LO(fp->rcq_dma.paddr)); REG_WR(sc, (BAR_USTRORM_INTMEM + USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(SC_FUNC(sc)) + 4), U64_HI(fp->rcq_dma.paddr)); } } } static void bxe_init_tx_ring_one(struct bxe_fastpath *fp) { SET_FLAG(fp->tx_db.data.header.data, DOORBELL_HDR_T_DB_TYPE, 1); fp->tx_db.data.zero_fill1 = 0; fp->tx_db.data.prod = 0; fp->tx_pkt_prod = 0; fp->tx_pkt_cons = 0; fp->tx_bd_prod = 0; fp->tx_bd_cons = 0; fp->eth_q_stats.tx_pkts = 0; } static inline void bxe_init_tx_rings(struct bxe_softc *sc) { int i; for (i = 0; i < sc->num_queues; i++) { bxe_init_tx_ring_one(&sc->fp[i]); } } static void bxe_init_def_sb(struct bxe_softc *sc) { struct host_sp_status_block *def_sb = sc->def_sb; bus_addr_t mapping = sc->def_sb_dma.paddr; int igu_sp_sb_index; int igu_seg_id; int port = SC_PORT(sc); int func = SC_FUNC(sc); int reg_offset, reg_offset_en5; uint64_t section; int index, sindex; struct hc_sp_status_block_data sp_sb_data; memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data)); if (CHIP_INT_MODE_IS_BC(sc)) { igu_sp_sb_index = DEF_SB_IGU_ID; igu_seg_id = HC_SEG_ACCESS_DEF; } else { igu_sp_sb_index = sc->igu_dsb_id; igu_seg_id = IGU_SEG_ACCESS_DEF; } /* attentions */ section = ((uint64_t)mapping + offsetof(struct host_sp_status_block, atten_status_block)); def_sb->atten_status_block.status_block_id = igu_sp_sb_index; sc->attn_state = 0; reg_offset = (port) ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0; reg_offset_en5 = (port) ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 : MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0; for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { /* take care of sig[0]..sig[4] */ for (sindex = 0; sindex < 4; sindex++) { sc->attn_group[index].sig[sindex] = REG_RD(sc, (reg_offset + (sindex * 0x4) + (0x10 * index))); } if (!CHIP_IS_E1x(sc)) { /* * enable5 is separate from the rest of the registers, * and the address skip is 4 and not 16 between the * different groups */ sc->attn_group[index].sig[4] = REG_RD(sc, (reg_offset_en5 + (0x4 * index))); } else { sc->attn_group[index].sig[4] = 0; } } if (sc->devinfo.int_block == INT_BLOCK_HC) { reg_offset = (port) ? 
HC_REG_ATTN_MSG1_ADDR_L : HC_REG_ATTN_MSG0_ADDR_L; REG_WR(sc, reg_offset, U64_LO(section)); REG_WR(sc, (reg_offset + 4), U64_HI(section)); } else if (!CHIP_IS_E1x(sc)) { REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section)); REG_WR(sc, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section)); } section = ((uint64_t)mapping + offsetof(struct host_sp_status_block, sp_sb)); bxe_zero_sp_sb(sc); /* PCI guarantees endianity of regpair */ sp_sb_data.state = SB_ENABLED; sp_sb_data.host_sb_addr.lo = U64_LO(section); sp_sb_data.host_sb_addr.hi = U64_HI(section); sp_sb_data.igu_sb_id = igu_sp_sb_index; sp_sb_data.igu_seg_id = igu_seg_id; sp_sb_data.p_func.pf_id = func; sp_sb_data.p_func.vnic_id = SC_VN(sc); sp_sb_data.p_func.vf_id = 0xff; bxe_wr_sp_sb_data(sc, &sp_sb_data); bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); } static void bxe_init_sp_ring(struct bxe_softc *sc) { atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING); sc->spq_prod_idx = 0; sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS]; sc->spq_prod_bd = sc->spq; sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT); } static void bxe_init_eq_ring(struct bxe_softc *sc) { union event_ring_elem *elem; int i; for (i = 1; i <= NUM_EQ_PAGES; i++) { elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1]; elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr + BCM_PAGE_SIZE * (i % NUM_EQ_PAGES))); elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr + BCM_PAGE_SIZE * (i % NUM_EQ_PAGES))); } sc->eq_cons = 0; sc->eq_prod = NUM_EQ_DESC; sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS]; atomic_store_rel_long(&sc->eq_spq_left, (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING), NUM_EQ_DESC) - 1)); } static void bxe_init_internal_common(struct bxe_softc *sc) { int i; /* * Zero this manually as its initialization is currently missing * in the initTool. */ for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) { REG_WR(sc, (BAR_USTRORM_INTMEM + USTORM_AGG_DATA_OFFSET + (i * 4)), 0); } if (!CHIP_IS_E1x(sc)) { REG_WR8(sc, (BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET), CHIP_INT_MODE_IS_BC(sc) ? HC_IGU_BC_MODE : HC_IGU_NBC_MODE); } } static void bxe_init_internal(struct bxe_softc *sc, uint32_t load_code) { switch (load_code) { case FW_MSG_CODE_DRV_LOAD_COMMON: case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: bxe_init_internal_common(sc); /* no break */ case FW_MSG_CODE_DRV_LOAD_PORT: /* nothing to do */ /* no break */ case FW_MSG_CODE_DRV_LOAD_FUNCTION: /* internal memory per function is initialized inside bxe_pf_init */ break; default: BLOGE(sc, "Unknown load_code (0x%x) from MCP\n", load_code); break; } } static void storm_memset_func_cfg(struct bxe_softc *sc, struct tstorm_eth_function_common_config *tcfg, uint16_t abs_fid) { uint32_t addr; size_t size; addr = (BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid)); size = sizeof(struct tstorm_eth_function_common_config); ecore_storm_memset_struct(sc, addr, size, (uint32_t *)tcfg); } static void bxe_func_init(struct bxe_softc *sc, struct bxe_func_init_params *p) { struct tstorm_eth_function_common_config tcfg = { 0 }; if (CHIP_IS_E1x(sc)) { storm_memset_func_cfg(sc, &tcfg, p->func_id); } /* Enable the function in the FW */ storm_memset_vf_to_pf(sc, p->func_id, p->pf_id); storm_memset_func_en(sc, p->func_id, 1); /* spq */ if (p->func_flgs & FUNC_FLG_SPQ) { storm_memset_spq_addr(sc, p->spq_map, p->func_id); REG_WR(sc, (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)), p->spq_prod); } } /* * Calculates the sum of vn_min_rates. 
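 * (bxe_calc_vn_min() below extracts each VN's MIN_BW field and scales it
 * by 100, e.g. a configured value of 25 yields vn_min_rate = 2500.)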
 * It's needed for further normalizing of the min_rates.
 * Returns:
 *   sum of vn_min_rates.
 *     or
 *   0 - if all the min_rates are 0.
 * In the latter case the fairness algorithm should be deactivated.
 * If all min rates are not zero then those that are zeroes will be set to 1.
 */
static void
bxe_calc_vn_min(struct bxe_softc *sc,
                struct cmng_init_input *input)
{
    uint32_t vn_cfg;
    uint32_t vn_min_rate;
    int all_zero = 1;
    int vn;

    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
        vn_cfg = sc->devinfo.mf_info.mf_config[vn];

        vn_min_rate = (((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
                        FUNC_MF_CFG_MIN_BW_SHIFT) * 100);

        if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
            /* skip hidden VNs */
            vn_min_rate = 0;
        } else if (!vn_min_rate) {
            /* If min rate is zero - set it to 100 */
            vn_min_rate = DEF_MIN_RATE;
        } else {
            all_zero = 0;
        }

        input->vnic_min_rate[vn] = vn_min_rate;
    }

    /* if ETS or all min rates are zeros - disable fairness */
    if (BXE_IS_ETS_ENABLED(sc)) {
        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
        BLOGD(sc, DBG_LOAD, "Fairness disabled (ETS)\n");
    } else if (all_zero) {
        input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
        BLOGD(sc, DBG_LOAD, "Fairness disabled (all MIN values are zeroes)\n");
    } else {
        input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
    }
}

static inline uint16_t
bxe_extract_max_cfg(struct bxe_softc *sc,
                    uint32_t mf_cfg)
{
    uint16_t max_cfg = ((mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
                        FUNC_MF_CFG_MAX_BW_SHIFT);

    if (!max_cfg) {
        BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n");
        max_cfg = 100;
    }

    return (max_cfg);
}

static void
bxe_calc_vn_max(struct bxe_softc *sc,
                int vn,
                struct cmng_init_input *input)
{
    uint16_t vn_max_rate;
    uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn];
    uint32_t max_cfg;

    if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
        vn_max_rate = 0;
    } else {
        max_cfg = bxe_extract_max_cfg(sc, vn_cfg);

        if (IS_MF_SI(sc)) {
            /* max_cfg in percents of linkspeed */
            vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100);
        } else { /* SD modes */
            /* max_cfg is absolute in 100Mb units */
            vn_max_rate = (max_cfg * 100);
        }
    }

    BLOGD(sc, DBG_LOAD, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);

    input->vnic_max_rate[vn] = vn_max_rate;
}

static void
bxe_cmng_fns_init(struct bxe_softc *sc,
                  uint8_t read_cfg,
                  uint8_t cmng_type)
{
    struct cmng_init_input input;
    int vn;

    memset(&input, 0, sizeof(struct cmng_init_input));

    input.port_rate = sc->link_vars.line_speed;

    if (cmng_type == CMNG_FNS_MINMAX) {
        /* read mf conf from shmem */
        if (read_cfg) {
            bxe_read_mf_cfg(sc);
        }

        /* get VN min rate and enable fairness if not 0 */
        bxe_calc_vn_min(sc, &input);

        /* get VN max rate */
        if (sc->port.pmf) {
            for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
                bxe_calc_vn_max(sc, vn, &input);
            }
        }

        /* always enable rate shaping and fairness */
        input.flags.cmng_enables |= CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

        ecore_init_cmng(&input, &sc->cmng);

        return;
    }

    /* rate shaping and fairness are disabled */
    BLOGD(sc, DBG_LOAD, "rate shaping and fairness have been disabled\n");
}

static int
bxe_get_cmng_fns_mode(struct bxe_softc *sc)
{
    if (CHIP_REV_IS_SLOW(sc)) {
        return (CMNG_FNS_NONE);
    }

    if (IS_MF(sc)) {
        return (CMNG_FNS_MINMAX);
    }

    return (CMNG_FNS_NONE);
}

static void
storm_memset_cmng(struct bxe_softc *sc,
                  struct cmng_init *cmng,
                  uint8_t port)
{
    int vn;
    int func;
    uint32_t addr;
    size_t size;

    addr = (BAR_XSTRORM_INTMEM + XSTORM_CMNG_PER_PORT_VARS_OFFSET(port));
    size = sizeof(struct cmng_struct_per_port);
    ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port);

    for (vn = VN_0; vn < SC_MAX_VN_NUM(sc); vn++) {
        func =
func_by_vn(sc, vn); addr = (BAR_XSTRORM_INTMEM + XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func)); size = sizeof(struct rate_shaping_vars_per_vn); ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->vnic.vnic_max_rate[vn]); addr = (BAR_XSTRORM_INTMEM + XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func)); size = sizeof(struct fairness_vars_per_vn); ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->vnic.vnic_min_rate[vn]); } } static void bxe_pf_init(struct bxe_softc *sc) { struct bxe_func_init_params func_init = { 0 }; struct event_ring_data eq_data = { { 0 } }; uint16_t flags; if (!CHIP_IS_E1x(sc)) { /* reset IGU PF statistics: MSIX + ATTN */ /* PF */ REG_WR(sc, (IGU_REG_STATISTIC_NUM_MESSAGE_SENT + (BXE_IGU_STAS_MSG_VF_CNT * 4) + ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)), 0); /* ATTN */ REG_WR(sc, (IGU_REG_STATISTIC_NUM_MESSAGE_SENT + (BXE_IGU_STAS_MSG_VF_CNT * 4) + (BXE_IGU_STAS_MSG_PF_CNT * 4) + ((CHIP_IS_MODE_4_PORT(sc) ? SC_FUNC(sc) : SC_VN(sc)) * 4)), 0); } /* function setup flags */ flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ); /* * This flag is relevant for E1x only. * E2 doesn't have a TPA configuration in a function level. */ flags |= (if_getcapenable(sc->ifp) & IFCAP_LRO) ? FUNC_FLG_TPA : 0; func_init.func_flgs = flags; func_init.pf_id = SC_FUNC(sc); func_init.func_id = SC_FUNC(sc); func_init.spq_map = sc->spq_dma.paddr; func_init.spq_prod = sc->spq_prod_idx; bxe_func_init(sc, &func_init); memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port)); /* * Congestion management values depend on the link rate. * There is no active link so initial link rate is set to 10Gbps. * When the link comes up the congestion management values are * re-calculated according to the actual link rate. */ sc->link_vars.line_speed = SPEED_10000; bxe_cmng_fns_init(sc, TRUE, bxe_get_cmng_fns_mode(sc)); /* Only the PMF sets the HW */ if (sc->port.pmf) { storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc)); } /* init Event Queue - PCI bus guarantees correct endainity */ eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr); eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr); eq_data.producer = sc->eq_prod; eq_data.index_id = HC_SP_INDEX_EQ_CONS; eq_data.sb_id = DEF_SB_ID; storm_memset_eq_data(sc, &eq_data, SC_FUNC(sc)); } static void bxe_hc_int_enable(struct bxe_softc *sc) { int port = SC_PORT(sc); uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; uint32_t val = REG_RD(sc, addr); uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE; uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) && (sc->intr_count == 1)) ? TRUE : FALSE; uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE; if (msix) { val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | HC_CONFIG_0_REG_INT_LINE_EN_0); val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | HC_CONFIG_0_REG_ATTN_BIT_EN_0); if (single_msix) { val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0; } } else if (msi) { val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0; val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | HC_CONFIG_0_REG_ATTN_BIT_EN_0); } else { val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | HC_CONFIG_0_REG_INT_LINE_EN_0 | HC_CONFIG_0_REG_ATTN_BIT_EN_0); if (!CHIP_IS_E1(sc)) { BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr); REG_WR(sc, addr, val); val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0; } } if (CHIP_IS_E1(sc)) { REG_WR(sc, (HC_REG_INT_MASK + port*4), 0x1FFFF); } BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr, ((msix) ? 
"MSI-X" : ((msi) ? "MSI" : "INTx"))); REG_WR(sc, addr, val); /* ensure that HC_CONFIG is written before leading/trailing edge config */ mb(); if (!CHIP_IS_E1(sc)) { /* init leading/trailing edge */ if (IS_MF(sc)) { val = (0xee0f | (1 << (SC_VN(sc) + 4))); if (sc->port.pmf) { /* enable nig and gpio3 attention */ val |= 0x1100; } } else { val = 0xffff; } REG_WR(sc, (HC_REG_TRAILING_EDGE_0 + port*8), val); REG_WR(sc, (HC_REG_LEADING_EDGE_0 + port*8), val); } /* make sure that interrupts are indeed enabled from here on */ mb(); } static void bxe_igu_int_enable(struct bxe_softc *sc) { uint32_t val; uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE; uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) && (sc->intr_count == 1)) ? TRUE : FALSE; uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE; val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); if (msix) { val &= ~(IGU_PF_CONF_INT_LINE_EN | IGU_PF_CONF_SINGLE_ISR_EN); val |= (IGU_PF_CONF_MSI_MSIX_EN | IGU_PF_CONF_ATTN_BIT_EN); if (single_msix) { val |= IGU_PF_CONF_SINGLE_ISR_EN; } } else if (msi) { val &= ~IGU_PF_CONF_INT_LINE_EN; val |= (IGU_PF_CONF_MSI_MSIX_EN | IGU_PF_CONF_ATTN_BIT_EN | IGU_PF_CONF_SINGLE_ISR_EN); } else { val &= ~IGU_PF_CONF_MSI_MSIX_EN; val |= (IGU_PF_CONF_INT_LINE_EN | IGU_PF_CONF_ATTN_BIT_EN | IGU_PF_CONF_SINGLE_ISR_EN); } /* clean previous status - need to configure igu prior to ack*/ if ((!msix) || single_msix) { REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); bxe_ack_int(sc); } val |= IGU_PF_CONF_FUNC_EN; BLOGD(sc, DBG_INTR, "write 0x%x to IGU mode %s\n", val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx"))); REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); mb(); /* init leading/trailing edge */ if (IS_MF(sc)) { val = (0xee0f | (1 << (SC_VN(sc) + 4))); if (sc->port.pmf) { /* enable nig and gpio3 attention */ val |= 0x1100; } } else { val = 0xffff; } REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, val); REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, val); /* make sure that interrupts are indeed enabled from here on */ mb(); } static void bxe_int_enable(struct bxe_softc *sc) { if (sc->devinfo.int_block == INT_BLOCK_HC) { bxe_hc_int_enable(sc); } else { bxe_igu_int_enable(sc); } } static void bxe_hc_int_disable(struct bxe_softc *sc) { int port = SC_PORT(sc); uint32_t addr = (port) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; uint32_t val = REG_RD(sc, addr); /* * In E1 we must use only PCI configuration space to disable MSI/MSIX * capablility. 
It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC * block */ if (CHIP_IS_E1(sc)) { /* * Since IGU_PF_CONF_MSI_MSIX_EN still always on use mask register * to prevent from HC sending interrupts after we exit the function */ REG_WR(sc, (HC_REG_INT_MASK + port*4), 0); val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | HC_CONFIG_0_REG_INT_LINE_EN_0 | HC_CONFIG_0_REG_ATTN_BIT_EN_0); } else { val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | HC_CONFIG_0_REG_INT_LINE_EN_0 | HC_CONFIG_0_REG_ATTN_BIT_EN_0); } BLOGD(sc, DBG_INTR, "write %x to HC %d (addr 0x%x)\n", val, port, addr); /* flush all outstanding writes */ mb(); REG_WR(sc, addr, val); if (REG_RD(sc, addr) != val) { BLOGE(sc, "proper val not read from HC IGU!\n"); } } static void bxe_igu_int_disable(struct bxe_softc *sc) { uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); val &= ~(IGU_PF_CONF_MSI_MSIX_EN | IGU_PF_CONF_INT_LINE_EN | IGU_PF_CONF_ATTN_BIT_EN); BLOGD(sc, DBG_INTR, "write %x to IGU\n", val); /* flush all outstanding writes */ mb(); REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) { BLOGE(sc, "proper val not read from IGU!\n"); } } static void bxe_int_disable(struct bxe_softc *sc) { if (sc->devinfo.int_block == INT_BLOCK_HC) { bxe_hc_int_disable(sc); } else { bxe_igu_int_disable(sc); } } static void bxe_nic_init(struct bxe_softc *sc, int load_code) { int i; for (i = 0; i < sc->num_queues; i++) { bxe_init_eth_fp(sc, i); } rmb(); /* ensure status block indices were read */ bxe_init_rx_rings(sc); bxe_init_tx_rings(sc); if (IS_VF(sc)) { return; } /* initialize MOD_ABS interrupts */ elink_init_mod_abs_int(sc, &sc->link_vars, sc->devinfo.chip_id, sc->devinfo.shmem_base, sc->devinfo.shmem2_base, SC_PORT(sc)); bxe_init_def_sb(sc); bxe_update_dsb_idx(sc); bxe_init_sp_ring(sc); bxe_init_eq_ring(sc); bxe_init_internal(sc, load_code); bxe_pf_init(sc); bxe_stats_init(sc); /* flush all before enabling interrupts */ mb(); bxe_int_enable(sc); /* check for SPIO5 */ bxe_attn_int_deasserted0(sc, REG_RD(sc, (MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + SC_PORT(sc)*4)) & AEU_INPUTS_ATTN_BITS_SPIO5); } static inline void bxe_init_objs(struct bxe_softc *sc) { /* mcast rules must be added to tx if tx switching is enabled */ ecore_obj_type o_type = (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX : ECORE_OBJ_TYPE_RX; /* RX_MODE controlling object */ ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj); /* multicast configuration controlling object */ ecore_init_mcast_obj(sc, &sc->mcast_obj, sc->fp[0].cl_id, sc->fp[0].index, SC_FUNC(sc), SC_FUNC(sc), BXE_SP(sc, mcast_rdata), BXE_SP_MAPPING(sc, mcast_rdata), ECORE_FILTER_MCAST_PENDING, &sc->sp_state, o_type); /* Setup CAM credit pools */ ecore_init_mac_credit_pool(sc, &sc->macs_pool, SC_FUNC(sc), CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) : VNICS_PER_PATH(sc)); ecore_init_vlan_credit_pool(sc, &sc->vlans_pool, SC_ABS_FUNC(sc) >> 1, CHIP_IS_E1x(sc) ? VNICS_PER_PORT(sc) : VNICS_PER_PATH(sc)); /* RSS configuration object */ ecore_init_rss_config_obj(sc, &sc->rss_conf_obj, sc->fp[0].cl_id, sc->fp[0].index, SC_FUNC(sc), SC_FUNC(sc), BXE_SP(sc, rss_rdata), BXE_SP_MAPPING(sc, rss_rdata), ECORE_FILTER_RSS_CONF_PENDING, &sc->sp_state, ECORE_OBJ_TYPE_RX); } /* * Initialize the function. This must be called before sending CLIENT_SETUP * for the first client. 
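 * The ECORE_F_CMD_START transition below is issued with RAMROD_COMP_WAIT
 * set, so the call blocks until the FUNCTION_START ramrod completes (see
 * EVENT_RING_OPCODE_FUNCTION_START in bxe_eq_int()).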
*/ static inline int bxe_func_start(struct bxe_softc *sc) { struct ecore_func_state_params func_params = { NULL }; struct ecore_func_start_params *start_params = &func_params.params.start; /* Prepare parameters for function state transitions */ bit_set(&func_params.ramrod_flags, RAMROD_COMP_WAIT); func_params.f_obj = &sc->func_obj; func_params.cmd = ECORE_F_CMD_START; /* Function parameters */ start_params->mf_mode = sc->devinfo.mf_info.mf_mode; start_params->sd_vlan_tag = OVLAN(sc); if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) { start_params->network_cos_mode = STATIC_COS; } else { /* CHIP_IS_E1X */ start_params->network_cos_mode = FW_WRR; } //start_params->gre_tunnel_mode = 0; //start_params->gre_tunnel_rss = 0; return (ecore_func_state_change(sc, &func_params)); } static int bxe_set_power_state(struct bxe_softc *sc, uint8_t state) { uint16_t pmcsr; /* If there is no power capability, silently succeed */ if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) { BLOGW(sc, "No power capability\n"); return (0); } pmcsr = pci_read_config(sc->dev, (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), 2); switch (state) { case PCI_PM_D0: pci_write_config(sc->dev, (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), ((pmcsr & ~PCIM_PSTAT_DMASK) | PCIM_PSTAT_PME), 2); if (pmcsr & PCIM_PSTAT_DMASK) { /* delay required during transition out of D3hot */ DELAY(20000); } break; case PCI_PM_D3hot: /* XXX if there are other clients above don't shut down the power */ /* don't shut down the power for emulation and FPGA */ if (CHIP_REV_IS_SLOW(sc)) { return (0); } pmcsr &= ~PCIM_PSTAT_DMASK; pmcsr |= PCIM_PSTAT_D3; if (sc->wol) { pmcsr |= PCIM_PSTAT_PMEENABLE; } pci_write_config(sc->dev, (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS), pmcsr, 4); /* * No more memory access after this point until device is brought back * to D0 state. */ break; default: BLOGE(sc, "Can't support PCI power state = 0x%x pmcsr 0x%x\n", state, pmcsr); return (-1); } return (0); } /* return true if succeeded to acquire the lock */ static uint8_t bxe_trylock_hw_lock(struct bxe_softc *sc, uint32_t resource) { uint32_t lock_status; uint32_t resource_bit = (1 << resource); int func = SC_FUNC(sc); uint32_t hw_lock_control_reg; BLOGD(sc, DBG_LOAD, "Trying to take a resource lock 0x%x\n", resource); /* Validating that the resource is within range */ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { BLOGD(sc, DBG_LOAD, "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n", resource, HW_LOCK_MAX_RESOURCE_VALUE); return (FALSE); } if (func <= 5) { hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8); } else { hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8); } /* try to acquire the lock */ REG_WR(sc, hw_lock_control_reg + 4, resource_bit); lock_status = REG_RD(sc, hw_lock_control_reg); if (lock_status & resource_bit) { return (TRUE); } BLOGE(sc, "Failed to get a resource lock 0x%x func %d " "lock_status 0x%x resource_bit 0x%x\n", resource, func, lock_status, resource_bit); return (FALSE); } /* * Get the recovery leader resource id according to the engine this function * belongs to. Currently only only 2 engines is supported. 
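 * (Path 0 uses HW_LOCK_RESOURCE_RECOVERY_LEADER_0, path 1 uses
 * HW_LOCK_RESOURCE_RECOVERY_LEADER_1.)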
*/ static int bxe_get_leader_lock_resource(struct bxe_softc *sc) { if (SC_PATH(sc)) { return (HW_LOCK_RESOURCE_RECOVERY_LEADER_1); } else { return (HW_LOCK_RESOURCE_RECOVERY_LEADER_0); } } /* try to acquire a leader lock for current engine */ static uint8_t bxe_trylock_leader_lock(struct bxe_softc *sc) { return (bxe_trylock_hw_lock(sc, bxe_get_leader_lock_resource(sc))); } static int bxe_release_leader_lock(struct bxe_softc *sc) { return (bxe_release_hw_lock(sc, bxe_get_leader_lock_resource(sc))); } /* close gates #2, #3 and #4 */ static void bxe_set_234_gates(struct bxe_softc *sc, uint8_t close) { uint32_t val; /* gates #2 and #4a are closed/opened for "not E1" only */ if (!CHIP_IS_E1(sc)) { /* #4 */ REG_WR(sc, PXP_REG_HST_DISCARD_DOORBELLS, !!close); /* #2 */ REG_WR(sc, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close); } /* #3 */ if (CHIP_IS_E1x(sc)) { /* prevent interrupts from HC on both ports */ val = REG_RD(sc, HC_REG_CONFIG_1); REG_WR(sc, HC_REG_CONFIG_1, (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) : (val & ~(uint32_t)HC_CONFIG_1_REG_BLOCK_DISABLE_1)); val = REG_RD(sc, HC_REG_CONFIG_0); REG_WR(sc, HC_REG_CONFIG_0, (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) : (val & ~(uint32_t)HC_CONFIG_0_REG_BLOCK_DISABLE_0)); } else { /* Prevent incoming interrupts in IGU */ val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION); REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, (!close) ? (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) : (val & ~(uint32_t)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE)); } BLOGD(sc, DBG_LOAD, "%s gates #2, #3 and #4\n", close ? "closing" : "opening"); wmb(); } /* poll for pending writes bit, it should get cleared in no more than 1s */ static int bxe_er_poll_igu_vq(struct bxe_softc *sc) { uint32_t cnt = 1000; uint32_t pend_bits = 0; do { pend_bits = REG_RD(sc, IGU_REG_PENDING_BITS_STATUS); if (pend_bits == 0) { break; } DELAY(1000); } while (--cnt > 0); if (cnt == 0) { BLOGE(sc, "Still pending IGU requests bits=0x%08x!\n", pend_bits); return (-1); } return (0); } #define SHARED_MF_CLP_MAGIC 0x80000000 /* 'magic' bit */ static void bxe_clp_reset_prep(struct bxe_softc *sc, uint32_t *magic_val) { /* Do some magic... */ uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb); *magic_val = val & SHARED_MF_CLP_MAGIC; MFCFG_WR(sc, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC); } /* restore the value of the 'magic' bit */ static void bxe_clp_reset_done(struct bxe_softc *sc, uint32_t magic_val) { /* Restore the 'magic' bit value... 
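     * i.e. clear SHARED_MF_CLP_MAGIC (bit 31) in clp_mb and OR back the
     * bit previously saved by bxe_clp_reset_prep().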
*/ uint32_t val = MFCFG_RD(sc, shared_mf_config.clp_mb); MFCFG_WR(sc, shared_mf_config.clp_mb, (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); } /* prepare for MCP reset, takes care of CLP configurations */ static void bxe_reset_mcp_prep(struct bxe_softc *sc, uint32_t *magic_val) { uint32_t shmem; uint32_t validity_offset; /* set `magic' bit in order to save MF config */ if (!CHIP_IS_E1(sc)) { bxe_clp_reset_prep(sc, magic_val); } /* get shmem offset */ shmem = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); validity_offset = offsetof(struct shmem_region, validity_map[SC_PORT(sc)]); /* Clear validity map flags */ if (shmem > 0) { REG_WR(sc, shmem + validity_offset, 0); } } #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */ #define MCP_ONE_TIMEOUT 100 /* 100 ms */ static void bxe_mcp_wait_one(struct bxe_softc *sc) { /* special handling for emulation and FPGA (10 times longer) */ if (CHIP_REV_IS_SLOW(sc)) { DELAY((MCP_ONE_TIMEOUT*10) * 1000); } else { DELAY((MCP_ONE_TIMEOUT) * 1000); } } /* initialize shmem_base and waits for validity signature to appear */ static int bxe_init_shmem(struct bxe_softc *sc) { int cnt = 0; uint32_t val = 0; do { sc->devinfo.shmem_base = sc->link_params.shmem_base = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); if (sc->devinfo.shmem_base) { val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]); if (val & SHR_MEM_VALIDITY_MB) return (0); } bxe_mcp_wait_one(sc); } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT)); BLOGE(sc, "BAD MCP validity signature\n"); return (-1); } static int bxe_reset_mcp_comp(struct bxe_softc *sc, uint32_t magic_val) { int rc = bxe_init_shmem(sc); /* Restore the `magic' bit value */ if (!CHIP_IS_E1(sc)) { bxe_clp_reset_done(sc, magic_val); } return (rc); } static void bxe_pxp_prep(struct bxe_softc *sc) { if (!CHIP_IS_E1(sc)) { REG_WR(sc, PXP2_REG_RD_START_INIT, 0); REG_WR(sc, PXP2_REG_RQ_RBC_DONE, 0); wmb(); } } /* * Reset the whole chip except for: * - PCIE core * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit) * - IGU * - MISC (including AEU) * - GRC * - RBCN, RBCP */ static void bxe_process_kill_chip_reset(struct bxe_softc *sc, uint8_t global) { uint32_t not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2; uint32_t global_bits2, stay_reset2; /* * Bits that have to be set in reset_mask2 if we want to reset 'global' * (per chip) blocks. */ global_bits2 = MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU | MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE; /* * Don't reset the following blocks. * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be * reset, as in 4 port device they might still be owned * by the MCP (there is only one leader per path). 
*/ not_reset_mask1 = MISC_REGISTERS_RESET_REG_1_RST_HC | MISC_REGISTERS_RESET_REG_1_RST_PXPV | MISC_REGISTERS_RESET_REG_1_RST_PXP; not_reset_mask2 = MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO | MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE | MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE | MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE | MISC_REGISTERS_RESET_REG_2_RST_RBCN | MISC_REGISTERS_RESET_REG_2_RST_GRC | MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE | MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B | MISC_REGISTERS_RESET_REG_2_RST_ATC | MISC_REGISTERS_RESET_REG_2_PGLC | MISC_REGISTERS_RESET_REG_2_RST_BMAC0 | MISC_REGISTERS_RESET_REG_2_RST_BMAC1 | MISC_REGISTERS_RESET_REG_2_RST_EMAC0 | MISC_REGISTERS_RESET_REG_2_RST_EMAC1 | MISC_REGISTERS_RESET_REG_2_UMAC0 | MISC_REGISTERS_RESET_REG_2_UMAC1; /* * Keep the following blocks in reset: * - all xxMACs are handled by the elink code. */ stay_reset2 = MISC_REGISTERS_RESET_REG_2_XMAC | MISC_REGISTERS_RESET_REG_2_XMAC_SOFT; /* Full reset masks according to the chip */ reset_mask1 = 0xffffffff; if (CHIP_IS_E1(sc)) reset_mask2 = 0xffff; else if (CHIP_IS_E1H(sc)) reset_mask2 = 0x1ffff; else if (CHIP_IS_E2(sc)) reset_mask2 = 0xfffff; else /* CHIP_IS_E3 */ reset_mask2 = 0x3ffffff; /* Don't reset global blocks unless we need to */ if (!global) reset_mask2 &= ~global_bits2; /* * In case of attention in the QM, we need to reset PXP * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM * because otherwise QM reset would release 'close the gates' shortly * before resetting the PXP, then the PSWRQ would send a write * request to PGLUE. Then when PXP is reset, PGLUE would try to * read the payload data from PSWWR, but PSWWR would not * respond. The write queue in PGLUE would stuck, dmae commands * would not return. Therefore it's important to reset the second * reset register (containing the * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM * bit). */ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, reset_mask2 & (~not_reset_mask2)); REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, reset_mask1 & (~not_reset_mask1)); mb(); wmb(); REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2 & (~stay_reset2)); mb(); wmb(); REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1); wmb(); } static int bxe_process_kill(struct bxe_softc *sc, uint8_t global) { int cnt = 1000; uint32_t val = 0; uint32_t sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2; uint32_t tags_63_32 = 0; /* Empty the Tetris buffer, wait for 1s */ do { sr_cnt = REG_RD(sc, PXP2_REG_RD_SR_CNT); blk_cnt = REG_RD(sc, PXP2_REG_RD_BLK_CNT); port_is_idle_0 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_0); port_is_idle_1 = REG_RD(sc, PXP2_REG_RD_PORT_IS_IDLE_1); pgl_exp_rom2 = REG_RD(sc, PXP2_REG_PGL_EXP_ROM2); if (CHIP_IS_E3(sc)) { tags_63_32 = REG_RD(sc, PGLUE_B_REG_TAGS_63_32); } if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) && ((port_is_idle_0 & 0x1) == 0x1) && ((port_is_idle_1 & 0x1) == 0x1) && (pgl_exp_rom2 == 0xffffffff) && (!CHIP_IS_E3(sc) || (tags_63_32 == 0xffffffff))) break; DELAY(1000); } while (cnt-- > 0); if (cnt <= 0) { BLOGE(sc, "ERROR: Tetris buffer didn't get empty or there " "are still outstanding read requests after 1s! 
" "sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, " "port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n", sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2); return (-1); } mb(); /* Close gates #2, #3 and #4 */ bxe_set_234_gates(sc, TRUE); /* Poll for IGU VQs for 57712 and newer chips */ if (!CHIP_IS_E1x(sc) && bxe_er_poll_igu_vq(sc)) { return (-1); } /* XXX indicate that "process kill" is in progress to MCP */ /* clear "unprepared" bit */ REG_WR(sc, MISC_REG_UNPREPARED, 0); mb(); /* Make sure all is written to the chip before the reset */ wmb(); /* * Wait for 1ms to empty GLUE and PCI-E core queues, * PSWHST, GRC and PSWRD Tetris buffer. */ DELAY(1000); /* Prepare to chip reset: */ /* MCP */ if (global) { bxe_reset_mcp_prep(sc, &val); } /* PXP */ bxe_pxp_prep(sc); mb(); /* reset the chip */ bxe_process_kill_chip_reset(sc, global); mb(); /* clear errors in PGB */ if (!CHIP_IS_E1(sc)) REG_WR(sc, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f); /* Recover after reset: */ /* MCP */ if (global && bxe_reset_mcp_comp(sc, val)) { return (-1); } /* XXX add resetting the NO_MCP mode DB here */ /* Open the gates #2, #3 and #4 */ bxe_set_234_gates(sc, FALSE); /* XXX * IGU/AEU preparation bring back the AEU/IGU to a reset state * re-enable attentions */ return (0); } static int bxe_leader_reset(struct bxe_softc *sc) { int rc = 0; uint8_t global = bxe_reset_is_global(sc); uint32_t load_code; /* * If not going to reset MCP, load "fake" driver to reset HW while * driver is owner of the HW. */ if (!global && !BXE_NOMCP(sc)) { load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_REQ, DRV_MSG_CODE_LOAD_REQ_WITH_LFA); if (!load_code) { BLOGE(sc, "MCP response failure, aborting\n"); rc = -1; goto exit_leader_reset; } if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) && (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) { BLOGE(sc, "MCP unexpected response, aborting\n"); rc = -1; goto exit_leader_reset2; } load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); if (!load_code) { BLOGE(sc, "MCP response failure, aborting\n"); rc = -1; goto exit_leader_reset2; } } /* try to recover after the failure */ if (bxe_process_kill(sc, global)) { BLOGE(sc, "Something bad occurred on engine %d!\n", SC_PATH(sc)); rc = -1; goto exit_leader_reset2; } /* * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver * state. */ bxe_set_reset_done(sc); if (global) { bxe_clear_reset_global(sc); } exit_leader_reset2: /* unload "fake driver" if it was loaded */ if (!global && !BXE_NOMCP(sc)) { bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0); } exit_leader_reset: sc->is_leader = 0; bxe_release_leader_lock(sc); mb(); return (rc); } /* * prepare INIT transition, parameters configured: * - HC configuration * - Queue's CDU context */ static void bxe_pf_q_prep_init(struct bxe_softc *sc, struct bxe_fastpath *fp, struct ecore_queue_init_params *init_params) { uint8_t cos; int cxt_index, cxt_offset; bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags); bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags); bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags); bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags); /* HC rate */ init_params->rx.hc_rate = sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0; init_params->tx.hc_rate = sc->hc_tx_ticks ? 
(1000000 / sc->hc_tx_ticks) : 0; /* FW SB ID */ init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id; /* CQ index among the SB indices */ init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS; /* set maximum number of COSs supported by this queue */ init_params->max_cos = sc->max_cos; BLOGD(sc, DBG_LOAD, "fp %d setting queue params max cos to %d\n", fp->index, init_params->max_cos); /* set the context pointers queue object */ for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) { /* XXX change index/cid here if ever support multiple tx CoS */ /* fp->txdata[cos]->cid */ cxt_index = fp->index / ILT_PAGE_CIDS; cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS); init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth; } } /* set flags that are common for the Tx-only and not normal connections */ static unsigned long bxe_get_common_flags(struct bxe_softc *sc, struct bxe_fastpath *fp, uint8_t zero_stats) { unsigned long flags = 0; /* PF driver will always initialize the Queue to an ACTIVE state */ bxe_set_bit(ECORE_Q_FLG_ACTIVE, &flags); /* * tx only connections collect statistics (on the same index as the * parent connection). The statistics are zeroed when the parent * connection is initialized. */ bxe_set_bit(ECORE_Q_FLG_STATS, &flags); if (zero_stats) { bxe_set_bit(ECORE_Q_FLG_ZERO_STATS, &flags); } /* * tx only connections can support tx-switching, though their * CoS-ness doesn't survive the loopback */ if (sc->flags & BXE_TX_SWITCHING) { bxe_set_bit(ECORE_Q_FLG_TX_SWITCH, &flags); } bxe_set_bit(ECORE_Q_FLG_PCSUM_ON_PKT, &flags); return (flags); } static unsigned long bxe_get_q_flags(struct bxe_softc *sc, struct bxe_fastpath *fp, uint8_t leading) { unsigned long flags = 0; if (IS_MF_SD(sc)) { bxe_set_bit(ECORE_Q_FLG_OV, &flags); } if (if_getcapenable(sc->ifp) & IFCAP_LRO) { bxe_set_bit(ECORE_Q_FLG_TPA, &flags); #if __FreeBSD_version >= 800000 bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags); #endif } if (leading) { bxe_set_bit(ECORE_Q_FLG_LEADING_RSS, &flags); bxe_set_bit(ECORE_Q_FLG_MCAST, &flags); } bxe_set_bit(ECORE_Q_FLG_VLAN, &flags); /* merge with common flags */ return (flags | bxe_get_common_flags(sc, fp, TRUE)); } static void bxe_pf_q_prep_general(struct bxe_softc *sc, struct bxe_fastpath *fp, struct ecore_general_setup_params *gen_init, uint8_t cos) { gen_init->stat_id = bxe_stats_id(fp); gen_init->spcl_id = fp->cl_id; gen_init->mtu = sc->mtu; gen_init->cos = cos; } static void bxe_pf_rx_q_prep(struct bxe_softc *sc, struct bxe_fastpath *fp, struct rxq_pause_params *pause, struct ecore_rxq_setup_params *rxq_init) { uint8_t max_sge = 0; uint16_t sge_sz = 0; uint16_t tpa_agg_size = 0; pause->sge_th_lo = SGE_TH_LO(sc); pause->sge_th_hi = SGE_TH_HI(sc); /* validate SGE ring has enough to cross high threshold */ if (sc->dropless_fc && (pause->sge_th_hi + FW_PREFETCH_CNT) > (RX_SGE_USABLE_PER_PAGE * RX_SGE_NUM_PAGES)) { BLOGW(sc, "sge ring threshold limit\n"); } /* minimum max_aggregation_size is 2*MTU (two full buffers) */ tpa_agg_size = (2 * sc->mtu); if (tpa_agg_size < sc->max_aggregation_size) { tpa_agg_size = sc->max_aggregation_size; } max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT; max_sge = ((max_sge + PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT; sge_sz = (uint16_t)min(SGE_PAGES, 0xffff); /* pause - not for e1 */ if (!CHIP_IS_E1(sc)) { pause->bd_th_lo = BD_TH_LO(sc); pause->bd_th_hi = BD_TH_HI(sc); pause->rcq_th_lo = RCQ_TH_LO(sc); pause->rcq_th_hi = 
RCQ_TH_HI(sc); /* validate rings have enough entries to cross high thresholds */ if (sc->dropless_fc && pause->bd_th_hi + FW_PREFETCH_CNT > sc->rx_ring_size) { BLOGW(sc, "rx bd ring threshold limit\n"); } if (sc->dropless_fc && pause->rcq_th_hi + FW_PREFETCH_CNT > RCQ_NUM_PAGES * RCQ_USABLE_PER_PAGE) { BLOGW(sc, "rcq ring threshold limit\n"); } pause->pri_map = 1; } /* rxq setup */ rxq_init->dscr_map = fp->rx_dma.paddr; rxq_init->sge_map = fp->rx_sge_dma.paddr; rxq_init->rcq_map = fp->rcq_dma.paddr; rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE); /* * This should be a maximum number of data bytes that may be * placed on the BD (not including paddings). */ rxq_init->buf_sz = (fp->rx_buf_size - IP_HEADER_ALIGNMENT_PADDING); rxq_init->cl_qzone_id = fp->cl_qzone_id; rxq_init->tpa_agg_sz = tpa_agg_size; rxq_init->sge_buf_sz = sge_sz; rxq_init->max_sges_pkt = max_sge; rxq_init->rss_engine_id = SC_FUNC(sc); rxq_init->mcast_engine_id = SC_FUNC(sc); /* * Maximum number or simultaneous TPA aggregation for this Queue. * For PF Clients it should be the maximum available number. * VF driver(s) may want to define it to a smaller value. */ rxq_init->max_tpa_queues = MAX_AGG_QS(sc); rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT; rxq_init->fw_sb_id = fp->fw_sb_id; rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; /* * configure silent vlan removal * if multi function mode is afex, then mask default vlan */ if (IS_MF_AFEX(sc)) { rxq_init->silent_removal_value = sc->devinfo.mf_info.afex_def_vlan_tag; rxq_init->silent_removal_mask = EVL_VLID_MASK; } } static void bxe_pf_tx_q_prep(struct bxe_softc *sc, struct bxe_fastpath *fp, struct ecore_txq_setup_params *txq_init, uint8_t cos) { /* * XXX If multiple CoS is ever supported then each fastpath structure * will need to maintain tx producer/consumer/dma/etc values *per* CoS. 
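* Until then the single per-queue ring is used directly instead of, e.g.: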
* fp->txdata[cos]->tx_dma.paddr; */ txq_init->dscr_map = fp->tx_dma.paddr; txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos; txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; txq_init->fw_sb_id = fp->fw_sb_id; /* * set the TSS leading client id for TX classfication to the * leading RSS client id */ txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id); } /* * This function performs 2 steps in a queue state machine: * 1) RESET->INIT * 2) INIT->SETUP */ static int bxe_setup_queue(struct bxe_softc *sc, struct bxe_fastpath *fp, uint8_t leading) { struct ecore_queue_state_params q_params = { NULL }; struct ecore_queue_setup_params *setup_params = &q_params.params.setup; int rc; BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index); bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); q_params.q_obj = &BXE_SP_OBJ(sc, fp).q_obj; /* we want to wait for completion in this context */ bxe_set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); /* prepare the INIT parameters */ bxe_pf_q_prep_init(sc, fp, &q_params.params.init); /* Set the command */ q_params.cmd = ECORE_Q_CMD_INIT; /* Change the state to INIT */ rc = ecore_queue_state_change(sc, &q_params); if (rc) { BLOGE(sc, "Queue(%d) INIT failed rc = %d\n", fp->index, rc); return (rc); } BLOGD(sc, DBG_LOAD, "init complete\n"); /* now move the Queue to the SETUP state */ memset(setup_params, 0, sizeof(*setup_params)); /* set Queue flags */ setup_params->flags = bxe_get_q_flags(sc, fp, leading); /* set general SETUP parameters */ bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params, FIRST_TX_COS_INDEX); bxe_pf_rx_q_prep(sc, fp, &setup_params->pause_params, &setup_params->rxq_params); bxe_pf_tx_q_prep(sc, fp, &setup_params->txq_params, FIRST_TX_COS_INDEX); /* Set the command */ q_params.cmd = ECORE_Q_CMD_SETUP; /* change the state to SETUP */ rc = ecore_queue_state_change(sc, &q_params); if (rc) { BLOGE(sc, "Queue(%d) SETUP failed (rc = %d)\n", fp->index, rc); return (rc); } return (rc); } static int bxe_setup_leading(struct bxe_softc *sc) { return (bxe_setup_queue(sc, &sc->fp[0], TRUE)); } static int bxe_config_rss_pf(struct bxe_softc *sc, struct ecore_rss_config_obj *rss_obj, uint8_t config_hash) { struct ecore_config_rss_params params = { NULL }; int i; /* * Although RSS is meaningless when there is a single HW queue we * still need it enabled in order to have HW Rx hash generated. 
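* With a single queue the indirection table below simply maps every hash bucket to that queue's client id.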
*/ params.rss_obj = rss_obj; bxe_set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags); bxe_set_bit(ECORE_RSS_MODE_REGULAR, &params.rss_flags); /* RSS configuration */ bxe_set_bit(ECORE_RSS_IPV4, &params.rss_flags); bxe_set_bit(ECORE_RSS_IPV4_TCP, &params.rss_flags); bxe_set_bit(ECORE_RSS_IPV6, &params.rss_flags); bxe_set_bit(ECORE_RSS_IPV6_TCP, &params.rss_flags); if (rss_obj->udp_rss_v4) { bxe_set_bit(ECORE_RSS_IPV4_UDP, &params.rss_flags); } if (rss_obj->udp_rss_v6) { bxe_set_bit(ECORE_RSS_IPV6_UDP, &params.rss_flags); } /* Hash bits */ params.rss_result_mask = MULTI_MASK; memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table)); if (config_hash) { /* RSS keys */ for (i = 0; i < sizeof(params.rss_key) / 4; i++) { params.rss_key[i] = arc4random(); } bxe_set_bit(ECORE_RSS_SET_SRCH, &params.rss_flags); } return (ecore_config_rss(sc, &params)); } static int bxe_config_rss_eth(struct bxe_softc *sc, uint8_t config_hash) { return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash)); } static int bxe_init_rss_pf(struct bxe_softc *sc) { uint8_t num_eth_queues = BXE_NUM_ETH_QUEUES(sc); int i; /* * Prepare the initial contents of the indirection table if * RSS is enabled */ for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) { sc->rss_conf_obj.ind_table[i] = (sc->fp->cl_id + (i % num_eth_queues)); } if (sc->udp_rss) { sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1; } /* * For 57710 and 57711 SEARCHER configuration (rss_keys) is * per-port, so if explicit configuration is needed, do it only * for a PMF. * * For 57712 and newer it's a per-function configuration. */ return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc))); } static int bxe_set_mac_one(struct bxe_softc *sc, uint8_t *mac, struct ecore_vlan_mac_obj *obj, uint8_t set, int mac_type, unsigned long *ramrod_flags) { struct ecore_vlan_mac_ramrod_params ramrod_param; int rc; memset(&ramrod_param, 0, sizeof(ramrod_param)); /* fill in general parameters */ ramrod_param.vlan_mac_obj = obj; ramrod_param.ramrod_flags = *ramrod_flags; /* fill a user request section if needed */ if (!bxe_test_bit(RAMROD_CONT, ramrod_flags)) { memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN); bxe_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags); /* Set the command: ADD or DEL */ ramrod_param.user_req.cmd = (set) ? ECORE_VLAN_MAC_ADD : ECORE_VLAN_MAC_DEL; } rc = ecore_config_vlan_mac(sc, &ramrod_param); if (rc == ECORE_EXISTS) { BLOGD(sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n"); /* do not treat adding same MAC as error */ rc = 0; } else if (rc < 0) { BLOGE(sc, "%s MAC failed (%d)\n", (set ?
"Set" : "Delete"), rc); } return (rc); } static int bxe_set_eth_mac(struct bxe_softc *sc, uint8_t set) { unsigned long ramrod_flags = 0; BLOGD(sc, DBG_LOAD, "Adding Ethernet MAC\n"); bxe_set_bit(RAMROD_COMP_WAIT, &ramrod_flags); /* Eth MAC is set on RSS leading client (fp[0]) */ return (bxe_set_mac_one(sc, sc->link_params.mac_addr, &sc->sp_objs->mac_obj, set, ECORE_ETH_MAC, &ramrod_flags)); } static int bxe_get_cur_phy_idx(struct bxe_softc *sc) { uint32_t sel_phy_idx = 0; if (sc->link_params.num_phys <= 1) { return (ELINK_INT_PHY); } if (sc->link_vars.link_up) { sel_phy_idx = ELINK_EXT_PHY1; /* In case link is SERDES, check if the ELINK_EXT_PHY2 is the one */ if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) && (sc->link_params.phy[ELINK_EXT_PHY2].supported & ELINK_SUPPORTED_FIBRE)) sel_phy_idx = ELINK_EXT_PHY2; } else { switch (elink_phy_selection(&sc->link_params)) { case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY: case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: sel_phy_idx = ELINK_EXT_PHY1; break; case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY: case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: sel_phy_idx = ELINK_EXT_PHY2; break; } } return (sel_phy_idx); } static int bxe_get_link_cfg_idx(struct bxe_softc *sc) { uint32_t sel_phy_idx = bxe_get_cur_phy_idx(sc); /* * The selected activated PHY is always after swapping (in case PHY * swapping is enabled). So when swapping is enabled, we need to reverse * the configuration */ if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) { if (sel_phy_idx == ELINK_EXT_PHY1) sel_phy_idx = ELINK_EXT_PHY2; else if (sel_phy_idx == ELINK_EXT_PHY2) sel_phy_idx = ELINK_EXT_PHY1; } return (ELINK_LINK_CONFIG_IDX(sel_phy_idx)); } static void bxe_set_requested_fc(struct bxe_softc *sc) { /* * Initialize link parameters structure variables * It is recommended to turn off RX FC for jumbo frames * for better performance */ if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) { sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX; } else { sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH; } } static void bxe_calc_fc_adv(struct bxe_softc *sc) { uint8_t cfg_idx = bxe_get_link_cfg_idx(sc); sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause); switch (sc->link_vars.ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | ADVERTISED_Pause); break; case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC: sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause; break; default: break; } } static uint16_t bxe_get_mf_speed(struct bxe_softc *sc) { uint16_t line_speed = sc->link_vars.line_speed; if (IS_MF(sc)) { uint16_t maxCfg = bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]); /* calculate the current MAX line speed limit for the MF devices */ if (IS_MF_SI(sc)) { line_speed = (line_speed * maxCfg) / 100; } else { /* SD mode */ uint16_t vn_max_rate = maxCfg * 100; if (vn_max_rate < line_speed) { line_speed = vn_max_rate; } } } return (line_speed); } static void bxe_fill_report_data(struct bxe_softc *sc, struct bxe_link_report_data *data) { uint16_t line_speed = bxe_get_mf_speed(sc); memset(data, 0, sizeof(*data)); /* fill the report data with the effective line speed */ data->line_speed = line_speed; /* Link is down */ if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) { bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags); } /* Full DUPLEX */ if 
(sc->link_vars.duplex == DUPLEX_FULL) { bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags); } /* Rx Flow Control is ON */ if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) { bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags); } /* Tx Flow Control is ON */ if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) { bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags); } } /* report link status to OS, should be called under phy_lock */ static void bxe_link_report_locked(struct bxe_softc *sc) { struct bxe_link_report_data cur_data; /* reread mf_cfg */ if (IS_PF(sc) && !CHIP_IS_E1(sc)) { bxe_read_mf_cfg(sc); } /* Read the current link report info */ bxe_fill_report_data(sc, &cur_data); /* Don't report link down or exactly the same link status twice */ if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) || (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, &sc->last_reported_link.link_report_flags) && bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, &cur_data.link_report_flags))) { return; } ELINK_DEBUG_P2(sc, "Change in link status : cur_data = %x, last_reported_link = %x\n", cur_data.link_report_flags, sc->last_reported_link.link_report_flags); sc->link_cnt++; ELINK_DEBUG_P1(sc, "link status change count = %x\n", sc->link_cnt); /* report new link params and remember the state for the next time */ memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data)); if (bxe_test_bit(BXE_LINK_REPORT_LINK_DOWN, &cur_data.link_report_flags)) { if_link_state_change(sc->ifp, LINK_STATE_DOWN); } else { const char *duplex; const char *flow; if (bxe_test_and_clear_bit(BXE_LINK_REPORT_FULL_DUPLEX, &cur_data.link_report_flags)) { duplex = "full"; ELINK_DEBUG_P0(sc, "link set to full duplex\n"); } else { duplex = "half"; ELINK_DEBUG_P0(sc, "link set to half duplex\n"); } /* * Handle the FC at the end so that only these flags would be * possibly set. This way we may easily check if there is no FC * enabled. */ if (cur_data.link_report_flags) { if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON, &cur_data.link_report_flags) && bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON, &cur_data.link_report_flags)) { flow = "ON - receive & transmit"; } else if (bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON, &cur_data.link_report_flags) && !bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON, &cur_data.link_report_flags)) { flow = "ON - receive"; } else if (!bxe_test_bit(BXE_LINK_REPORT_RX_FC_ON, &cur_data.link_report_flags) && bxe_test_bit(BXE_LINK_REPORT_TX_FC_ON, &cur_data.link_report_flags)) { flow = "ON - transmit"; } else { flow = "none"; /* possible? 
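only if a flag other than the two FC bits is still set, which should not happen at this point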
*/ } } else { flow = "none"; } if_link_state_change(sc->ifp, LINK_STATE_UP); BLOGI(sc, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n", cur_data.line_speed, duplex, flow); } } static void bxe_link_report(struct bxe_softc *sc) { bxe_acquire_phy_lock(sc); bxe_link_report_locked(sc); bxe_release_phy_lock(sc); } static void bxe_link_status_update(struct bxe_softc *sc) { if (sc->state != BXE_STATE_OPEN) { return; } if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) { elink_link_status_update(&sc->link_params, &sc->link_vars); } else { sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half | ELINK_SUPPORTED_10baseT_Full | ELINK_SUPPORTED_100baseT_Half | ELINK_SUPPORTED_100baseT_Full | ELINK_SUPPORTED_1000baseT_Full | ELINK_SUPPORTED_2500baseX_Full | ELINK_SUPPORTED_10000baseT_Full | ELINK_SUPPORTED_TP | ELINK_SUPPORTED_FIBRE | ELINK_SUPPORTED_Autoneg | ELINK_SUPPORTED_Pause | ELINK_SUPPORTED_Asym_Pause); sc->port.advertising[0] = sc->port.supported[0]; sc->link_params.sc = sc; sc->link_params.port = SC_PORT(sc); sc->link_params.req_duplex[0] = DUPLEX_FULL; sc->link_params.req_flow_ctrl[0] = ELINK_FLOW_CTRL_NONE; sc->link_params.req_line_speed[0] = SPEED_10000; sc->link_params.speed_cap_mask[0] = 0x7f0000; sc->link_params.switch_cfg = ELINK_SWITCH_CFG_10G; if (CHIP_REV_IS_FPGA(sc)) { sc->link_vars.mac_type = ELINK_MAC_TYPE_EMAC; sc->link_vars.line_speed = ELINK_SPEED_1000; sc->link_vars.link_status = (LINK_STATUS_LINK_UP | LINK_STATUS_SPEED_AND_DUPLEX_1000TFD); } else { sc->link_vars.mac_type = ELINK_MAC_TYPE_BMAC; sc->link_vars.line_speed = ELINK_SPEED_10000; sc->link_vars.link_status = (LINK_STATUS_LINK_UP | LINK_STATUS_SPEED_AND_DUPLEX_10GTFD); } sc->link_vars.link_up = 1; sc->link_vars.duplex = DUPLEX_FULL; sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE; if (IS_PF(sc)) { REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0); bxe_stats_handle(sc, STATS_EVENT_LINK_UP); bxe_link_report(sc); } } if (IS_PF(sc)) { if (sc->link_vars.link_up) { bxe_stats_handle(sc, STATS_EVENT_LINK_UP); } else { bxe_stats_handle(sc, STATS_EVENT_STOP); } bxe_link_report(sc); } else { bxe_link_report(sc); bxe_stats_handle(sc, STATS_EVENT_LINK_UP); } } static int bxe_initial_phy_init(struct bxe_softc *sc, int load_mode) { int rc, cfg_idx = bxe_get_link_cfg_idx(sc); uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx]; struct elink_params *lp = &sc->link_params; bxe_set_requested_fc(sc); if (CHIP_REV_IS_SLOW(sc)) { uint32_t bond = CHIP_BOND_ID(sc); uint32_t feat = 0; if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) { feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC; } else if (bond & 0x4) { if (CHIP_IS_E3(sc)) { feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC; } else { feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC; } } else if (bond & 0x8) { if (CHIP_IS_E3(sc)) { feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC; } else { feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC; } } /* disable EMAC for E3 and above */ if (bond & 0x2) { feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC; } sc->link_params.feature_config_flags |= feat; } bxe_acquire_phy_lock(sc); if (load_mode == LOAD_DIAG) { lp->loopback_mode = ELINK_LOOPBACK_XGXS; /* Prefer doing PHY loopback at 10G speed, if possible */ if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) { if (lp->speed_cap_mask[cfg_idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) { lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000; } else { lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000; } } } if (load_mode == LOAD_LOOPBACK_EXT) { lp->loopback_mode = ELINK_LOOPBACK_EXT; } rc = 
elink_phy_init(&sc->link_params, &sc->link_vars); bxe_release_phy_lock(sc); bxe_calc_fc_adv(sc); if (sc->link_vars.link_up) { bxe_stats_handle(sc, STATS_EVENT_LINK_UP); bxe_link_report(sc); } if (!CHIP_REV_IS_SLOW(sc)) { bxe_periodic_start(sc); } sc->link_params.req_line_speed[cfg_idx] = req_line_speed; return (rc); } static u_int bxe_push_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { struct ecore_mcast_list_elem *mc_mac = arg; mc_mac += cnt; mc_mac->mac = (uint8_t *)LLADDR(sdl); return (1); } static int bxe_init_mcast_macs_list(struct bxe_softc *sc, struct ecore_mcast_ramrod_params *p) { if_t ifp = sc->ifp; int mc_count; struct ecore_mcast_list_elem *mc_mac; ECORE_LIST_INIT(&p->mcast_list); p->mcast_list_len = 0; /* XXXGL: multicast count may change later */ mc_count = if_llmaddr_count(ifp); if (!mc_count) { return (0); } mc_mac = malloc(sizeof(*mc_mac) * mc_count, M_DEVBUF, (M_NOWAIT | M_ZERO)); if (!mc_mac) { BLOGE(sc, "Failed to allocate temp mcast list\n"); return (-1); } bzero(mc_mac, (sizeof(*mc_mac) * mc_count)); if_foreach_llmaddr(ifp, bxe_push_maddr, mc_mac); for (int i = 0; i < mc_count; i ++) { ECORE_LIST_PUSH_TAIL(&mc_mac[i].link, &p->mcast_list); BLOGD(sc, DBG_LOAD, "Setting MCAST %02X:%02X:%02X:%02X:%02X:%02X and mc_count %d\n", mc_mac[i].mac[0], mc_mac[i].mac[1], mc_mac[i].mac[2], mc_mac[i].mac[3], mc_mac[i].mac[4], mc_mac[i].mac[5], mc_count); } p->mcast_list_len = mc_count; return (0); } static void bxe_free_mcast_macs_list(struct ecore_mcast_ramrod_params *p) { struct ecore_mcast_list_elem *mc_mac = ECORE_LIST_FIRST_ENTRY(&p->mcast_list, struct ecore_mcast_list_elem, link); if (mc_mac) { /* only a single free as all mc_macs are in the same heap array */ free(mc_mac, M_DEVBUF); } } static int bxe_set_mc_list(struct bxe_softc *sc) { struct ecore_mcast_ramrod_params rparam = { NULL }; int rc = 0; rparam.mcast_obj = &sc->mcast_obj; BXE_MCAST_LOCK(sc); /* first, clear all configured multicast MACs */ rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL); if (rc < 0) { BLOGE(sc, "Failed to clear multicast configuration: %d\n", rc); /* Manual backport parts of FreeBSD upstream r284470. 
*/ BXE_MCAST_UNLOCK(sc); return (rc); } /* configure a new MACs list */ rc = bxe_init_mcast_macs_list(sc, &rparam); if (rc) { BLOGE(sc, "Failed to create mcast MACs list (%d)\n", rc); BXE_MCAST_UNLOCK(sc); return (rc); } /* Now add the new MACs */ rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_ADD); if (rc < 0) { BLOGE(sc, "Failed to set new mcast config (%d)\n", rc); } bxe_free_mcast_macs_list(&rparam); BXE_MCAST_UNLOCK(sc); return (rc); } struct bxe_set_addr_ctx { struct bxe_softc *sc; unsigned long ramrod_flags; int rc; }; static u_int bxe_set_addr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { struct bxe_set_addr_ctx *ctx = arg; struct ecore_vlan_mac_obj *mac_obj = &ctx->sc->sp_objs->mac_obj; int rc; if (ctx->rc < 0) return (0); rc = bxe_set_mac_one(ctx->sc, (uint8_t *)LLADDR(sdl), mac_obj, TRUE, ECORE_UC_LIST_MAC, &ctx->ramrod_flags); /* do not treat adding same MAC as an error */ if (rc == -EEXIST) BLOGD(ctx->sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n"); else if (rc < 0) { BLOGE(ctx->sc, "Failed to schedule ADD operations (%d)\n", rc); ctx->rc = rc; } return (1); } static int bxe_set_uc_list(struct bxe_softc *sc) { if_t ifp = sc->ifp; struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj; struct bxe_set_addr_ctx ctx = { sc, 0, 0 }; int rc; /* first schedule a cleanup up of old configuration */ rc = bxe_del_all_macs(sc, mac_obj, ECORE_UC_LIST_MAC, FALSE); if (rc < 0) { BLOGE(sc, "Failed to schedule delete of all ETH MACs (%d)\n", rc); return (rc); } if_foreach_lladdr(ifp, bxe_set_addr, &ctx); if (ctx.rc < 0) return (ctx.rc); /* Execute the pending commands */ bit_set(&ctx.ramrod_flags, RAMROD_CONT); return (bxe_set_mac_one(sc, NULL, mac_obj, FALSE /* don't care */, ECORE_UC_LIST_MAC, &ctx.ramrod_flags)); } static void bxe_set_rx_mode(struct bxe_softc *sc) { if_t ifp = sc->ifp; uint32_t rx_mode = BXE_RX_MODE_NORMAL; if (sc->state != BXE_STATE_OPEN) { BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state); return; } BLOGD(sc, DBG_SP, "if_flags(ifp)=0x%x\n", if_getflags(sc->ifp)); if (if_getflags(ifp) & IFF_PROMISC) { rx_mode = BXE_RX_MODE_PROMISC; } else if ((if_getflags(ifp) & IFF_ALLMULTI) || ((if_getamcount(ifp) > BXE_MAX_MULTICAST) && CHIP_IS_E1(sc))) { rx_mode = BXE_RX_MODE_ALLMULTI; } else { if (IS_PF(sc)) { /* some multicasts */ if (bxe_set_mc_list(sc) < 0) { rx_mode = BXE_RX_MODE_ALLMULTI; } if (bxe_set_uc_list(sc) < 0) { rx_mode = BXE_RX_MODE_PROMISC; } } } sc->rx_mode = rx_mode; /* schedule the rx_mode command */ if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) { BLOGD(sc, DBG_LOAD, "Scheduled setting rx_mode with ECORE...\n"); bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state); return; } if (IS_PF(sc)) { bxe_set_storm_rx_mode(sc); } } /* update flags in shmem */ static void bxe_update_drv_flags(struct bxe_softc *sc, uint32_t flags, uint32_t set) { uint32_t drv_flags; if (SHMEM2_HAS(sc, drv_flags)) { bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS); drv_flags = SHMEM2_RD(sc, drv_flags); if (set) { SET_FLAGS(drv_flags, flags); } else { RESET_FLAGS(drv_flags, flags); } SHMEM2_WR(sc, drv_flags, drv_flags); BLOGD(sc, DBG_LOAD, "drv_flags 0x%08x\n", drv_flags); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_DRV_FLAGS); } } /* periodic timer callout routine, only runs when the interface is up */ static void bxe_periodic_callout_func(void *xsc) { struct bxe_softc *sc = (struct bxe_softc *)xsc; int i; if (!BXE_CORE_TRYLOCK(sc)) { /* just bail and try again next time */ if ((sc->state == BXE_STATE_OPEN) && 
(atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) { /* schedule the next periodic callout */ callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc); } return; } if ((sc->state != BXE_STATE_OPEN) || (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) { BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state); BXE_CORE_UNLOCK(sc); return; } /* Check for TX timeouts on any fastpath. */ FOR_EACH_QUEUE(sc, i) { if (bxe_watchdog(sc, &sc->fp[i]) != 0) { /* Ruh-Roh, chip was reset! */ break; } } if (!CHIP_REV_IS_SLOW(sc)) { /* * This barrier is needed to ensure the ordering between the writing * to the sc->port.pmf in the bxe_nic_load() or bxe_pmf_update() and * the reading here. */ mb(); if (sc->port.pmf) { bxe_acquire_phy_lock(sc); elink_period_func(&sc->link_params, &sc->link_vars); bxe_release_phy_lock(sc); } } if (IS_PF(sc) && !(sc->flags & BXE_NO_PULSE)) { int mb_idx = SC_FW_MB_IDX(sc); uint32_t drv_pulse; uint32_t mcp_pulse; ++sc->fw_drv_pulse_wr_seq; sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK; drv_pulse = sc->fw_drv_pulse_wr_seq; bxe_drv_pulse(sc); mcp_pulse = (SHMEM_RD(sc, func_mb[mb_idx].mcp_pulse_mb) & MCP_PULSE_SEQ_MASK); /* * The delta between driver pulse and mcp response should * be 1 (before mcp response) or 0 (after mcp response). */ if ((drv_pulse != mcp_pulse) && (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) { /* someone lost a heartbeat... */ BLOGE(sc, "drv_pulse (0x%x) != mcp_pulse (0x%x)\n", drv_pulse, mcp_pulse); } } /* state is BXE_STATE_OPEN */ bxe_stats_handle(sc, STATS_EVENT_UPDATE); BXE_CORE_UNLOCK(sc); if ((sc->state == BXE_STATE_OPEN) && (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) { /* schedule the next periodic callout */ callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc); } } static void bxe_periodic_start(struct bxe_softc *sc) { atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO); callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc); } static void bxe_periodic_stop(struct bxe_softc *sc) { atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP); callout_drain(&sc->periodic_callout); } void bxe_parity_recover(struct bxe_softc *sc) { uint8_t global = FALSE; uint32_t error_recovered, error_unrecovered; bool is_parity; if ((sc->recovery_state == BXE_RECOVERY_FAILED) && (sc->state == BXE_STATE_ERROR)) { BLOGE(sc, "RECOVERY failed, " "stack notified driver is NOT running! " "Please reboot/power cycle the system.\n"); return; } while (1) { BLOGD(sc, DBG_SP, "%s sc=%p state=0x%x rec_state=0x%x error_status=%x\n", __func__, sc, sc->state, sc->recovery_state, sc->error_status); switch(sc->recovery_state) { case BXE_RECOVERY_INIT: is_parity = bxe_chk_parity_attn(sc, &global, FALSE); if ((CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) || (sc->error_status & BXE_ERR_MCP_ASSERT) || (sc->error_status & BXE_ERR_GLOBAL)) { BXE_CORE_LOCK(sc); if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) { bxe_periodic_stop(sc); } bxe_nic_unload(sc, UNLOAD_RECOVERY, false); sc->state = BXE_STATE_ERROR; sc->recovery_state = BXE_RECOVERY_FAILED; BLOGE(sc, " No Recovery tried for error 0x%x" " stack notified driver is NOT running!" " Please reboot/power cycle the system.\n", sc->error_status); BXE_CORE_UNLOCK(sc); return; } /* Try to get a LEADER_LOCK HW lock */ if (bxe_trylock_leader_lock(sc)) { bxe_set_reset_in_progress(sc); /* * Check if there is a global attention and if * there was a global attention, set the global * reset bit. 
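* Setting it lets functions on both engines see that a full (global) recovery is required.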
*/ if (global) { bxe_set_reset_global(sc); } sc->is_leader = 1; } /* If interface has been removed - break */ if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) { bxe_periodic_stop(sc); } BXE_CORE_LOCK(sc); bxe_nic_unload(sc,UNLOAD_RECOVERY, false); sc->recovery_state = BXE_RECOVERY_WAIT; BXE_CORE_UNLOCK(sc); /* * Ensure "is_leader", MCP command sequence and * "recovery_state" update values are seen on other * CPUs. */ mb(); break; case BXE_RECOVERY_WAIT: if (sc->is_leader) { int other_engine = SC_PATH(sc) ? 0 : 1; bool other_load_status = bxe_get_load_status(sc, other_engine); bool load_status = bxe_get_load_status(sc, SC_PATH(sc)); global = bxe_reset_is_global(sc); /* * In case of a parity in a global block, let * the first leader that performs a * leader_reset() reset the global blocks in * order to clear global attentions. Otherwise * the gates will remain closed for that * engine. */ if (load_status || (global && other_load_status)) { /* * Wait until all other functions get * down. */ taskqueue_enqueue_timeout(taskqueue_thread, &sc->sp_err_timeout_task, hz/10); return; } else { /* * If all other functions got down * try to bring the chip back to * normal. In any case it's an exit * point for a leader. */ if (bxe_leader_reset(sc)) { BLOGE(sc, "RECOVERY failed, " "stack notified driver is NOT running!\n"); sc->recovery_state = BXE_RECOVERY_FAILED; sc->state = BXE_STATE_ERROR; mb(); return; } /* * If we are here, means that the * leader has succeeded and doesn't * want to be a leader any more. Try * to continue as a none-leader. */ break; } } else { /* non-leader */ if (!bxe_reset_is_done(sc, SC_PATH(sc))) { /* * Try to get a LEADER_LOCK HW lock as * long as a former leader may have * been unloaded by the user or * released a leadership by another * reason. */ if (bxe_trylock_leader_lock(sc)) { /* * I'm a leader now! Restart a * switch case. */ sc->is_leader = 1; break; } taskqueue_enqueue_timeout(taskqueue_thread, &sc->sp_err_timeout_task, hz/10); return; } else { /* * If there was a global attention, wait * for it to be cleared. 
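* The leader clears it in bxe_leader_reset() once the chip reset completes.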
*/ if (bxe_reset_is_global(sc)) { taskqueue_enqueue_timeout(taskqueue_thread, &sc->sp_err_timeout_task, hz/10); return; } error_recovered = sc->eth_stats.recoverable_error; error_unrecovered = sc->eth_stats.unrecoverable_error; BXE_CORE_LOCK(sc); sc->recovery_state = BXE_RECOVERY_NIC_LOADING; if (bxe_nic_load(sc, LOAD_NORMAL)) { error_unrecovered++; sc->recovery_state = BXE_RECOVERY_FAILED; sc->state = BXE_STATE_ERROR; BLOGE(sc, "Recovery is NOT successfull, " " state=0x%x recovery_state=0x%x error=%x\n", sc->state, sc->recovery_state, sc->error_status); sc->error_status = 0; } else { sc->recovery_state = BXE_RECOVERY_DONE; error_recovered++; BLOGI(sc, "Recovery is successfull from errors %x," " state=0x%x" " recovery_state=0x%x \n", sc->error_status, sc->state, sc->recovery_state); mb(); } sc->error_status = 0; BXE_CORE_UNLOCK(sc); sc->eth_stats.recoverable_error = error_recovered; sc->eth_stats.unrecoverable_error = error_unrecovered; return; } } default: return; } } } void bxe_handle_error(struct bxe_softc * sc) { if(sc->recovery_state == BXE_RECOVERY_WAIT) { return; } if(sc->error_status) { if (sc->state == BXE_STATE_OPEN) { bxe_int_disable(sc); } if (sc->link_vars.link_up) { if_link_state_change(sc->ifp, LINK_STATE_DOWN); } sc->recovery_state = BXE_RECOVERY_INIT; BLOGI(sc, "bxe%d: Recovery started errors 0x%x recovery state 0x%x\n", sc->unit, sc->error_status, sc->recovery_state); bxe_parity_recover(sc); } } static void bxe_sp_err_timeout_task(void *arg, int pending) { struct bxe_softc *sc = (struct bxe_softc *)arg; BLOGD(sc, DBG_SP, "%s state = 0x%x rec state=0x%x error_status=%x\n", __func__, sc->state, sc->recovery_state, sc->error_status); if((sc->recovery_state == BXE_RECOVERY_FAILED) && (sc->state == BXE_STATE_ERROR)) { return; } /* if can be taken */ if ((sc->error_status) && (sc->trigger_grcdump)) { bxe_grc_dump(sc); } if (sc->recovery_state != BXE_RECOVERY_DONE) { bxe_handle_error(sc); bxe_parity_recover(sc); } else if (sc->error_status) { bxe_handle_error(sc); } return; } /* start the controller */ static __noinline int bxe_nic_load(struct bxe_softc *sc, int load_mode) { uint32_t val; int load_code = 0; int i, rc = 0; BXE_CORE_LOCK_ASSERT(sc); BLOGD(sc, DBG_LOAD, "Starting NIC load...\n"); sc->state = BXE_STATE_OPENING_WAITING_LOAD; if (IS_PF(sc)) { /* must be called before memory allocation and HW init */ bxe_ilt_set_info(sc); } sc->last_reported_link_state = LINK_STATE_UNKNOWN; bxe_set_fp_rx_buf_size(sc); if (bxe_alloc_fp_buffers(sc) != 0) { BLOGE(sc, "Failed to allocate fastpath memory\n"); sc->state = BXE_STATE_CLOSED; rc = ENOMEM; goto bxe_nic_load_error0; } if (bxe_alloc_mem(sc) != 0) { sc->state = BXE_STATE_CLOSED; rc = ENOMEM; goto bxe_nic_load_error0; } if (bxe_alloc_fw_stats_mem(sc) != 0) { sc->state = BXE_STATE_CLOSED; rc = ENOMEM; goto bxe_nic_load_error0; } if (IS_PF(sc)) { /* set pf load just before approaching the MCP */ bxe_set_pf_load(sc); /* if MCP exists send load request and analyze response */ if (!BXE_NOMCP(sc)) { /* attempt to load pf */ if (bxe_nic_load_request(sc, &load_code) != 0) { sc->state = BXE_STATE_CLOSED; rc = ENXIO; goto bxe_nic_load_error1; } /* what did the MCP say? 
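The response tells this function whether it must also perform the common (per-chip/per-port) initialization.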
*/ if (bxe_nic_load_analyze_req(sc, load_code) != 0) { bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); sc->state = BXE_STATE_CLOSED; rc = ENXIO; goto bxe_nic_load_error2; } } else { BLOGI(sc, "Device has no MCP!\n"); load_code = bxe_nic_load_no_mcp(sc); } /* mark PMF if applicable */ bxe_nic_load_pmf(sc, load_code); /* Init Function state controlling object */ bxe_init_func_obj(sc); /* Initialize HW */ if (bxe_init_hw(sc, load_code) != 0) { BLOGE(sc, "HW init failed\n"); bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); sc->state = BXE_STATE_CLOSED; rc = ENXIO; goto bxe_nic_load_error2; } } /* set ALWAYS_ALIVE bit in shmem */ sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; bxe_drv_pulse(sc); sc->flags |= BXE_NO_PULSE; /* attach interrupts */ if (bxe_interrupt_attach(sc) != 0) { sc->state = BXE_STATE_CLOSED; rc = ENXIO; goto bxe_nic_load_error2; } bxe_nic_init(sc, load_code); /* Init per-function objects */ if (IS_PF(sc)) { bxe_init_objs(sc); // XXX bxe_iov_nic_init(sc); /* set AFEX default VLAN tag to an invalid value */ sc->devinfo.mf_info.afex_def_vlan_tag = -1; // XXX bxe_nic_load_afex_dcc(sc, load_code); sc->state = BXE_STATE_OPENING_WAITING_PORT; rc = bxe_func_start(sc); if (rc) { BLOGE(sc, "Function start failed! rc = %d\n", rc); bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); sc->state = BXE_STATE_ERROR; goto bxe_nic_load_error3; } /* send LOAD_DONE command to MCP */ if (!BXE_NOMCP(sc)) { load_code = bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); if (!load_code) { BLOGE(sc, "MCP response failure, aborting\n"); sc->state = BXE_STATE_ERROR; rc = ENXIO; goto bxe_nic_load_error3; } } rc = bxe_setup_leading(sc); if (rc) { BLOGE(sc, "Setup leading failed! rc = %d\n", rc); sc->state = BXE_STATE_ERROR; goto bxe_nic_load_error3; } FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) { rc = bxe_setup_queue(sc, &sc->fp[i], FALSE); if (rc) { BLOGE(sc, "Queue(%d) setup failed rc = %d\n", i, rc); sc->state = BXE_STATE_ERROR; goto bxe_nic_load_error3; } } rc = bxe_init_rss_pf(sc); if (rc) { BLOGE(sc, "PF RSS init failed\n"); sc->state = BXE_STATE_ERROR; goto bxe_nic_load_error3; } } /* XXX VF */ /* now when Clients are configured we are ready to work */ sc->state = BXE_STATE_OPEN; /* Configure a ucast MAC */ if (IS_PF(sc)) { rc = bxe_set_eth_mac(sc, TRUE); } if (rc) { BLOGE(sc, "Setting Ethernet MAC failed rc = %d\n", rc); sc->state = BXE_STATE_ERROR; goto bxe_nic_load_error3; } if (sc->port.pmf) { rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN); if (rc) { sc->state = BXE_STATE_ERROR; goto bxe_nic_load_error3; } } sc->link_params.feature_config_flags &= ~ELINK_FEATURE_CONFIG_BOOT_FROM_SAN; /* start fast path */ /* Initialize Rx filter */ bxe_set_rx_mode(sc); /* start the Tx */ switch (/* XXX load_mode */LOAD_OPEN) { case LOAD_NORMAL: case LOAD_OPEN: break; case LOAD_DIAG: case LOAD_LOOPBACK_EXT: sc->state = BXE_STATE_DIAG; break; default: break; } if (sc->port.pmf) { bxe_update_drv_flags(sc, 1 << DRV_FLAGS_PORT_MASK, 0); } else { bxe_link_status_update(sc); } /* start the periodic timer callout */ bxe_periodic_start(sc); if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) { /* mark driver is loaded in shmem2 */ val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]); SHMEM2_WR(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)], (val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED | DRV_FLAGS_CAPABILITIES_LOADED_L2)); } /* wait for all pending SP commands to complete */ if (IS_PF(sc) && !bxe_wait_sp_comp(sc, ~0x0UL)) { BLOGE(sc, "Timeout waiting for all SPs to complete!\n"); bxe_periodic_stop(sc); 
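/* slowpath completion timed out; unwind the load and report the failure */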
bxe_nic_unload(sc, UNLOAD_CLOSE, FALSE); return (ENXIO); } /* Tell the stack the driver is running! */ if_setdrvflags(sc->ifp, IFF_DRV_RUNNING); BLOGD(sc, DBG_LOAD, "NIC successfully loaded\n"); return (0); bxe_nic_load_error3: if (IS_PF(sc)) { bxe_int_disable_sync(sc, 1); /* clean out queued objects */ bxe_squeeze_objects(sc); } bxe_interrupt_detach(sc); bxe_nic_load_error2: if (IS_PF(sc) && !BXE_NOMCP(sc)) { bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0); bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, 0); } sc->port.pmf = 0; bxe_nic_load_error1: /* clear pf_load status, as it was already set */ if (IS_PF(sc)) { bxe_clear_pf_load(sc); } bxe_nic_load_error0: bxe_free_fw_stats_mem(sc); bxe_free_fp_buffers(sc); bxe_free_mem(sc); return (rc); } static int bxe_init_locked(struct bxe_softc *sc) { int other_engine = SC_PATH(sc) ? 0 : 1; uint8_t other_load_status, load_status; uint8_t global = FALSE; int rc; BXE_CORE_LOCK_ASSERT(sc); /* check if the driver is already running */ if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) { BLOGD(sc, DBG_LOAD, "Init called while driver is running!\n"); return (0); } if((sc->state == BXE_STATE_ERROR) && (sc->recovery_state == BXE_RECOVERY_FAILED)) { BLOGE(sc, "Initialization not done, " "as previous recovery failed." "Reboot/Power-cycle the system\n" ); return (ENXIO); } bxe_set_power_state(sc, PCI_PM_D0); /* * If parity occurred during the unload, then attentions and/or * RECOVERY_IN_PROGRES may still be set. If so we want the first function * loaded on the current engine to complete the recovery. Parity recovery * is only relevant for PF driver. */ if (IS_PF(sc)) { other_load_status = bxe_get_load_status(sc, other_engine); load_status = bxe_get_load_status(sc, SC_PATH(sc)); if (!bxe_reset_is_done(sc, SC_PATH(sc)) || bxe_chk_parity_attn(sc, &global, TRUE)) { do { /* * If there are attentions and they are in global blocks, set * the GLOBAL_RESET bit regardless whether it will be this * function that will complete the recovery or not. */ if (global) { bxe_set_reset_global(sc); } /* * Only the first function on the current engine should try * to recover in open. In case of attentions in global blocks * only the first in the chip should try to recover. */ if ((!load_status && (!global || !other_load_status)) && bxe_trylock_leader_lock(sc) && !bxe_leader_reset(sc)) { BLOGI(sc, "Recovered during init\n"); break; } /* recovery has failed... */ bxe_set_power_state(sc, PCI_PM_D3hot); sc->recovery_state = BXE_RECOVERY_FAILED; BLOGE(sc, "Recovery flow hasn't properly " "completed yet, try again later. " "If you still see this message after a " "few retries then power cycle is required.\n"); rc = ENXIO; goto bxe_init_locked_done; } while (0); } } sc->recovery_state = BXE_RECOVERY_DONE; rc = bxe_nic_load(sc, LOAD_OPEN); bxe_init_locked_done: if (rc) { /* Tell the stack the driver is NOT running! */ BLOGE(sc, "Initialization failed, " "stack notified driver is NOT running!\n"); if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING); } return (rc); } static int bxe_stop_locked(struct bxe_softc *sc) { BXE_CORE_LOCK_ASSERT(sc); return (bxe_nic_unload(sc, UNLOAD_NORMAL, TRUE)); } /* * Handles controller initialization when called from an unlocked routine. * ifconfig calls this function. 
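* It acquires the core lock and calls bxe_init_locked().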
* * Returns: * void */ static void bxe_init(void *xsc) { struct bxe_softc *sc = (struct bxe_softc *)xsc; BXE_CORE_LOCK(sc); bxe_init_locked(sc); BXE_CORE_UNLOCK(sc); } static int bxe_init_ifnet(struct bxe_softc *sc) { if_t ifp; int capabilities; /* ifconfig entrypoint for media type/status reporting */ ifmedia_init(&sc->ifmedia, IFM_IMASK, bxe_ifmedia_update, bxe_ifmedia_status); /* set the default interface values */ ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL); ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL); ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO)); sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? */ BLOGI(sc, "IFMEDIA flags : %x\n", sc->ifmedia.ifm_media); /* allocate the ifnet structure */ if ((ifp = if_gethandle(IFT_ETHER)) == NULL) { BLOGE(sc, "Interface allocation failed!\n"); return (ENXIO); } if_setsoftc(ifp, sc); if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev)); if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST)); if_setioctlfn(ifp, bxe_ioctl); if_setstartfn(ifp, bxe_tx_start); if_setgetcounterfn(ifp, bxe_get_counter); #if __FreeBSD_version >= 901504 if_settransmitfn(ifp, bxe_tx_mq_start); if_setqflushfn(ifp, bxe_mq_flush); #endif #ifdef FreeBSD8_0 if_settimer(ifp, 0); #endif if_setinitfn(ifp, bxe_init); if_setmtu(ifp, sc->mtu); if_sethwassist(ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)); capabilities = #if __FreeBSD_version < 700000 (IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | IFCAP_HWCSUM | IFCAP_JUMBO_MTU | IFCAP_LRO); #else (IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWCSUM | IFCAP_HWCSUM | IFCAP_JUMBO_MTU | IFCAP_LRO | IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_WOL_MAGIC); #endif if_setcapabilitiesbit(ifp, capabilities, 0); /* XXX */ if_setcapenable(ifp, if_getcapabilities(ifp)); if_setbaudrate(ifp, IF_Gbps(10)); /* XXX */ if_setsendqlen(ifp, sc->tx_ring_size); if_setsendqready(ifp); /* XXX */ sc->ifp = ifp; /* attach to the Ethernet interface list */ ether_ifattach(ifp, sc->link_params.mac_addr); /* Attach driver debugnet methods. */ DEBUGNET_SET(ifp, bxe); return (0); } static void bxe_deallocate_bars(struct bxe_softc *sc) { int i; for (i = 0; i < MAX_BARS; i++) { if (sc->bar[i].resource != NULL) { bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->bar[i].rid, sc->bar[i].resource); BLOGD(sc, DBG_LOAD, "Released PCI BAR%d [%02x] memory\n", i, PCIR_BAR(i)); } } } static int bxe_allocate_bars(struct bxe_softc *sc) { u_int flags; int i; memset(sc->bar, 0, sizeof(sc->bar)); for (i = 0; i < MAX_BARS; i++) { /* memory resources reside at BARs 0, 2, 4 */ /* Run `pciconf -lb` to see mappings */ if ((i != 0) && (i != 2) && (i != 4)) { continue; } sc->bar[i].rid = PCIR_BAR(i); flags = RF_ACTIVE; if (i == 0) { flags |= RF_SHAREABLE; } if ((sc->bar[i].resource = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &sc->bar[i].rid, flags)) == NULL) { return (0); } sc->bar[i].tag = rman_get_bustag(sc->bar[i].resource); sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource); sc->bar[i].kva = (vm_offset_t)rman_get_virtual(sc->bar[i].resource); BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %#jx-%#jx (%jd) -> %#jx\n", i, PCIR_BAR(i), rman_get_start(sc->bar[i].resource), rman_get_end(sc->bar[i].resource), rman_get_size(sc->bar[i].resource), (uintmax_t)sc->bar[i].kva); } return (0); } static void bxe_get_function_num(struct bxe_softc *sc) { uint32_t val = 0; /* * Read the ME register to get the function number. 
The ME register * holds the relative-function number and absolute-function number. The * absolute-function number appears only in E2 and above. Before that * these bits always contained zero, therefore we cannot blindly use them. */ val = REG_RD(sc, BAR_ME_REGISTER); sc->pfunc_rel = (uint8_t)((val & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT); sc->path_id = (uint8_t)((val & ME_REG_ABS_PF_NUM) >> ME_REG_ABS_PF_NUM_SHIFT) & 1; if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) { sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id); } else { sc->pfunc_abs = (sc->pfunc_rel | sc->path_id); } BLOGD(sc, DBG_LOAD, "Relative function %d, Absolute function %d, Path %d\n", sc->pfunc_rel, sc->pfunc_abs, sc->path_id); } static uint32_t bxe_get_shmem_mf_cfg_base(struct bxe_softc *sc) { uint32_t shmem2_size; uint32_t offset; uint32_t mf_cfg_offset_value; /* Non 57712 */ offset = (SHMEM_RD(sc, func_mb) + (MAX_FUNC_NUM * sizeof(struct drv_func_mb))); /* 57712 plus */ if (sc->devinfo.shmem2_base != 0) { shmem2_size = SHMEM2_RD(sc, size); if (shmem2_size > offsetof(struct shmem2_region, mf_cfg_addr)) { mf_cfg_offset_value = SHMEM2_RD(sc, mf_cfg_addr); if (SHMEM_MF_CFG_ADDR_NONE != mf_cfg_offset_value) { offset = mf_cfg_offset_value; } } } return (offset); } static uint32_t bxe_pcie_capability_read(struct bxe_softc *sc, int reg, int width) { int pcie_reg; /* ensure PCIe capability is enabled */ if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) { if (pcie_reg != 0) { BLOGD(sc, DBG_LOAD, "PCIe capability at 0x%04x\n", pcie_reg); return (pci_read_config(sc->dev, (pcie_reg + reg), width)); } } BLOGE(sc, "PCIe capability NOT FOUND!!!\n"); return (0); } static uint8_t bxe_is_pcie_pending(struct bxe_softc *sc) { return (bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA, 2) & PCIM_EXP_STA_TRANSACTION_PND); } /* * Walk the PCI capabilities list for the device to find what features are * supported. These capabilities may be enabled/disabled by firmware so it's * best to walk the list rather than make assumptions. */ static void bxe_probe_pci_caps(struct bxe_softc *sc) { uint16_t link_status; int reg; /* check if PCI Power Management is enabled */ if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) { if (reg != 0) { BLOGD(sc, DBG_LOAD, "Found PM capability at 0x%04x\n", reg); sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG; sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg; } } link_status = bxe_pcie_capability_read(sc, PCIR_EXPRESS_LINK_STA, 2); /* handle PCIe 2.0 workarounds for 57710 */ if (CHIP_IS_E1(sc)) { /* workaround for 57710 errata E4_57710_27462 */ sc->devinfo.pcie_link_speed = (REG_RD(sc, 0x3d04) & (1 << 24)) ?
2 : 1; /* workaround for 57710 errata E4_57710_27488 */ sc->devinfo.pcie_link_width = ((link_status & PCIM_LINK_STA_WIDTH) >> 4); if (sc->devinfo.pcie_link_speed > 1) { sc->devinfo.pcie_link_width = ((link_status & PCIM_LINK_STA_WIDTH) >> 4) >> 1; } } else { sc->devinfo.pcie_link_speed = (link_status & PCIM_LINK_STA_SPEED); sc->devinfo.pcie_link_width = ((link_status & PCIM_LINK_STA_WIDTH) >> 4); } BLOGD(sc, DBG_LOAD, "PCIe link speed=%d width=%d\n", sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width); sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG; sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg; /* check if MSI capability is enabled */ if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) { if (reg != 0) { BLOGD(sc, DBG_LOAD, "Found MSI capability at 0x%04x\n", reg); sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG; sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg; } } /* check if MSI-X capability is enabled */ if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) { if (reg != 0) { BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg); sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG; sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg; } } } static int bxe_get_shmem_mf_cfg_info_sd(struct bxe_softc *sc) { struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; uint32_t val; /* get the outer vlan if we're in switch-dependent mode */ val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); mf_info->ext_id = (uint16_t)val; mf_info->multi_vnics_mode = 1; if (!VALID_OVLAN(mf_info->ext_id)) { BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id); return (1); } /* get the capabilities */ if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) == FUNC_MF_CFG_PROTOCOL_ISCSI) { mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI; } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) == FUNC_MF_CFG_PROTOCOL_FCOE) { mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE; } else { mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET; } mf_info->vnics_per_port = (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; return (0); } static uint32_t bxe_get_shmem_ext_proto_support_flags(struct bxe_softc *sc) { uint32_t retval = 0; uint32_t val; val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg); if (val & MACP_FUNC_CFG_FLAGS_ENABLED) { if (val & MACP_FUNC_CFG_FLAGS_ETHERNET) { retval |= MF_PROTO_SUPPORT_ETHERNET; } if (val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { retval |= MF_PROTO_SUPPORT_ISCSI; } if (val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) { retval |= MF_PROTO_SUPPORT_FCOE; } } return (retval); } static int bxe_get_shmem_mf_cfg_info_si(struct bxe_softc *sc) { struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; uint32_t val; /* * There is no outer vlan if we're in switch-independent mode. * If the mac is valid then assume multi-function. */ val = MFCFG_RD(sc, func_ext_config[SC_ABS_FUNC(sc)].func_cfg); mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0); mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc); mf_info->vnics_per_port = (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ?
2 : 4; return (0); } static int bxe_get_shmem_mf_cfg_info_niv(struct bxe_softc *sc) { struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; uint32_t e1hov_tag; uint32_t func_config; uint32_t niv_config; mf_info->multi_vnics_mode = 1; e1hov_tag = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); func_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); niv_config = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].afex_config); mf_info->ext_id = (uint16_t)((e1hov_tag & FUNC_MF_CFG_E1HOV_TAG_MASK) >> FUNC_MF_CFG_E1HOV_TAG_SHIFT); mf_info->default_vlan = (uint16_t)((e1hov_tag & FUNC_MF_CFG_AFEX_VLAN_MASK) >> FUNC_MF_CFG_AFEX_VLAN_SHIFT); mf_info->niv_allowed_priorities = (uint8_t)((niv_config & FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >> FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT); mf_info->niv_default_cos = (uint8_t)((func_config & FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >> FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT); mf_info->afex_vlan_mode = ((niv_config & FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >> FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT); mf_info->niv_mba_enabled = ((niv_config & FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK) >> FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT); mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc); mf_info->vnics_per_port = (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) ? 2 : 4; return (0); } static int bxe_check_valid_mf_cfg(struct bxe_softc *sc) { struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; uint32_t mf_cfg1; uint32_t mf_cfg2; uint32_t ovlan1; uint32_t ovlan2; uint8_t i, j; BLOGD(sc, DBG_LOAD, "MF config parameters for function %d\n", SC_PORT(sc)); BLOGD(sc, DBG_LOAD, "\tmf_config=0x%x\n", mf_info->mf_config[SC_VN(sc)]); BLOGD(sc, DBG_LOAD, "\tmulti_vnics_mode=%d\n", mf_info->multi_vnics_mode); BLOGD(sc, DBG_LOAD, "\tvnics_per_port=%d\n", mf_info->vnics_per_port); BLOGD(sc, DBG_LOAD, "\tovlan/vifid=%d\n", mf_info->ext_id); BLOGD(sc, DBG_LOAD, "\tmin_bw=%d/%d/%d/%d\n", mf_info->min_bw[0], mf_info->min_bw[1], mf_info->min_bw[2], mf_info->min_bw[3]); BLOGD(sc, DBG_LOAD, "\tmax_bw=%d/%d/%d/%d\n", mf_info->max_bw[0], mf_info->max_bw[1], mf_info->max_bw[2], mf_info->max_bw[3]); BLOGD(sc, DBG_LOAD, "\tmac_addr: %s\n", sc->mac_addr_str); /* various MF mode sanity checks... */ if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) { BLOGE(sc, "Enumerated function %d is marked as hidden\n", SC_PORT(sc)); return (1); } if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) { BLOGE(sc, "vnics_per_port=%d multi_vnics_mode=%d\n", mf_info->vnics_per_port, mf_info->multi_vnics_mode); return (1); } if (mf_info->mf_mode == MULTI_FUNCTION_SD) { /* vnic id > 0 must have valid ovlan in switch-dependent mode */ if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) { BLOGE(sc, "mf_mode=SD vnic_id=%d ovlan=%d\n", SC_VN(sc), OVLAN(sc)); return (1); } if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) { BLOGE(sc, "mf_mode=SD multi_vnics_mode=%d ovlan=%d\n", mf_info->multi_vnics_mode, OVLAN(sc)); return (1); } /* * Verify all functions are either MF or SF mode. If MF, make sure * that all non-hidden functions have a valid ovlan. If SF, * make sure that all non-hidden functions have an invalid ovlan.
*/ FOREACH_ABS_FUNC_IN_PORT(sc, i) { mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config); ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag); if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) && (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) || ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) { BLOGE(sc, "mf_mode=SD function %d MF config " "mismatch, multi_vnics_mode=%d ovlan=%d\n", i, mf_info->multi_vnics_mode, ovlan1); return (1); } } /* Verify all funcs on the same port each have a different ovlan. */ FOREACH_ABS_FUNC_IN_PORT(sc, i) { mf_cfg1 = MFCFG_RD(sc, func_mf_config[i].config); ovlan1 = MFCFG_RD(sc, func_mf_config[i].e1hov_tag); /* iterate from the next function on the port to the max func */ for (j = i + 2; j < MAX_FUNC_NUM; j += 2) { mf_cfg2 = MFCFG_RD(sc, func_mf_config[j].config); ovlan2 = MFCFG_RD(sc, func_mf_config[j].e1hov_tag); if (!(mf_cfg1 & FUNC_MF_CFG_FUNC_HIDE) && VALID_OVLAN(ovlan1) && !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE) && VALID_OVLAN(ovlan2) && (ovlan1 == ovlan2)) { BLOGE(sc, "mf_mode=SD functions %d and %d " "have the same ovlan (%d)\n", i, j, ovlan1); return (1); } } } } /* MULTI_FUNCTION_SD */ return (0); } static int bxe_get_mf_cfg_info(struct bxe_softc *sc) { struct bxe_mf_info *mf_info = &sc->devinfo.mf_info; uint32_t val, mac_upper; uint8_t i, vnic; /* initialize mf_info defaults */ mf_info->vnics_per_port = 1; mf_info->multi_vnics_mode = FALSE; mf_info->path_has_ovlan = FALSE; mf_info->mf_mode = SINGLE_FUNCTION; if (!CHIP_IS_MF_CAP(sc)) { return (0); } if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) { BLOGE(sc, "Invalid mf_cfg_base!\n"); return (1); } /* get the MF mode (switch dependent / independent / single-function) */ val = SHMEM_RD(sc, dev_info.shared_feature_config.config); switch (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) { case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT: mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); /* check for legal upper mac bytes */ if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) { mf_info->mf_mode = MULTI_FUNCTION_SI; } else { BLOGE(sc, "Invalid config for Switch Independent mode\n"); } break; case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: case SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4: /* get outer vlan configuration */ val = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].e1hov_tag); if ((val & FUNC_MF_CFG_E1HOV_TAG_MASK) != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { mf_info->mf_mode = MULTI_FUNCTION_SD; } else { BLOGE(sc, "Invalid config for Switch Dependent mode\n"); } break; case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF: /* not in MF mode, vnics_per_port=1 and multi_vnics_mode=FALSE */ return (0); case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE: /* * Mark MF mode as NIV if MCP version includes NPAR-SD support * and the MAC address is valid. */ mac_upper = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); if ((SHMEM2_HAS(sc, afex_driver_support)) && (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) { mf_info->mf_mode = MULTI_FUNCTION_AFEX; } else { BLOGE(sc, "Invalid config for AFEX mode\n"); } break; default: BLOGE(sc, "Unknown MF mode (0x%08x)\n", (val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK)); return (1); } /* set path mf_mode (which could be different than function mf_mode) */ if (mf_info->mf_mode == MULTI_FUNCTION_SD) { mf_info->path_has_ovlan = TRUE; } else if (mf_info->mf_mode == SINGLE_FUNCTION) { /* * Decide on path multi vnics mode. 
If we're not in MF mode and in * 4-port mode, this is good enough to check vnic-0 of the other port * on the same path */ if (CHIP_PORT_MODE(sc) == CHIP_4_PORT_MODE) { uint8_t other_port = !(PORT_ID(sc) & 1); uint8_t abs_func_other_port = (SC_PATH(sc) + (2 * other_port)); val = MFCFG_RD(sc, func_mf_config[abs_func_other_port].e1hov_tag); mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 1 : 0; } } if (mf_info->mf_mode == SINGLE_FUNCTION) { /* invalid MF config */ if (SC_VN(sc) >= 1) { BLOGE(sc, "VNIC ID >= 1 in SF mode\n"); return (1); } return (0); } /* get the MF configuration */ mf_info->mf_config[SC_VN(sc)] = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].config); switch(mf_info->mf_mode) { case MULTI_FUNCTION_SD: bxe_get_shmem_mf_cfg_info_sd(sc); break; case MULTI_FUNCTION_SI: bxe_get_shmem_mf_cfg_info_si(sc); break; case MULTI_FUNCTION_AFEX: bxe_get_shmem_mf_cfg_info_niv(sc); break; default: BLOGE(sc, "Get MF config failed (mf_mode=0x%08x)\n", mf_info->mf_mode); return (1); } /* get the congestion management parameters */ vnic = 0; FOREACH_ABS_FUNC_IN_PORT(sc, i) { /* get min/max bw */ val = MFCFG_RD(sc, func_mf_config[i].config); mf_info->min_bw[vnic] = ((val & FUNC_MF_CFG_MIN_BW_MASK) >> FUNC_MF_CFG_MIN_BW_SHIFT); mf_info->max_bw[vnic] = ((val & FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT); vnic++; } return (bxe_check_valid_mf_cfg(sc)); } static int bxe_get_shmem_info(struct bxe_softc *sc) { int port; uint32_t mac_hi, mac_lo, val; port = SC_PORT(sc); mac_hi = mac_lo = 0; sc->link_params.sc = sc; sc->link_params.port = port; /* get the hardware config info */ sc->devinfo.hw_config = SHMEM_RD(sc, dev_info.shared_hw_config.config); sc->devinfo.hw_config2 = SHMEM_RD(sc, dev_info.shared_hw_config.config2); sc->link_params.hw_led_mode = ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >> SHARED_HW_CFG_LED_MODE_SHIFT); /* get the port feature config */ sc->port.config = SHMEM_RD(sc, dev_info.port_feature_config[port].config); /* get the link params */ sc->link_params.speed_cap_mask[0] = SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask); sc->link_params.speed_cap_mask[1] = SHMEM_RD(sc, dev_info.port_hw_config[port].speed_capability_mask2); /* get the lane config */ sc->link_params.lane_config = SHMEM_RD(sc, dev_info.port_hw_config[port].lane_config); /* get the link config */ val = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config); sc->port.link_config[ELINK_INT_PHY] = val; sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK); sc->port.link_config[ELINK_EXT_PHY1] = SHMEM_RD(sc, dev_info.port_feature_config[port].link_config2); /* get the override preemphasis flag and enable it or turn it off */ val = SHMEM_RD(sc, dev_info.shared_feature_config.config); if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED) { sc->link_params.feature_config_flags |= ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; } else { sc->link_params.feature_config_flags &= ~ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED; } /* get the initial value of the link params */ sc->link_params.multi_phy_config = SHMEM_RD(sc, dev_info.port_hw_config[port].multi_phy_config); /* get external phy info */ sc->port.ext_phy_config = SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config); /* get the multifunction configuration */ bxe_get_mf_cfg_info(sc); /* get the mac address */ if (IS_MF(sc)) { mac_hi = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_upper); mac_lo = MFCFG_RD(sc, func_mf_config[SC_ABS_FUNC(sc)].mac_lower); } else { mac_hi = 
SHMEM_RD(sc, dev_info.port_hw_config[port].mac_upper); mac_lo = SHMEM_RD(sc, dev_info.port_hw_config[port].mac_lower); } if ((mac_lo == 0) && (mac_hi == 0)) { *sc->mac_addr_str = 0; BLOGE(sc, "No Ethernet address programmed!\n"); } else { sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8); sc->link_params.mac_addr[1] = (uint8_t)(mac_hi); sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24); sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16); sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8); sc->link_params.mac_addr[5] = (uint8_t)(mac_lo); snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str), "%02x:%02x:%02x:%02x:%02x:%02x", sc->link_params.mac_addr[0], sc->link_params.mac_addr[1], sc->link_params.mac_addr[2], sc->link_params.mac_addr[3], sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]); BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str); } return (0); } static void bxe_get_tunable_params(struct bxe_softc *sc) { /* sanity checks */ if ((bxe_interrupt_mode != INTR_MODE_INTX) && (bxe_interrupt_mode != INTR_MODE_MSI) && (bxe_interrupt_mode != INTR_MODE_MSIX)) { BLOGW(sc, "invalid interrupt_mode value (%d)\n", bxe_interrupt_mode); bxe_interrupt_mode = INTR_MODE_MSIX; } if ((bxe_queue_count < 0) || (bxe_queue_count > MAX_RSS_CHAINS)) { BLOGW(sc, "invalid queue_count value (%d)\n", bxe_queue_count); bxe_queue_count = 0; } if ((bxe_max_rx_bufs < 1) || (bxe_max_rx_bufs > RX_BD_USABLE)) { if (bxe_max_rx_bufs == 0) { bxe_max_rx_bufs = RX_BD_USABLE; } else { BLOGW(sc, "invalid max_rx_bufs (%d)\n", bxe_max_rx_bufs); bxe_max_rx_bufs = 2048; } } if ((bxe_hc_rx_ticks < 1) || (bxe_hc_rx_ticks > 100)) { BLOGW(sc, "invalid hc_rx_ticks (%d)\n", bxe_hc_rx_ticks); bxe_hc_rx_ticks = 25; } if ((bxe_hc_tx_ticks < 1) || (bxe_hc_tx_ticks > 100)) { BLOGW(sc, "invalid hc_tx_ticks (%d)\n", bxe_hc_tx_ticks); bxe_hc_tx_ticks = 50; } if (bxe_max_aggregation_size == 0) { bxe_max_aggregation_size = TPA_AGG_SIZE; } if (bxe_max_aggregation_size > 0xffff) { BLOGW(sc, "invalid max_aggregation_size (%d)\n", bxe_max_aggregation_size); bxe_max_aggregation_size = TPA_AGG_SIZE; } if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) { BLOGW(sc, "invalid mrrs (%d)\n", bxe_mrrs); bxe_mrrs = -1; } if ((bxe_autogreeen < 0) || (bxe_autogreeen > 2)) { BLOGW(sc, "invalid autogreeen (%d)\n", bxe_autogreeen); bxe_autogreeen = 0; } if ((bxe_udp_rss < 0) || (bxe_udp_rss > 1)) { BLOGW(sc, "invalid udp_rss (%d)\n", bxe_udp_rss); bxe_udp_rss = 0; } /* pull in user settings */ sc->interrupt_mode = bxe_interrupt_mode; sc->max_rx_bufs = bxe_max_rx_bufs; sc->hc_rx_ticks = bxe_hc_rx_ticks; sc->hc_tx_ticks = bxe_hc_tx_ticks; sc->max_aggregation_size = bxe_max_aggregation_size; sc->mrrs = bxe_mrrs; sc->autogreeen = bxe_autogreeen; sc->udp_rss = bxe_udp_rss; if (bxe_interrupt_mode == INTR_MODE_INTX) { sc->num_queues = 1; } else { /* INTR_MODE_MSI or INTR_MODE_MSIX */ sc->num_queues = min((bxe_queue_count ? 
bxe_queue_count : mp_ncpus), MAX_RSS_CHAINS); if (sc->num_queues > mp_ncpus) { sc->num_queues = mp_ncpus; } } BLOGD(sc, DBG_LOAD, "User Config: " "debug=0x%lx " "interrupt_mode=%d " "queue_count=%d " "hc_rx_ticks=%d " "hc_tx_ticks=%d " "rx_budget=%d " "max_aggregation_size=%d " "mrrs=%d " "autogreeen=%d " "udp_rss=%d\n", bxe_debug, sc->interrupt_mode, sc->num_queues, sc->hc_rx_ticks, sc->hc_tx_ticks, bxe_rx_budget, sc->max_aggregation_size, sc->mrrs, sc->autogreeen, sc->udp_rss); } static int bxe_media_detect(struct bxe_softc *sc) { int port_type; uint32_t phy_idx = bxe_get_cur_phy_idx(sc); switch (sc->link_params.phy[phy_idx].media_type) { case ELINK_ETH_PHY_SFPP_10G_FIBER: case ELINK_ETH_PHY_XFP_FIBER: BLOGI(sc, "Found 10Gb Fiber media.\n"); sc->media = IFM_10G_SR; port_type = PORT_FIBRE; break; case ELINK_ETH_PHY_SFP_1G_FIBER: BLOGI(sc, "Found 1Gb Fiber media.\n"); sc->media = IFM_1000_SX; port_type = PORT_FIBRE; break; case ELINK_ETH_PHY_KR: case ELINK_ETH_PHY_CX4: BLOGI(sc, "Found 10GBase-CX4 media.\n"); sc->media = IFM_10G_CX4; port_type = PORT_FIBRE; break; case ELINK_ETH_PHY_DA_TWINAX: BLOGI(sc, "Found 10Gb Twinax media.\n"); sc->media = IFM_10G_TWINAX; port_type = PORT_DA; break; case ELINK_ETH_PHY_BASE_T: if (sc->link_params.speed_cap_mask[0] & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) { BLOGI(sc, "Found 10GBase-T media.\n"); sc->media = IFM_10G_T; port_type = PORT_TP; } else { BLOGI(sc, "Found 1000Base-T media.\n"); sc->media = IFM_1000_T; port_type = PORT_TP; } break; case ELINK_ETH_PHY_NOT_PRESENT: BLOGI(sc, "Media not present.\n"); sc->media = 0; port_type = PORT_OTHER; break; case ELINK_ETH_PHY_UNSPECIFIED: default: BLOGI(sc, "Unknown media!\n"); sc->media = 0; port_type = PORT_OTHER; break; } return port_type; } #define GET_FIELD(value, fname) \ (((value) & (fname##_MASK)) >> (fname##_SHIFT)) #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID) #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR) static int bxe_get_igu_cam_info(struct bxe_softc *sc) { int pfid = SC_FUNC(sc); int igu_sb_id; uint32_t val; uint8_t fid, igu_sb_cnt = 0; sc->igu_base_sb = 0xff; if (CHIP_INT_MODE_IS_BC(sc)) { int vn = SC_VN(sc); igu_sb_cnt = sc->igu_sb_cnt; sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) * FP_SB_MAX_E1x); sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x + (CHIP_IS_MODE_4_PORT(sc) ? pfid : vn)); return (0); } /* IGU in normal mode - read CAM */ for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE; igu_sb_id++) { val = REG_RD(sc, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4); if (!(val & IGU_REG_MAPPING_MEMORY_VALID)) { continue; } fid = IGU_FID(val); if ((fid & IGU_FID_ENCODE_IS_PF)) { if ((fid & IGU_FID_PF_NUM_MASK) != pfid) { continue; } if (IGU_VEC(val) == 0) { /* default status block */ sc->igu_dsb_id = igu_sb_id; } else { if (sc->igu_base_sb == 0xff) { sc->igu_base_sb = igu_sb_id; } igu_sb_cnt++; } } } /* * Due to new PF resource allocation by MFW T7.4 and above, it's optional * that number of CAM entries will not be equal to the value advertised in * PCI. Driver should use the minimal value of both as the actual status * block count */ sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt); if (igu_sb_cnt == 0) { BLOGE(sc, "CAM configuration error\n"); return (-1); } return (0); } /* * Gather various information from the device config space, the device itself, * shmem, and the user input. 
*/ static int bxe_get_device_info(struct bxe_softc *sc) { uint32_t val; int rc; /* Get the data for the device */ sc->devinfo.vendor_id = pci_get_vendor(sc->dev); sc->devinfo.device_id = pci_get_device(sc->dev); sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev); sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev); /* get the chip revision (chip metal comes from pci config space) */ sc->devinfo.chip_id = sc->link_params.chip_id = (((REG_RD(sc, MISC_REG_CHIP_NUM) & 0xffff) << 16) | ((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12) | (((REG_RD(sc, PCICFG_OFFSET + PCI_ID_VAL3) >> 24) & 0xf) << 4) | ((REG_RD(sc, MISC_REG_BOND_ID) & 0xf) << 0)); /* force 57811 according to MISC register */ if (REG_RD(sc, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) { if (CHIP_IS_57810(sc)) { sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) | (sc->devinfo.chip_id & 0x0000ffff)); } else if (CHIP_IS_57810_MF(sc)) { sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) | (sc->devinfo.chip_id & 0x0000ffff)); } sc->devinfo.chip_id |= 0x1; } BLOGD(sc, DBG_LOAD, "chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)\n", sc->devinfo.chip_id, ((sc->devinfo.chip_id >> 16) & 0xffff), ((sc->devinfo.chip_id >> 12) & 0xf), ((sc->devinfo.chip_id >> 4) & 0xff), ((sc->devinfo.chip_id >> 0) & 0xf)); val = (REG_RD(sc, 0x2874) & 0x55); if ((sc->devinfo.chip_id & 0x1) || (CHIP_IS_E1(sc) && val) || (CHIP_IS_E1H(sc) && (val == 0x55))) { sc->flags |= BXE_ONE_PORT_FLAG; BLOGD(sc, DBG_LOAD, "single port device\n"); } /* set the doorbell size */ sc->doorbell_size = (1 << BXE_DB_SHIFT); /* determine whether the device is in 2 port or 4 port mode */ sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1h*/ if (CHIP_IS_E2E3(sc)) { /* * Read port4mode_en_ovwr[0]: * If 1, four port mode is in port4mode_en_ovwr[1]. * If 0, four port mode is in port4mode_en[0]. */ val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR); if (val & 1) { val = ((val >> 1) & 1); } else { val = REG_RD(sc, MISC_REG_PORT4MODE_EN); } sc->devinfo.chip_port_mode = (val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE; BLOGD(sc, DBG_LOAD, "Port mode = %s\n", (val) ? "4" : "2"); } /* get the function and path info for the device */ bxe_get_function_num(sc); /* get the shared memory base address */ sc->devinfo.shmem_base = sc->link_params.shmem_base = REG_RD(sc, MISC_REG_SHARED_MEM_ADDR); sc->devinfo.shmem2_base = REG_RD(sc, (SC_PATH(sc) ? 
MISC_REG_GENERIC_CR_1 : MISC_REG_GENERIC_CR_0)); BLOGD(sc, DBG_LOAD, "shmem_base=0x%08x, shmem2_base=0x%08x\n", sc->devinfo.shmem_base, sc->devinfo.shmem2_base); if (!sc->devinfo.shmem_base) { /* this should ONLY prevent upcoming shmem reads */ BLOGI(sc, "MCP not active\n"); sc->flags |= BXE_NO_MCP_FLAG; return (0); } /* make sure the shared memory contents are valid */ val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]); if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) { BLOGE(sc, "Invalid SHMEM validity signature: 0x%08x\n", val); return (0); } BLOGD(sc, DBG_LOAD, "Valid SHMEM validity signature: 0x%08x\n", val); /* get the bootcode version */ sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev); snprintf(sc->devinfo.bc_ver_str, sizeof(sc->devinfo.bc_ver_str), "%d.%d.%d", ((sc->devinfo.bc_ver >> 24) & 0xff), ((sc->devinfo.bc_ver >> 16) & 0xff), ((sc->devinfo.bc_ver >> 8) & 0xff)); BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str); /* get the bootcode shmem address */ sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc); BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x%08x\n", sc->devinfo.mf_cfg_base); /* clean indirect addresses as they're not used */ pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4); if (IS_PF(sc)) { REG_WR(sc, PXP2_REG_PGL_ADDR_88_F0, 0); REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F0, 0); REG_WR(sc, PXP2_REG_PGL_ADDR_90_F0, 0); REG_WR(sc, PXP2_REG_PGL_ADDR_94_F0, 0); if (CHIP_IS_E1x(sc)) { REG_WR(sc, PXP2_REG_PGL_ADDR_88_F1, 0); REG_WR(sc, PXP2_REG_PGL_ADDR_8C_F1, 0); REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0); REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0); } /* * Enable internal target-read (in case we are probed after PF * FLR). Must be done prior to any BAR read access. Only for * 57712 and up */ if (!CHIP_IS_E1x(sc)) { REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); } } /* get the nvram size */ val = REG_RD(sc, MCP_REG_MCPR_NVM_CFG4); sc->devinfo.flash_size = (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE)); BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size); /* get PCI capabilities */ bxe_probe_pci_caps(sc); bxe_set_power_state(sc, PCI_PM_D0); /* get various configuration parameters from shmem */ bxe_get_shmem_info(sc); if (sc->devinfo.pcie_msix_cap_reg != 0) { val = pci_read_config(sc->dev, (sc->devinfo.pcie_msix_cap_reg + PCIR_MSIX_CTRL), 2); sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE); } else { sc->igu_sb_cnt = 1; } sc->igu_base_addr = BAR_IGU_INTMEM; /* initialize IGU parameters */ if (CHIP_IS_E1x(sc)) { sc->devinfo.int_block = INT_BLOCK_HC; sc->igu_dsb_id = DEF_SB_IGU_ID; sc->igu_base_sb = 0; } else { sc->devinfo.int_block = INT_BLOCK_IGU; /* do not allow device reset during IGU info processing */ bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET); val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION); if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { int tout = 5000; BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode\n"); val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN); REG_WR(sc, IGU_REG_BLOCK_CONFIGURATION, val); REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x7f); while (tout && REG_RD(sc, IGU_REG_RESET_MEMORIES)) { tout--; DELAY(1000); } if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) { BLOGD(sc, DBG_LOAD, "FORCING IGU Normal Mode failed!!!\n"); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); return (-1); } } if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) { BLOGD(sc, DBG_LOAD, "IGU Backward Compatible Mode\n"); sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP; } else {
BLOGD(sc, DBG_LOAD, "IGU Normal Mode\n"); } rc = bxe_get_igu_cam_info(sc); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); if (rc) { return (rc); } } /* * Get base FW non-default (fast path) status block ID. This value is * used to initialize the fw_sb_id saved on the fp/queue structure to * determine the id used by the FW. */ if (CHIP_IS_E1x(sc)) { sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc)); } else { /* * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of * the same queue are indicated on the same IGU SB). So we prefer * FW and IGU SBs to be the same value. */ sc->base_fw_ndsb = sc->igu_base_sb; } BLOGD(sc, DBG_LOAD, "igu_dsb_id=%d igu_base_sb=%d igu_sb_cnt=%d base_fw_ndsb=%d\n", sc->igu_dsb_id, sc->igu_base_sb, sc->igu_sb_cnt, sc->base_fw_ndsb); elink_phy_probe(&sc->link_params); return (0); } static void bxe_link_settings_supported(struct bxe_softc *sc, uint32_t switch_cfg) { uint32_t cfg_size = 0; uint32_t idx; uint8_t port = SC_PORT(sc); /* aggregation of supported attributes of all external phys */ sc->port.supported[0] = 0; sc->port.supported[1] = 0; switch (sc->link_params.num_phys) { case 1: sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported; cfg_size = 1; break; case 2: sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported; cfg_size = 1; break; case 3: if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) { sc->port.supported[1] = sc->link_params.phy[ELINK_EXT_PHY1].supported; sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY2].supported; } else { sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported; sc->port.supported[1] = sc->link_params.phy[ELINK_EXT_PHY2].supported; } cfg_size = 2; break; } if (!(sc->port.supported[0] || sc->port.supported[1])) { BLOGE(sc, "Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)\n", SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config), SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config2)); return; } if (CHIP_IS_E3(sc)) sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR); else { switch (switch_cfg) { case ELINK_SWITCH_CFG_1G: sc->port.phy_addr = REG_RD(sc, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10); break; case ELINK_SWITCH_CFG_10G: sc->port.phy_addr = REG_RD(sc, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18); break; default: BLOGE(sc, "Invalid switch config in link_config=0x%08x\n", sc->port.link_config[0]); return; } } BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr); /* mask what we support according to speed_cap_mask per configuration */ for (idx = 0; idx < cfg_size; idx++) { if (!(sc->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) { sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half; } if (!(sc->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL)) { sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full; } if (!(sc->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) { sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half; } if (!(sc->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL)) { sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full; } if (!(sc->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) { sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full; } if (!(sc->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) { sc->port.supported[idx] &= ~ELINK_SUPPORTED_2500baseX_Full; } if (!(sc->link_params.speed_cap_mask[idx] & 
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full; } if (!(sc->link_params.speed_cap_mask[idx] & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) { sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full; } } BLOGD(sc, DBG_LOAD, "PHY supported 0=0x%08x 1=0x%08x\n", sc->port.supported[0], sc->port.supported[1]); ELINK_DEBUG_P2(sc, "PHY supported 0=0x%08x 1=0x%08x\n", sc->port.supported[0], sc->port.supported[1]); } static void bxe_link_settings_requested(struct bxe_softc *sc) { uint32_t link_config; uint32_t idx; uint32_t cfg_size = 0; sc->port.advertising[0] = 0; sc->port.advertising[1] = 0; switch (sc->link_params.num_phys) { case 1: case 2: cfg_size = 1; break; case 3: cfg_size = 2; break; } for (idx = 0; idx < cfg_size; idx++) { sc->link_params.req_duplex[idx] = DUPLEX_FULL; link_config = sc->port.link_config[idx]; switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) { case PORT_FEATURE_LINK_SPEED_AUTO: if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) { sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG; sc->port.advertising[idx] |= sc->port.supported[idx]; if (sc->link_params.phy[ELINK_EXT_PHY1].type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) sc->port.advertising[idx] |= (ELINK_SUPPORTED_100baseT_Half | ELINK_SUPPORTED_100baseT_Full); } else { /* force 10G, no AN */ sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000; sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); continue; } break; case PORT_FEATURE_LINK_SPEED_10M_FULL: if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) { sc->link_params.req_line_speed[idx] = ELINK_SPEED_10; sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full | ADVERTISED_TP); } else { BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " "speed_cap_mask=0x%08x\n", link_config, sc->link_params.speed_cap_mask[idx]); return; } break; case PORT_FEATURE_LINK_SPEED_10M_HALF: if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) { sc->link_params.req_line_speed[idx] = ELINK_SPEED_10; sc->link_params.req_duplex[idx] = DUPLEX_HALF; sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half | ADVERTISED_TP); ELINK_DEBUG_P1(sc, "driver requesting DUPLEX_HALF req_duplex = %x!\n", sc->link_params.req_duplex[idx]); } else { BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " "speed_cap_mask=0x%08x\n", link_config, sc->link_params.speed_cap_mask[idx]); return; } break; case PORT_FEATURE_LINK_SPEED_100M_FULL: if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) { sc->link_params.req_line_speed[idx] = ELINK_SPEED_100; sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full | ADVERTISED_TP); } else { BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " "speed_cap_mask=0x%08x\n", link_config, sc->link_params.speed_cap_mask[idx]); return; } break; case PORT_FEATURE_LINK_SPEED_100M_HALF: if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) { sc->link_params.req_line_speed[idx] = ELINK_SPEED_100; sc->link_params.req_duplex[idx] = DUPLEX_HALF; sc->port.advertising[idx] |= (ADVERTISED_100baseT_Half | ADVERTISED_TP); } else { BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " "speed_cap_mask=0x%08x\n", link_config, sc->link_params.speed_cap_mask[idx]); return; } break; case PORT_FEATURE_LINK_SPEED_1G: if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) { sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000; sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full | ADVERTISED_TP); } else { BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " 
"speed_cap_mask=0x%08x\n", link_config, sc->link_params.speed_cap_mask[idx]); return; } break; case PORT_FEATURE_LINK_SPEED_2_5G: if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) { sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500; sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full | ADVERTISED_TP); } else { BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " "speed_cap_mask=0x%08x\n", link_config, sc->link_params.speed_cap_mask[idx]); return; } break; case PORT_FEATURE_LINK_SPEED_10G_CX4: if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) { sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000; sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); } else { BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " "speed_cap_mask=0x%08x\n", link_config, sc->link_params.speed_cap_mask[idx]); return; } break; case PORT_FEATURE_LINK_SPEED_20G: sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000; break; default: BLOGE(sc, "Invalid NVRAM config link_config=0x%08x " "speed_cap_mask=0x%08x\n", link_config, sc->link_params.speed_cap_mask[idx]); sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG; sc->port.advertising[idx] = sc->port.supported[idx]; break; } sc->link_params.req_flow_ctrl[idx] = (link_config & PORT_FEATURE_FLOW_CONTROL_MASK); if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) { if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) { sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE; } else { bxe_set_requested_fc(sc); } } BLOGD(sc, DBG_LOAD, "req_line_speed=%d req_duplex=%d " "req_flow_ctrl=0x%x advertising=0x%x\n", sc->link_params.req_line_speed[idx], sc->link_params.req_duplex[idx], sc->link_params.req_flow_ctrl[idx], sc->port.advertising[idx]); ELINK_DEBUG_P3(sc, "req_line_speed=%d req_duplex=%d " "advertising=0x%x\n", sc->link_params.req_line_speed[idx], sc->link_params.req_duplex[idx], sc->port.advertising[idx]); } } static void bxe_get_phy_info(struct bxe_softc *sc) { uint8_t port = SC_PORT(sc); uint32_t config = sc->port.config; uint32_t eee_mode; /* shmem data already read in bxe_get_shmem_info() */ ELINK_DEBUG_P3(sc, "lane_config=0x%08x speed_cap_mask0=0x%08x " "link_config0=0x%08x\n", sc->link_params.lane_config, sc->link_params.speed_cap_mask[0], sc->port.link_config[0]); bxe_link_settings_supported(sc, sc->link_params.switch_cfg); bxe_link_settings_requested(sc); if (sc->autogreeen == AUTO_GREEN_FORCE_ON) { sc->link_params.feature_config_flags |= ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED; } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) { sc->link_params.feature_config_flags &= ~ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED; } else if (config & PORT_FEAT_CFG_AUTOGREEEN_ENABLED) { sc->link_params.feature_config_flags |= ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED; } /* configure link feature according to nvram value */ eee_mode = (((SHMEM_RD(sc, dev_info.port_feature_config[port].eee_power_mode)) & PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >> PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT); if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) { sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI | ELINK_EEE_MODE_ENABLE_LPI | ELINK_EEE_MODE_OUTPUT_TIME); } else { sc->link_params.eee_mode = 0; } /* get the media type */ bxe_media_detect(sc); ELINK_DEBUG_P1(sc, "detected media type\n", sc->media); } static void bxe_get_params(struct bxe_softc *sc) { /* get user tunable params */ bxe_get_tunable_params(sc); /* select the RX and TX ring sizes */ sc->tx_ring_size = TX_BD_USABLE; sc->rx_ring_size = RX_BD_USABLE; /* 
XXX disable WoL */ sc->wol = 0; } static void bxe_set_modes_bitmap(struct bxe_softc *sc) { uint32_t flags = 0; if (CHIP_REV_IS_FPGA(sc)) { SET_FLAGS(flags, MODE_FPGA); } else if (CHIP_REV_IS_EMUL(sc)) { SET_FLAGS(flags, MODE_EMUL); } else { SET_FLAGS(flags, MODE_ASIC); } if (CHIP_IS_MODE_4_PORT(sc)) { SET_FLAGS(flags, MODE_PORT4); } else { SET_FLAGS(flags, MODE_PORT2); } if (CHIP_IS_E2(sc)) { SET_FLAGS(flags, MODE_E2); } else if (CHIP_IS_E3(sc)) { SET_FLAGS(flags, MODE_E3); if (CHIP_REV(sc) == CHIP_REV_Ax) { SET_FLAGS(flags, MODE_E3_A0); } else /*if (CHIP_REV(sc) == CHIP_REV_Bx)*/ { SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3); } } if (IS_MF(sc)) { SET_FLAGS(flags, MODE_MF); switch (sc->devinfo.mf_info.mf_mode) { case MULTI_FUNCTION_SD: SET_FLAGS(flags, MODE_MF_SD); break; case MULTI_FUNCTION_SI: SET_FLAGS(flags, MODE_MF_SI); break; case MULTI_FUNCTION_AFEX: SET_FLAGS(flags, MODE_MF_AFEX); break; } } else { SET_FLAGS(flags, MODE_SF); } #if defined(__LITTLE_ENDIAN) SET_FLAGS(flags, MODE_LITTLE_ENDIAN); #else /* __BIG_ENDIAN */ SET_FLAGS(flags, MODE_BIG_ENDIAN); #endif INIT_MODE_FLAGS(sc) = flags; } static int bxe_alloc_hsi_mem(struct bxe_softc *sc) { struct bxe_fastpath *fp; bus_addr_t busaddr; int max_agg_queues; int max_segments; bus_size_t max_size; bus_size_t max_seg_size; char buf[32]; int rc; int i, j; /* XXX zero out all vars here and call bxe_alloc_hsi_mem on error */ /* allocate the parent bus DMA tag */ rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */ 1, /* alignment */ 0, /* boundary limit */ BUS_SPACE_MAXADDR, /* restricted low */ BUS_SPACE_MAXADDR, /* restricted hi */ NULL, /* addr filter() */ NULL, /* addr filter() arg */ BUS_SPACE_MAXSIZE_32BIT, /* max map size */ BUS_SPACE_UNRESTRICTED, /* num discontinuous */ BUS_SPACE_MAXSIZE_32BIT, /* max seg size */ 0, /* flags */ NULL, /* lock() */ NULL, /* lock() arg */ &sc->parent_dma_tag); /* returned dma tag */ if (rc != 0) { BLOGE(sc, "Failed to alloc parent DMA tag (%d)!\n", rc); return (1); } /************************/ /* DEFAULT STATUS BLOCK */ /************************/ if (bxe_dma_alloc(sc, sizeof(struct host_sp_status_block), &sc->def_sb_dma, "default status block") != 0) { /* XXX */ bus_dma_tag_destroy(sc->parent_dma_tag); return (1); } sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr; /***************/ /* EVENT QUEUE */ /***************/ if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, &sc->eq_dma, "event queue") != 0) { /* XXX */ bxe_dma_free(sc, &sc->def_sb_dma); sc->def_sb = NULL; bus_dma_tag_destroy(sc->parent_dma_tag); return (1); } sc->eq = (union event_ring_elem * )sc->eq_dma.vaddr; /*************/ /* SLOW PATH */ /*************/ if (bxe_dma_alloc(sc, sizeof(struct bxe_slowpath), &sc->sp_dma, "slow path") != 0) { /* XXX */ bxe_dma_free(sc, &sc->eq_dma); sc->eq = NULL; bxe_dma_free(sc, &sc->def_sb_dma); sc->def_sb = NULL; bus_dma_tag_destroy(sc->parent_dma_tag); return (1); } sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr; /*******************/ /* SLOW PATH QUEUE */ /*******************/ if (bxe_dma_alloc(sc, BCM_PAGE_SIZE, &sc->spq_dma, "slow path queue") != 0) { /* XXX */ bxe_dma_free(sc, &sc->sp_dma); sc->sp = NULL; bxe_dma_free(sc, &sc->eq_dma); sc->eq = NULL; bxe_dma_free(sc, &sc->def_sb_dma); sc->def_sb = NULL; bus_dma_tag_destroy(sc->parent_dma_tag); return (1); } sc->spq = (struct eth_spe *)sc->spq_dma.vaddr; /***************************/ /* FW DECOMPRESSION BUFFER */ /***************************/ if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma, "fw decompression buffer") != 0) { /* XXX 
*/ bxe_dma_free(sc, &sc->spq_dma); sc->spq = NULL; bxe_dma_free(sc, &sc->sp_dma); sc->sp = NULL; bxe_dma_free(sc, &sc->eq_dma); sc->eq = NULL; bxe_dma_free(sc, &sc->def_sb_dma); sc->def_sb = NULL; bus_dma_tag_destroy(sc->parent_dma_tag); return (1); } sc->gz_buf = (void *)sc->gz_buf_dma.vaddr; if ((sc->gz_strm = malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) { /* XXX */ bxe_dma_free(sc, &sc->gz_buf_dma); sc->gz_buf = NULL; bxe_dma_free(sc, &sc->spq_dma); sc->spq = NULL; bxe_dma_free(sc, &sc->sp_dma); sc->sp = NULL; bxe_dma_free(sc, &sc->eq_dma); sc->eq = NULL; bxe_dma_free(sc, &sc->def_sb_dma); sc->def_sb = NULL; bus_dma_tag_destroy(sc->parent_dma_tag); return (1); } /*************/ /* FASTPATHS */ /*************/ /* allocate DMA memory for each fastpath structure */ for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; fp->sc = sc; fp->index = i; /*******************/ /* FP STATUS BLOCK */ /*******************/ snprintf(buf, sizeof(buf), "fp %d status block", i); if (bxe_dma_alloc(sc, sizeof(union bxe_host_hc_status_block), &fp->sb_dma, buf) != 0) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to alloc %s\n", buf); return (1); } else { if (CHIP_IS_E2E3(sc)) { fp->status_block.e2_sb = (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr; } else { fp->status_block.e1x_sb = (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr; } } /******************/ /* FP TX BD CHAIN */ /******************/ snprintf(buf, sizeof(buf), "fp %d tx bd chain", i); if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES), &fp->tx_dma, buf) != 0) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to alloc %s\n", buf); return (1); } else { fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr; } /* link together the tx bd chain pages */ for (j = 1; j <= TX_BD_NUM_PAGES; j++) { /* index into the tx bd chain array to last entry per page */ struct eth_tx_next_bd *tx_next_bd = &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd; /* point to the next page and wrap from last page */ busaddr = (fp->tx_dma.paddr + (BCM_PAGE_SIZE * (j % TX_BD_NUM_PAGES))); tx_next_bd->addr_hi = htole32(U64_HI(busaddr)); tx_next_bd->addr_lo = htole32(U64_LO(busaddr)); } /******************/ /* FP RX BD CHAIN */ /******************/ snprintf(buf, sizeof(buf), "fp %d rx bd chain", i); if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES), &fp->rx_dma, buf) != 0) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to alloc %s\n", buf); return (1); } else { fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr; } /* link together the rx bd chain pages */ for (j = 1; j <= RX_BD_NUM_PAGES; j++) { /* index into the rx bd chain array to last entry per page */ struct eth_rx_bd *rx_bd = &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2]; /* point to the next page and wrap from last page */ busaddr = (fp->rx_dma.paddr + (BCM_PAGE_SIZE * (j % RX_BD_NUM_PAGES))); rx_bd->addr_hi = htole32(U64_HI(busaddr)); rx_bd->addr_lo = htole32(U64_LO(busaddr)); } /*******************/ /* FP RX RCQ CHAIN */ /*******************/ snprintf(buf, sizeof(buf), "fp %d rcq chain", i); if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RCQ_NUM_PAGES), &fp->rcq_dma, buf) != 0) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to alloc %s\n", buf); return (1); } else { fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr; } /* link together the rcq chain pages */ for (j = 1; j <= RCQ_NUM_PAGES; j++) { /* index into the rcq chain array to last entry per page */ struct 
eth_rx_cqe_next_page *rx_cqe_next = (struct eth_rx_cqe_next_page *) &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1]; /* point to the next page and wrap from last page */ busaddr = (fp->rcq_dma.paddr + (BCM_PAGE_SIZE * (j % RCQ_NUM_PAGES))); rx_cqe_next->addr_hi = htole32(U64_HI(busaddr)); rx_cqe_next->addr_lo = htole32(U64_LO(busaddr)); } /*******************/ /* FP RX SGE CHAIN */ /*******************/ snprintf(buf, sizeof(buf), "fp %d sge chain", i); if (bxe_dma_alloc(sc, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES), &fp->rx_sge_dma, buf) != 0) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to alloc %s\n", buf); return (1); } else { fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr; } /* link together the sge chain pages */ for (j = 1; j <= RX_SGE_NUM_PAGES; j++) { /* index into the rcq chain array to last entry per page */ struct eth_rx_sge *rx_sge = &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2]; /* point to the next page and wrap from last page */ busaddr = (fp->rx_sge_dma.paddr + (BCM_PAGE_SIZE * (j % RX_SGE_NUM_PAGES))); rx_sge->addr_hi = htole32(U64_HI(busaddr)); rx_sge->addr_lo = htole32(U64_LO(busaddr)); } /***********************/ /* FP TX MBUF DMA MAPS */ /***********************/ /* set required sizes before mapping to conserve resources */ if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) { max_size = BXE_TSO_MAX_SIZE; max_segments = BXE_TSO_MAX_SEGMENTS; max_seg_size = BXE_TSO_MAX_SEG_SIZE; } else { max_size = (MCLBYTES * BXE_MAX_SEGMENTS); max_segments = BXE_MAX_SEGMENTS; max_seg_size = MCLBYTES; } /* create a dma tag for the tx mbufs */ rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ 1, /* alignment */ 0, /* boundary limit */ BUS_SPACE_MAXADDR, /* restricted low */ BUS_SPACE_MAXADDR, /* restricted hi */ NULL, /* addr filter() */ NULL, /* addr filter() arg */ max_size, /* max map size */ max_segments, /* num discontinuous */ max_seg_size, /* max seg size */ 0, /* flags */ NULL, /* lock() */ NULL, /* lock() arg */ &fp->tx_mbuf_tag); /* returned dma tag */ if (rc != 0) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma tag for " "'fp %d tx mbufs' (%d)\n", i, rc); return (1); } /* create dma maps for each of the tx mbuf clusters */ for (j = 0; j < TX_BD_TOTAL; j++) { if (bus_dmamap_create(fp->tx_mbuf_tag, BUS_DMA_NOWAIT, &fp->tx_mbuf_chain[j].m_map)) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma map for " "'fp %d tx mbuf %d' (%d)\n", i, j, rc); return (1); } } /***********************/ /* FP RX MBUF DMA MAPS */ /***********************/ /* create a dma tag for the rx mbufs */ rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ 1, /* alignment */ 0, /* boundary limit */ BUS_SPACE_MAXADDR, /* restricted low */ BUS_SPACE_MAXADDR, /* restricted hi */ NULL, /* addr filter() */ NULL, /* addr filter() arg */ MJUM9BYTES, /* max map size */ 1, /* num discontinuous */ MJUM9BYTES, /* max seg size */ 0, /* flags */ NULL, /* lock() */ NULL, /* lock() arg */ &fp->rx_mbuf_tag); /* returned dma tag */ if (rc != 0) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma tag for " "'fp %d rx mbufs' (%d)\n", i, rc); return (1); } /* create dma maps for each of the rx mbuf clusters */ for (j = 0; j < RX_BD_TOTAL; j++) { if (bus_dmamap_create(fp->rx_mbuf_tag, BUS_DMA_NOWAIT, &fp->rx_mbuf_chain[j].m_map)) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma map for " "'fp %d rx mbuf %d' 
(%d)\n", i, j, rc); return (1); } } /* create dma map for the spare rx mbuf cluster */ if (bus_dmamap_create(fp->rx_mbuf_tag, BUS_DMA_NOWAIT, &fp->rx_mbuf_spare_map)) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma map for " "'fp %d spare rx mbuf' (%d)\n", i, rc); return (1); } /***************************/ /* FP RX SGE MBUF DMA MAPS */ /***************************/ /* create a dma tag for the rx sge mbufs */ rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */ 1, /* alignment */ 0, /* boundary limit */ BUS_SPACE_MAXADDR, /* restricted low */ BUS_SPACE_MAXADDR, /* restricted hi */ NULL, /* addr filter() */ NULL, /* addr filter() arg */ BCM_PAGE_SIZE, /* max map size */ 1, /* num discontinuous */ BCM_PAGE_SIZE, /* max seg size */ 0, /* flags */ NULL, /* lock() */ NULL, /* lock() arg */ &fp->rx_sge_mbuf_tag); /* returned dma tag */ if (rc != 0) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma tag for " "'fp %d rx sge mbufs' (%d)\n", i, rc); return (1); } /* create dma maps for the rx sge mbuf clusters */ for (j = 0; j < RX_SGE_TOTAL; j++) { if (bus_dmamap_create(fp->rx_sge_mbuf_tag, BUS_DMA_NOWAIT, &fp->rx_sge_mbuf_chain[j].m_map)) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma map for " "'fp %d rx sge mbuf %d' (%d)\n", i, j, rc); return (1); } } /* create dma map for the spare rx sge mbuf cluster */ if (bus_dmamap_create(fp->rx_sge_mbuf_tag, BUS_DMA_NOWAIT, &fp->rx_sge_mbuf_spare_map)) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma map for " "'fp %d spare rx sge mbuf' (%d)\n", i, rc); return (1); } /***************************/ /* FP RX TPA MBUF DMA MAPS */ /***************************/ /* create dma maps for the rx tpa mbuf clusters */ max_agg_queues = MAX_AGG_QS(sc); for (j = 0; j < max_agg_queues; j++) { if (bus_dmamap_create(fp->rx_mbuf_tag, BUS_DMA_NOWAIT, &fp->rx_tpa_info[j].bd.m_map)) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma map for " "'fp %d rx tpa mbuf %d' (%d)\n", i, j, rc); return (1); } } /* create dma map for the spare rx tpa mbuf cluster */ if (bus_dmamap_create(fp->rx_mbuf_tag, BUS_DMA_NOWAIT, &fp->rx_tpa_info_mbuf_spare_map)) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma map for " "'fp %d spare rx tpa mbuf' (%d)\n", i, rc); return (1); } bxe_init_sge_ring_bit_mask(fp); } return (0); } static void bxe_free_hsi_mem(struct bxe_softc *sc) { struct bxe_fastpath *fp; int max_agg_queues; int i, j; if (sc->parent_dma_tag == NULL) { return; /* assume nothing was allocated */ } for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; /*******************/ /* FP STATUS BLOCK */ /*******************/ bxe_dma_free(sc, &fp->sb_dma); memset(&fp->status_block, 0, sizeof(fp->status_block)); /******************/ /* FP TX BD CHAIN */ /******************/ bxe_dma_free(sc, &fp->tx_dma); fp->tx_chain = NULL; /******************/ /* FP RX BD CHAIN */ /******************/ bxe_dma_free(sc, &fp->rx_dma); fp->rx_chain = NULL; /*******************/ /* FP RX RCQ CHAIN */ /*******************/ bxe_dma_free(sc, &fp->rcq_dma); fp->rcq_chain = NULL; /*******************/ /* FP RX SGE CHAIN */ /*******************/ bxe_dma_free(sc, &fp->rx_sge_dma); fp->rx_sge_chain = NULL; /***********************/ /* FP TX MBUF DMA MAPS */ /***********************/ if (fp->tx_mbuf_tag != NULL) { for (j = 0; j < TX_BD_TOTAL; j++) { if 
(fp->tx_mbuf_chain[j].m_map != NULL) { bus_dmamap_unload(fp->tx_mbuf_tag, fp->tx_mbuf_chain[j].m_map); bus_dmamap_destroy(fp->tx_mbuf_tag, fp->tx_mbuf_chain[j].m_map); } } bus_dma_tag_destroy(fp->tx_mbuf_tag); fp->tx_mbuf_tag = NULL; } /***********************/ /* FP RX MBUF DMA MAPS */ /***********************/ if (fp->rx_mbuf_tag != NULL) { for (j = 0; j < RX_BD_TOTAL; j++) { if (fp->rx_mbuf_chain[j].m_map != NULL) { bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_chain[j].m_map); bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_chain[j].m_map); } } if (fp->rx_mbuf_spare_map != NULL) { bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map); bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map); } /***************************/ /* FP RX TPA MBUF DMA MAPS */ /***************************/ max_agg_queues = MAX_AGG_QS(sc); for (j = 0; j < max_agg_queues; j++) { if (fp->rx_tpa_info[j].bd.m_map != NULL) { bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_tpa_info[j].bd.m_map); bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_tpa_info[j].bd.m_map); } } if (fp->rx_tpa_info_mbuf_spare_map != NULL) { bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_tpa_info_mbuf_spare_map); bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_tpa_info_mbuf_spare_map); } bus_dma_tag_destroy(fp->rx_mbuf_tag); fp->rx_mbuf_tag = NULL; } /***************************/ /* FP RX SGE MBUF DMA MAPS */ /***************************/ if (fp->rx_sge_mbuf_tag != NULL) { for (j = 0; j < RX_SGE_TOTAL; j++) { if (fp->rx_sge_mbuf_chain[j].m_map != NULL) { bus_dmamap_unload(fp->rx_sge_mbuf_tag, fp->rx_sge_mbuf_chain[j].m_map); bus_dmamap_destroy(fp->rx_sge_mbuf_tag, fp->rx_sge_mbuf_chain[j].m_map); } } if (fp->rx_sge_mbuf_spare_map != NULL) { bus_dmamap_unload(fp->rx_sge_mbuf_tag, fp->rx_sge_mbuf_spare_map); bus_dmamap_destroy(fp->rx_sge_mbuf_tag, fp->rx_sge_mbuf_spare_map); } bus_dma_tag_destroy(fp->rx_sge_mbuf_tag); fp->rx_sge_mbuf_tag = NULL; } } /***************************/ /* FW DECOMPRESSION BUFFER */ /***************************/ bxe_dma_free(sc, &sc->gz_buf_dma); sc->gz_buf = NULL; free(sc->gz_strm, M_DEVBUF); sc->gz_strm = NULL; /*******************/ /* SLOW PATH QUEUE */ /*******************/ bxe_dma_free(sc, &sc->spq_dma); sc->spq = NULL; /*************/ /* SLOW PATH */ /*************/ bxe_dma_free(sc, &sc->sp_dma); sc->sp = NULL; /***************/ /* EVENT QUEUE */ /***************/ bxe_dma_free(sc, &sc->eq_dma); sc->eq = NULL; /************************/ /* DEFAULT STATUS BLOCK */ /************************/ bxe_dma_free(sc, &sc->def_sb_dma); sc->def_sb = NULL; bus_dma_tag_destroy(sc->parent_dma_tag); sc->parent_dma_tag = NULL; } /* * Previous driver DMAE transaction may have occurred when pre-boot stage * ended and boot began. This would invalidate the addresses of the * transaction, resulting in was-error bit set in the PCI causing all * hw-to-host PCIe transactions to timeout. 
If this happened we want to clear * the interrupt which detected this from the pglueb and the was-done bit */ static void bxe_prev_interrupted_dmae(struct bxe_softc *sc) { uint32_t val; if (!CHIP_IS_E1x(sc)) { val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS); if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { BLOGD(sc, DBG_LOAD, "Clearing 'was-error' bit that was set in pglueb"); REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << SC_FUNC(sc)); } } } static int bxe_prev_mcp_done(struct bxe_softc *sc) { uint32_t rc = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE, DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET); if (!rc) { BLOGE(sc, "MCP response failure, aborting\n"); return (-1); } return (0); } static struct bxe_prev_list_node * bxe_prev_path_get_entry(struct bxe_softc *sc) { struct bxe_prev_list_node *tmp; LIST_FOREACH(tmp, &bxe_prev_list, node) { if ((sc->pcie_bus == tmp->bus) && (sc->pcie_device == tmp->slot) && (SC_PATH(sc) == tmp->path)) { return (tmp); } } return (NULL); } static uint8_t bxe_prev_is_path_marked(struct bxe_softc *sc) { struct bxe_prev_list_node *tmp; int rc = FALSE; mtx_lock(&bxe_prev_mtx); tmp = bxe_prev_path_get_entry(sc); if (tmp) { if (tmp->aer) { BLOGD(sc, DBG_LOAD, "Path %d/%d/%d was marked by AER\n", sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); } else { rc = TRUE; BLOGD(sc, DBG_LOAD, "Path %d/%d/%d was already cleaned from previous drivers\n", sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); } } mtx_unlock(&bxe_prev_mtx); return (rc); } static int bxe_prev_mark_path(struct bxe_softc *sc, uint8_t after_undi) { struct bxe_prev_list_node *tmp; mtx_lock(&bxe_prev_mtx); /* Check whether the entry for this path already exists */ tmp = bxe_prev_path_get_entry(sc); if (tmp) { if (!tmp->aer) { BLOGD(sc, DBG_LOAD, "Re-marking AER in path %d/%d/%d\n", sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); } else { BLOGD(sc, DBG_LOAD, "Removing AER indication from path %d/%d/%d\n", sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); tmp->aer = 0; } mtx_unlock(&bxe_prev_mtx); return (0); } mtx_unlock(&bxe_prev_mtx); /* Create an entry for this path and add it */ tmp = malloc(sizeof(struct bxe_prev_list_node), M_DEVBUF, (M_NOWAIT | M_ZERO)); if (!tmp) { BLOGE(sc, "Failed to allocate 'bxe_prev_list_node'\n"); return (-1); } tmp->bus = sc->pcie_bus; tmp->slot = sc->pcie_device; tmp->path = SC_PATH(sc); tmp->aer = 0; tmp->undi = after_undi ? 
(1 << SC_PORT(sc)) : 0; mtx_lock(&bxe_prev_mtx); BLOGD(sc, DBG_LOAD, "Marked path %d/%d/%d - finished previous unload\n", sc->pcie_bus, sc->pcie_device, SC_PATH(sc)); LIST_INSERT_HEAD(&bxe_prev_list, tmp, node); mtx_unlock(&bxe_prev_mtx); return (0); } static int bxe_do_flr(struct bxe_softc *sc) { int i; /* only E2 and onwards support FLR */ if (CHIP_IS_E1x(sc)) { BLOGD(sc, DBG_LOAD, "FLR not supported in E1/E1H\n"); return (-1); } /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */ if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { BLOGD(sc, DBG_LOAD, "FLR not supported by BC_VER: 0x%08x\n", sc->devinfo.bc_ver); return (-1); } /* Wait for Transaction Pending bit clean */ for (i = 0; i < 4; i++) { if (i) { DELAY(((1 << (i - 1)) * 100) * 1000); } if (!bxe_is_pcie_pending(sc)) { goto clear; } } BLOGE(sc, "PCIE transaction is not cleared, " "proceeding with reset anyway\n"); clear: BLOGD(sc, DBG_LOAD, "Initiating FLR\n"); bxe_fw_command(sc, DRV_MSG_CODE_INITIATE_FLR, 0); return (0); } struct bxe_mac_vals { uint32_t xmac_addr; uint32_t xmac_val; uint32_t emac_addr; uint32_t emac_val; uint32_t umac_addr; uint32_t umac_val; uint32_t bmac_addr; uint32_t bmac_val[2]; }; static void bxe_prev_unload_close_mac(struct bxe_softc *sc, struct bxe_mac_vals *vals) { uint32_t val, base_addr, offset, mask, reset_reg; uint8_t mac_stopped = FALSE; uint8_t port = SC_PORT(sc); uint32_t wb_data[2]; /* reset addresses as they also mark which values were changed */ vals->bmac_addr = 0; vals->umac_addr = 0; vals->xmac_addr = 0; vals->emac_addr = 0; reset_reg = REG_RD(sc, MISC_REG_RESET_REG_2); if (!CHIP_IS_E3(sc)) { val = REG_RD(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4); mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port; if ((mask & reset_reg) && val) { BLOGD(sc, DBG_LOAD, "Disable BMAC Rx\n"); base_addr = SC_PORT(sc) ? NIG_REG_INGRESS_BMAC1_MEM : NIG_REG_INGRESS_BMAC0_MEM; offset = CHIP_IS_E2(sc) ? BIGMAC2_REGISTER_BMAC_CONTROL : BIGMAC_REGISTER_BMAC_CONTROL; /* * use rd/wr since we cannot use dmae. This is safe * since MCP won't access the bus due to the request * to unload, and no function on the path can be * loaded at this time. */ wb_data[0] = REG_RD(sc, base_addr + offset); wb_data[1] = REG_RD(sc, base_addr + offset + 0x4); vals->bmac_addr = base_addr + offset; vals->bmac_val[0] = wb_data[0]; vals->bmac_val[1] = wb_data[1]; wb_data[0] &= ~ELINK_BMAC_CONTROL_RX_ENABLE; REG_WR(sc, vals->bmac_addr, wb_data[0]); REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]); } BLOGD(sc, DBG_LOAD, "Disable EMAC Rx\n"); vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4; vals->emac_val = REG_RD(sc, vals->emac_addr); REG_WR(sc, vals->emac_addr, 0); mac_stopped = TRUE; } else { if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) { BLOGD(sc, DBG_LOAD, "Disable XMAC Rx\n"); base_addr = SC_PORT(sc) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; val = REG_RD(sc, base_addr + XMAC_REG_PFC_CTRL_HI); REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val & ~(1 << 1)); REG_WR(sc, base_addr + XMAC_REG_PFC_CTRL_HI, val | (1 << 1)); vals->xmac_addr = base_addr + XMAC_REG_CTRL; vals->xmac_val = REG_RD(sc, vals->xmac_addr); REG_WR(sc, vals->xmac_addr, 0); mac_stopped = TRUE; } mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port; if (mask & reset_reg) { BLOGD(sc, DBG_LOAD, "Disable UMAC Rx\n"); base_addr = SC_PORT(sc) ? 
GRCBASE_UMAC1 : GRCBASE_UMAC0; vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG; vals->umac_val = REG_RD(sc, vals->umac_addr); REG_WR(sc, vals->umac_addr, 0); mac_stopped = TRUE; } } if (mac_stopped) { DELAY(20000); } } #define BXE_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4)) #define BXE_PREV_UNDI_RCQ(val) ((val) & 0xffff) #define BXE_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff) #define BXE_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq)) static void bxe_prev_unload_undi_inc(struct bxe_softc *sc, uint8_t port, uint8_t inc) { uint16_t rcq, bd; uint32_t tmp_reg = REG_RD(sc, BXE_PREV_UNDI_PROD_ADDR(port)); rcq = BXE_PREV_UNDI_RCQ(tmp_reg) + inc; bd = BXE_PREV_UNDI_BD(tmp_reg) + inc; tmp_reg = BXE_PREV_UNDI_PROD(rcq, bd); REG_WR(sc, BXE_PREV_UNDI_PROD_ADDR(port), tmp_reg); BLOGD(sc, DBG_LOAD, "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n", port, bd, rcq); } static int bxe_prev_unload_common(struct bxe_softc *sc) { uint32_t reset_reg, tmp_reg = 0, rc; uint8_t prev_undi = FALSE; struct bxe_mac_vals mac_vals; uint32_t timer_count = 1000; uint32_t prev_brb; /* * It is possible a previous function received 'common' answer, * but hasn't loaded yet, therefore creating a scenario of * multiple functions receiving 'common' on the same path. */ BLOGD(sc, DBG_LOAD, "Common unload Flow\n"); memset(&mac_vals, 0, sizeof(mac_vals)); if (bxe_prev_is_path_marked(sc)) { return (bxe_prev_mcp_done(sc)); } reset_reg = REG_RD(sc, MISC_REG_RESET_REG_1); /* Reset should be performed after BRB is emptied */ if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { /* Close the MAC Rx to prevent BRB from filling up */ bxe_prev_unload_close_mac(sc, &mac_vals); /* close LLH filters towards the BRB */ elink_set_rx_filter(&sc->link_params, 0); /* * Check if the UNDI driver was previously loaded. 
* UNDI driver initializes CID offset for normal bell to 0x7 */ if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) { tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST); if (tmp_reg == 0x7) { BLOGD(sc, DBG_LOAD, "UNDI previously loaded\n"); prev_undi = TRUE; /* clear the UNDI indication */ REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0); /* clear possible idle check errors */ REG_RD(sc, NIG_REG_NIG_INT_STS_CLR_0); } } /* wait until BRB is empty */ tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS); while (timer_count) { prev_brb = tmp_reg; tmp_reg = REG_RD(sc, BRB1_REG_NUM_OF_FULL_BLOCKS); if (!tmp_reg) { break; } BLOGD(sc, DBG_LOAD, "BRB still has 0x%08x\n", tmp_reg); /* reset timer as long as BRB actually gets emptied */ if (prev_brb > tmp_reg) { timer_count = 1000; } else { timer_count--; } /* If UNDI resides in memory, manually increment it */ if (prev_undi) { bxe_prev_unload_undi_inc(sc, SC_PORT(sc), 1); } DELAY(10); } if (!timer_count) { BLOGE(sc, "Failed to empty BRB\n"); } } /* No packets are in the pipeline, path is ready for reset */ bxe_reset_common(sc); if (mac_vals.xmac_addr) { REG_WR(sc, mac_vals.xmac_addr, mac_vals.xmac_val); } if (mac_vals.umac_addr) { REG_WR(sc, mac_vals.umac_addr, mac_vals.umac_val); } if (mac_vals.emac_addr) { REG_WR(sc, mac_vals.emac_addr, mac_vals.emac_val); } if (mac_vals.bmac_addr) { REG_WR(sc, mac_vals.bmac_addr, mac_vals.bmac_val[0]); REG_WR(sc, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]); } rc = bxe_prev_mark_path(sc, prev_undi); if (rc) { bxe_prev_mcp_done(sc); return (rc); } return (bxe_prev_mcp_done(sc)); } static int bxe_prev_unload_uncommon(struct bxe_softc *sc) { int rc; BLOGD(sc, DBG_LOAD, "Uncommon unload Flow\n"); /* Test if previous unload process was already finished for this path */ if (bxe_prev_is_path_marked(sc)) { return (bxe_prev_mcp_done(sc)); } BLOGD(sc, DBG_LOAD, "Path is unmarked\n"); /* * If function has FLR capabilities, and existing FW version matches * the one required, then FLR will be sufficient to clean any residue * left by previous driver */ rc = bxe_nic_load_analyze_req(sc, FW_MSG_CODE_DRV_LOAD_FUNCTION); if (!rc) { /* fw version is good */ BLOGD(sc, DBG_LOAD, "FW version matches our own, attempting FLR\n"); rc = bxe_do_flr(sc); } if (!rc) { /* FLR was performed */ BLOGD(sc, DBG_LOAD, "FLR successful\n"); return (0); } BLOGD(sc, DBG_LOAD, "Could not FLR\n"); /* Close the MCP request, return failure*/ rc = bxe_prev_mcp_done(sc); if (!rc) { rc = BXE_PREV_WAIT_NEEDED; } return (rc); } static int bxe_prev_unload(struct bxe_softc *sc) { int time_counter = 10; uint32_t fw, hw_lock_reg, hw_lock_val; uint32_t rc = 0; /* * Clear HW from errors which may have resulted from an interrupted * DMAE transaction. */ bxe_prev_interrupted_dmae(sc); /* Release previously held locks */ hw_lock_reg = (SC_FUNC(sc) <= 5) ? 
(MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) : (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8); hw_lock_val = (REG_RD(sc, hw_lock_reg)); if (hw_lock_val) { if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) { BLOGD(sc, DBG_LOAD, "Releasing previously held NVRAM lock\n"); REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB, (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc))); } BLOGD(sc, DBG_LOAD, "Releasing previously held HW lock\n"); REG_WR(sc, hw_lock_reg, 0xffffffff); } else { BLOGD(sc, DBG_LOAD, "No need to release HW/NVRAM locks\n"); } if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) { BLOGD(sc, DBG_LOAD, "Releasing previously held ALR\n"); REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0); } do { /* Lock MCP using an unload request */ fw = bxe_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0); if (!fw) { BLOGE(sc, "MCP response failure, aborting\n"); rc = -1; break; } if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) { rc = bxe_prev_unload_common(sc); break; } /* non-common reply from MCP night require looping */ rc = bxe_prev_unload_uncommon(sc); if (rc != BXE_PREV_WAIT_NEEDED) { break; } DELAY(20000); } while (--time_counter); if (!time_counter || rc) { BLOGE(sc, "Failed to unload previous driver!" " time_counter %d rc %d\n", time_counter, rc); rc = -1; } return (rc); } void bxe_dcbx_set_state(struct bxe_softc *sc, uint8_t dcb_on, uint32_t dcbx_enabled) { if (!CHIP_IS_E1x(sc)) { sc->dcb_state = dcb_on; sc->dcbx_enabled = dcbx_enabled; } else { sc->dcb_state = FALSE; sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID; } BLOGD(sc, DBG_LOAD, "DCB state [%s:%s]\n", dcb_on ? "ON" : "OFF", (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" : (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" : (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_ON) ? "on-chip with negotiation" : "invalid"); } /* must be called after sriov-enable */ static int bxe_set_qm_cid_count(struct bxe_softc *sc) { int cid_count = BXE_L2_MAX_CID(sc); if (IS_SRIOV(sc)) { cid_count += BXE_VF_CIDS; } if (CNIC_SUPPORT(sc)) { cid_count += CNIC_CID_MAX; } return (roundup(cid_count, QM_CID_ROUND)); } static void bxe_init_multi_cos(struct bxe_softc *sc) { int pri, cos; uint32_t pri_map = 0; /* XXX change to user config */ for (pri = 0; pri < BXE_MAX_PRIORITY; pri++) { cos = ((pri_map & (0xf << (pri * 4))) >> (pri * 4)); if (cos < sc->max_cos) { sc->prio_to_cos[pri] = cos; } else { BLOGW(sc, "Invalid COS %d for priority %d " "(max COS is %d), setting to 0\n", cos, pri, (sc->max_cos - 1)); sc->prio_to_cos[pri] = 0; } } } static int bxe_sysctl_state(SYSCTL_HANDLER_ARGS) { struct bxe_softc *sc; int error, result; result = 0; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) { return (error); } if (result == 1) { uint32_t temp; sc = (struct bxe_softc *)arg1; BLOGI(sc, "... 
dumping driver state ...\n"); temp = SHMEM2_RD(sc, temperature_in_half_celsius); BLOGI(sc, "\t Device Temperature = %d Celsius\n", (temp/2)); } return (error); } static int bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS) { struct bxe_softc *sc = (struct bxe_softc *)arg1; uint32_t *eth_stats = (uint32_t *)&sc->eth_stats; uint32_t *offset; uint64_t value = 0; int index = (int)arg2; if (index >= BXE_NUM_ETH_STATS) { BLOGE(sc, "bxe_eth_stats index out of range (%d)\n", index); return (-1); } offset = (eth_stats + bxe_eth_stats_arr[index].offset); switch (bxe_eth_stats_arr[index].size) { case 4: value = (uint64_t)*offset; break; case 8: value = HILO_U64(*offset, *(offset + 1)); break; default: BLOGE(sc, "Invalid bxe_eth_stats size (index=%d size=%d)\n", index, bxe_eth_stats_arr[index].size); return (-1); } return (sysctl_handle_64(oidp, &value, 0, req)); } static int bxe_sysctl_eth_q_stat(SYSCTL_HANDLER_ARGS) { struct bxe_softc *sc = (struct bxe_softc *)arg1; uint32_t *eth_stats; uint32_t *offset; uint64_t value = 0; uint32_t q_stat = (uint32_t)arg2; uint32_t fp_index = ((q_stat >> 16) & 0xffff); uint32_t index = (q_stat & 0xffff); eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats; if (index >= BXE_NUM_ETH_Q_STATS) { BLOGE(sc, "bxe_eth_q_stats index out of range (%d)\n", index); return (-1); } offset = (eth_stats + bxe_eth_q_stats_arr[index].offset); switch (bxe_eth_q_stats_arr[index].size) { case 4: value = (uint64_t)*offset; break; case 8: value = HILO_U64(*offset, *(offset + 1)); break; default: BLOGE(sc, "Invalid bxe_eth_q_stats size (index=%d size=%d)\n", index, bxe_eth_q_stats_arr[index].size); return (-1); } return (sysctl_handle_64(oidp, &value, 0, req)); } static void bxe_force_link_reset(struct bxe_softc *sc) { bxe_acquire_phy_lock(sc); elink_link_reset(&sc->link_params, &sc->link_vars, 1); bxe_release_phy_lock(sc); } static int bxe_sysctl_pauseparam(SYSCTL_HANDLER_ARGS) { struct bxe_softc *sc = (struct bxe_softc *)arg1;; uint32_t cfg_idx = bxe_get_link_cfg_idx(sc); int rc = 0; int error; int result; error = sysctl_handle_int(oidp, &sc->bxe_pause_param, 0, req); if (error || !req->newptr) { return (error); } if ((sc->bxe_pause_param < 0) || (sc->bxe_pause_param > 8)) { BLOGW(sc, "invalid pause param (%d) - use intergers between 1 & 8\n",sc->bxe_pause_param); sc->bxe_pause_param = 8; } result = (sc->bxe_pause_param << PORT_FEATURE_FLOW_CONTROL_SHIFT); if((result & 0x400) && !(sc->port.supported[cfg_idx] & ELINK_SUPPORTED_Autoneg)) { BLOGW(sc, "Does not support Autoneg pause_param %d\n", sc->bxe_pause_param); return -EINVAL; } if(IS_MF(sc)) return 0; sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_AUTO; if(result & ELINK_FLOW_CTRL_RX) sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_RX; if(result & ELINK_FLOW_CTRL_TX) sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_TX; if(sc->link_params.req_flow_ctrl[cfg_idx] == ELINK_FLOW_CTRL_AUTO) sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_NONE; if(result & 0x400) { if (sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG) { sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_AUTO; } sc->link_params.req_fc_auto_adv = 0; if (result & ELINK_FLOW_CTRL_RX) sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_RX; if (result & ELINK_FLOW_CTRL_TX) sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_TX; if (!sc->link_params.req_fc_auto_adv) sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_NONE; } if (IS_PF(sc)) { if (sc->link_vars.link_up) { bxe_stats_handle(sc, STATS_EVENT_STOP); } if 
(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) { bxe_force_link_reset(sc); bxe_acquire_phy_lock(sc); rc = elink_phy_init(&sc->link_params, &sc->link_vars); bxe_release_phy_lock(sc); bxe_calc_fc_adv(sc); } } return rc; } static void bxe_add_sysctls(struct bxe_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; struct sysctl_oid *queue_top, *queue; struct sysctl_oid_list *queue_top_children, *queue_children; char queue_num_buf[32]; uint32_t q_stat; int i, j; ctx = device_get_sysctl_ctx(sc->dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "version", CTLFLAG_RD, BXE_DRIVER_VERSION, 0, "version"); snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d", BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION, BCM_5710_FW_REVISION_VERSION, BCM_5710_FW_ENGINEERING_VERSION); snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s", ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION) ? "Single" : (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD) ? "MF-SD" : (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI) ? "MF-SI" : (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" : "Unknown")); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "mf_vnics", CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0, "multifunction vnics per port"); snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d", ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" : (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" : (sc->devinfo.pcie_link_speed == 4) ? "8.0GT/s" : "???GT/s"), sc->devinfo.pcie_link_width); sc->debug = bxe_debug; #if __FreeBSD_version >= 900000 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version", CTLFLAG_RD, sc->devinfo.bc_ver_str, 0, "bootcode version"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version", CTLFLAG_RD, sc->fw_ver_str, 0, "firmware version"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode", CTLFLAG_RD, sc->mf_mode_str, 0, "multifunction mode"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr", CTLFLAG_RD, sc->mac_addr_str, 0, "mac address"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link", CTLFLAG_RD, sc->pci_link_str, 0, "pci link status"); SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "debug", CTLFLAG_RW, &sc->debug, "debug logging mode"); #else SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bc_version", CTLFLAG_RD, &sc->devinfo.bc_ver_str, 0, "bootcode version"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "fw_version", CTLFLAG_RD, &sc->fw_ver_str, 0, "firmware version"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mf_mode", CTLFLAG_RD, &sc->mf_mode_str, 0, "multifunction mode"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "mac_addr", CTLFLAG_RD, &sc->mac_addr_str, 0, "mac address"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pci_link", CTLFLAG_RD, &sc->pci_link_str, 0, "pci link status"); SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "debug", CTLFLAG_RW, &sc->debug, 0, "debug logging mode"); #endif /* #if __FreeBSD_version >= 900000 */ sc->trigger_grcdump = 0; SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "trigger_grcdump", CTLFLAG_RW, &sc->trigger_grcdump, 0, "trigger grcdump should be invoked" " before collecting grcdump"); sc->grcdump_started = 0; sc->grcdump_done = 0; SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "grcdump_done", CTLFLAG_RD, &sc->grcdump_done, 0, "set by driver when grcdump is done"); sc->rx_budget = bxe_rx_budget; SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget", CTLFLAG_RW, &sc->rx_budget, 0, "rx processing budget"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_param", 
CTLTYPE_UINT | CTLFLAG_RW, sc, 0, bxe_sysctl_pauseparam, "IU", "need pause frames- DEF:0/TX:1/RX:2/BOTH:3/AUTO:4/AUTOTX:5/AUTORX:6/AUTORXTX:7/NONE:8"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "state", CTLTYPE_UINT | CTLFLAG_RW, sc, 0, bxe_sysctl_state, "IU", "dump driver state"); for (i = 0; i < BXE_NUM_ETH_STATS; i++) { SYSCTL_ADD_PROC(ctx, children, OID_AUTO, bxe_eth_stats_arr[i].string, CTLTYPE_U64 | CTLFLAG_RD, sc, i, bxe_sysctl_eth_stat, "LU", bxe_eth_stats_arr[i].string); } /* add a new parent node for all queues "dev.bxe.#.queue" */ queue_top = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "queue", CTLFLAG_RD, NULL, "queue"); queue_top_children = SYSCTL_CHILDREN(queue_top); for (i = 0; i < sc->num_queues; i++) { /* add a new parent node for a single queue "dev.bxe.#.queue.#" */ snprintf(queue_num_buf, sizeof(queue_num_buf), "%d", i); queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO, queue_num_buf, CTLFLAG_RD, NULL, "single queue"); queue_children = SYSCTL_CHILDREN(queue); for (j = 0; j < BXE_NUM_ETH_Q_STATS; j++) { q_stat = ((i << 16) | j); SYSCTL_ADD_PROC(ctx, queue_children, OID_AUTO, bxe_eth_q_stats_arr[j].string, CTLTYPE_U64 | CTLFLAG_RD, sc, q_stat, bxe_sysctl_eth_q_stat, "LU", bxe_eth_q_stats_arr[j].string); } } } static int bxe_alloc_buf_rings(struct bxe_softc *sc) { #if __FreeBSD_version >= 901504 int i; struct bxe_fastpath *fp; for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF, M_NOWAIT, &fp->tx_mtx); if (fp->tx_br == NULL) return (-1); } #endif return (0); } static void bxe_free_buf_rings(struct bxe_softc *sc) { #if __FreeBSD_version >= 901504 int i; struct bxe_fastpath *fp; for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; if (fp->tx_br) { buf_ring_free(fp->tx_br, M_DEVBUF); fp->tx_br = NULL; } } #endif } static void bxe_init_fp_mutexs(struct bxe_softc *sc) { int i; struct bxe_fastpath *fp; for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name), "bxe%d_fp%d_tx_lock", sc->unit, i); mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF); snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name), "bxe%d_fp%d_rx_lock", sc->unit, i); mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF); } } static void bxe_destroy_fp_mutexs(struct bxe_softc *sc) { int i; struct bxe_fastpath *fp; for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; if (mtx_initialized(&fp->tx_mtx)) { mtx_destroy(&fp->tx_mtx); } if (mtx_initialized(&fp->rx_mtx)) { mtx_destroy(&fp->rx_mtx); } } } /* * Device attach function. * * Allocates device resources, performs secondary chip identification, and * initializes driver instance variables. This function is called from driver * load after a successful probe. 
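 * On any failure after partial initialization, the resources already
 * acquired (BARs, mutexes, ifnet, cdev, interrupts, buf rings, ILT) are
 * released again in reverse order before ENXIO is returned.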
* * Returns: * 0 = Success, >0 = Failure */ static int bxe_attach(device_t dev) { struct bxe_softc *sc; sc = device_get_softc(dev); BLOGD(sc, DBG_LOAD, "Starting attach...\n"); sc->state = BXE_STATE_CLOSED; sc->dev = dev; sc->unit = device_get_unit(dev); BLOGD(sc, DBG_LOAD, "softc = %p\n", sc); sc->pcie_bus = pci_get_bus(dev); sc->pcie_device = pci_get_slot(dev); sc->pcie_func = pci_get_function(dev); /* enable bus master capability */ pci_enable_busmaster(dev); /* get the BARs */ if (bxe_allocate_bars(sc) != 0) { return (ENXIO); } /* initialize the mutexes */ bxe_init_mutexes(sc); /* prepare the periodic callout */ callout_init(&sc->periodic_callout, 0); /* prepare the chip taskqueue */ sc->chip_tq_flags = CHIP_TQ_NONE; snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name), "bxe%d_chip_tq", sc->unit); TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc); sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT, taskqueue_thread_enqueue, &sc->chip_tq); taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */ "%s", sc->chip_tq_name); TIMEOUT_TASK_INIT(taskqueue_thread, &sc->sp_err_timeout_task, 0, bxe_sp_err_timeout_task, sc); /* get device info and set params */ if (bxe_get_device_info(sc) != 0) { BLOGE(sc, "getting device info\n"); bxe_deallocate_bars(sc); pci_disable_busmaster(dev); return (ENXIO); } /* get final misc params */ bxe_get_params(sc); /* set the default MTU (changed via ifconfig) */ sc->mtu = ETHERMTU; bxe_set_modes_bitmap(sc); /* XXX * If in AFEX mode and the function is configured for FCoE * then bail... no L2 allowed. */ /* get phy settings from shmem and 'and' against admin settings */ bxe_get_phy_info(sc); /* initialize the FreeBSD ifnet interface */ if (bxe_init_ifnet(sc) != 0) { bxe_release_mutexes(sc); bxe_deallocate_bars(sc); pci_disable_busmaster(dev); return (ENXIO); } if (bxe_add_cdev(sc) != 0) { if (sc->ifp != NULL) { ether_ifdetach(sc->ifp); } ifmedia_removeall(&sc->ifmedia); bxe_release_mutexes(sc); bxe_deallocate_bars(sc); pci_disable_busmaster(dev); return (ENXIO); } /* allocate device interrupts */ if (bxe_interrupt_alloc(sc) != 0) { bxe_del_cdev(sc); if (sc->ifp != NULL) { ether_ifdetach(sc->ifp); } ifmedia_removeall(&sc->ifmedia); bxe_release_mutexes(sc); bxe_deallocate_bars(sc); pci_disable_busmaster(dev); return (ENXIO); } bxe_init_fp_mutexs(sc); if (bxe_alloc_buf_rings(sc) != 0) { bxe_free_buf_rings(sc); bxe_interrupt_free(sc); bxe_del_cdev(sc); if (sc->ifp != NULL) { ether_ifdetach(sc->ifp); } ifmedia_removeall(&sc->ifmedia); bxe_release_mutexes(sc); bxe_deallocate_bars(sc); pci_disable_busmaster(dev); return (ENXIO); } /* allocate ilt */ if (bxe_alloc_ilt_mem(sc) != 0) { bxe_free_buf_rings(sc); bxe_interrupt_free(sc); bxe_del_cdev(sc); if (sc->ifp != NULL) { ether_ifdetach(sc->ifp); } ifmedia_removeall(&sc->ifmedia); bxe_release_mutexes(sc); bxe_deallocate_bars(sc); pci_disable_busmaster(dev); return (ENXIO); } /* allocate the host hardware/software hsi structures */ if (bxe_alloc_hsi_mem(sc) != 0) { bxe_free_ilt_mem(sc); bxe_free_buf_rings(sc); bxe_interrupt_free(sc); bxe_del_cdev(sc); if (sc->ifp != NULL) { ether_ifdetach(sc->ifp); } ifmedia_removeall(&sc->ifmedia); bxe_release_mutexes(sc); bxe_deallocate_bars(sc); pci_disable_busmaster(dev); return (ENXIO); } /* need to reset chip if UNDI was active */ if (IS_PF(sc) && !BXE_NOMCP(sc)) { /* init fw_seq */ sc->fw_seq = (SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) & DRV_MSG_SEQ_NUMBER_MASK); BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq); bxe_prev_unload(sc); } 
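    /*
     * DCB/DCBX is currently forced off here; the #else path below, which
     * would probe shmem2 for the LLDP/DCBX offsets and enable on-chip
     * negotiation, is compiled out.
     */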
#if 1 /* XXX */ bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF); #else if (SHMEM2_HAS(sc, dcbx_lldp_params_offset) && SHMEM2_HAS(sc, dcbx_lldp_dcbx_stat_offset) && SHMEM2_RD(sc, dcbx_lldp_params_offset) && SHMEM2_RD(sc, dcbx_lldp_dcbx_stat_offset)) { bxe_dcbx_set_state(sc, TRUE, BXE_DCBX_ENABLED_ON_NEG_ON); bxe_dcbx_init_params(sc); } else { bxe_dcbx_set_state(sc, FALSE, BXE_DCBX_ENABLED_OFF); } #endif /* calculate qm_cid_count */ sc->qm_cid_count = bxe_set_qm_cid_count(sc); BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count); sc->max_cos = 1; bxe_init_multi_cos(sc); bxe_add_sysctls(sc); return (0); } /* * Device detach function. * * Stops the controller, resets the controller, and releases resources. * * Returns: * 0 = Success, >0 = Failure */ static int bxe_detach(device_t dev) { struct bxe_softc *sc; if_t ifp; sc = device_get_softc(dev); BLOGD(sc, DBG_LOAD, "Starting detach...\n"); ifp = sc->ifp; if (ifp != NULL && if_vlantrunkinuse(ifp)) { BLOGE(sc, "Cannot detach while VLANs are in use.\n"); return(EBUSY); } bxe_del_cdev(sc); /* stop the periodic callout */ bxe_periodic_stop(sc); /* stop the chip taskqueue */ atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE); if (sc->chip_tq) { taskqueue_drain(sc->chip_tq, &sc->chip_tq_task); taskqueue_free(sc->chip_tq); sc->chip_tq = NULL; taskqueue_drain_timeout(taskqueue_thread, &sc->sp_err_timeout_task); } /* stop and reset the controller if it was open */ if (sc->state != BXE_STATE_CLOSED) { BXE_CORE_LOCK(sc); bxe_nic_unload(sc, UNLOAD_CLOSE, TRUE); sc->state = BXE_STATE_DISABLED; BXE_CORE_UNLOCK(sc); } /* release the network interface */ if (ifp != NULL) { ether_ifdetach(ifp); } ifmedia_removeall(&sc->ifmedia); /* XXX do the following based on driver state... */ /* free the host hardware/software hsi structures */ bxe_free_hsi_mem(sc); /* free ilt */ bxe_free_ilt_mem(sc); bxe_free_buf_rings(sc); /* release the interrupts */ bxe_interrupt_free(sc); /* Release the mutexes*/ bxe_destroy_fp_mutexs(sc); bxe_release_mutexes(sc); /* Release the PCIe BAR mapped memory */ bxe_deallocate_bars(sc); /* Release the FreeBSD interface. */ if (sc->ifp != NULL) { if_free(sc->ifp); } pci_disable_busmaster(dev); return (0); } /* * Device shutdown function. * * Stops and resets the controller. * * Returns: * Nothing */ static int bxe_shutdown(device_t dev) { struct bxe_softc *sc; sc = device_get_softc(dev); BLOGD(sc, DBG_LOAD, "Starting shutdown...\n"); /* stop the periodic callout */ bxe_periodic_stop(sc); if (sc->state != BXE_STATE_CLOSED) { BXE_CORE_LOCK(sc); bxe_nic_unload(sc, UNLOAD_NORMAL, FALSE); BXE_CORE_UNLOCK(sc); } return (0); } void bxe_igu_ack_sb(struct bxe_softc *sc, uint8_t igu_sb_id, uint8_t segment, uint16_t index, uint8_t op, uint8_t update) { uint32_t igu_addr = sc->igu_base_addr; igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8; bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr); } static void bxe_igu_clear_sb_gen(struct bxe_softc *sc, uint8_t func, uint8_t idu_sb_id, uint8_t is_pf) { uint32_t data, ctl, cnt = 100; uint32_t igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA; uint32_t igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL; uint32_t igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4; uint32_t sb_bit = 1 << (idu_sb_id%32); uint32_t func_encode = func | (is_pf ? 
1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT; uint32_t addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id; /* Not supported in BC mode */ if (CHIP_INT_MODE_IS_BC(sc)) { return; } data = ((IGU_USE_REGISTER_cstorm_type_0_sb_cleanup << IGU_REGULAR_CLEANUP_TYPE_SHIFT) | IGU_REGULAR_CLEANUP_SET | IGU_REGULAR_BCLEANUP); ctl = ((addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT) | (func_encode << IGU_CTRL_REG_FID_SHIFT) | (IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT)); BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n", data, igu_addr_data); REG_WR(sc, igu_addr_data, data); bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, BUS_SPACE_BARRIER_WRITE); mb(); BLOGD(sc, DBG_LOAD, "write 0x%08x to IGU(via GRC) addr 0x%x\n", ctl, igu_addr_ctl); REG_WR(sc, igu_addr_ctl, ctl); bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0, BUS_SPACE_BARRIER_WRITE); mb(); /* wait for clean up to finish */ while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) { DELAY(20000); } if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) { BLOGD(sc, DBG_LOAD, "Unable to finish IGU cleanup: " "idu_sb_id %d offset %d bit %d (cnt %d)\n", idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt); } } static void bxe_igu_clear_sb(struct bxe_softc *sc, uint8_t idu_sb_id) { bxe_igu_clear_sb_gen(sc, SC_FUNC(sc), idu_sb_id, TRUE /*PF*/); } /*******************/ /* ECORE CALLBACKS */ /*******************/ static void bxe_reset_common(struct bxe_softc *sc) { uint32_t val = 0x1400; /* reset_common */ REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR), 0xd3ffff7f); if (CHIP_IS_E3(sc)) { val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; } REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR), val); } static void bxe_common_init_phy(struct bxe_softc *sc) { uint32_t shmem_base[2]; uint32_t shmem2_base[2]; /* Avoid common init in case MFW supports LFA */ if (SHMEM2_RD(sc, size) > (uint32_t)offsetof(struct shmem2_region, lfa_host_addr[SC_PORT(sc)])) { return; } shmem_base[0] = sc->devinfo.shmem_base; shmem2_base[0] = sc->devinfo.shmem2_base; if (!CHIP_IS_E1x(sc)) { shmem_base[1] = SHMEM2_RD(sc, other_shmem_base_addr); shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr); } bxe_acquire_phy_lock(sc); elink_common_init_phy(sc, shmem_base, shmem2_base, sc->devinfo.chip_id, 0); bxe_release_phy_lock(sc); } static void bxe_pf_disable(struct bxe_softc *sc) { uint32_t val = REG_RD(sc, IGU_REG_PF_CONFIGURATION); val &= ~IGU_PF_CONF_FUNC_EN; REG_WR(sc, IGU_REG_PF_CONFIGURATION, val); REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 0); } static void bxe_init_pxp(struct bxe_softc *sc) { uint16_t devctl; int r_order, w_order; devctl = bxe_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_CTL, 2); BLOGD(sc, DBG_LOAD, "read 0x%08x from devctl\n", devctl); w_order = ((devctl & PCIM_EXP_CTL_MAX_PAYLOAD) >> 5); if (sc->mrrs == -1) { r_order = ((devctl & PCIM_EXP_CTL_MAX_READ_REQUEST) >> 12); } else { BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs); r_order = sc->mrrs; } ecore_init_pxp_arb(sc, r_order, w_order); } static uint32_t bxe_get_pretend_reg(struct bxe_softc *sc) { uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0; uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base); return (base + (SC_ABS_FUNC(sc)) * stride); } /* * Called only on E1H or E2. * When pretending to be PF, the pretend value is the function number 0..7. * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID * combination. 
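 * A caller pretends, performs its GRC accesses, and then restores the
 * register by pretending to be itself again, for example:
 *   bxe_pretend_func(sc, abs_func_id);
 *   bxe_pf_disable(sc);
 *   bxe_pretend_func(sc, SC_ABS_FUNC(sc));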
*/ static int bxe_pretend_func(struct bxe_softc *sc, uint16_t pretend_func_val) { uint32_t pretend_reg; if (CHIP_IS_E1H(sc) && (pretend_func_val > E1H_FUNC_MAX)) { return (-1); } /* get my own pretend register */ pretend_reg = bxe_get_pretend_reg(sc); REG_WR(sc, pretend_reg, pretend_func_val); REG_RD(sc, pretend_reg); return (0); } static void bxe_iov_init_dmae(struct bxe_softc *sc) { return; } static void bxe_iov_init_dq(struct bxe_softc *sc) { return; } /* send a NIG loopback debug packet */ static void bxe_lb_pckt(struct bxe_softc *sc) { uint32_t wb_write[3]; /* Ethernet source and destination addresses */ wb_write[0] = 0x55555555; wb_write[1] = 0x55555555; wb_write[2] = 0x20; /* SOP */ REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); /* NON-IP protocol */ wb_write[0] = 0x09000000; wb_write[1] = 0x55555555; wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */ REG_WR_DMAE(sc, NIG_REG_DEBUG_PACKET_LB, wb_write, 3); } /* * Some of the internal memories are not directly readable from the driver. * To test them we send debug packets. */ static int bxe_int_mem_test(struct bxe_softc *sc) { int factor; int count, i; uint32_t val = 0; if (CHIP_REV_IS_FPGA(sc)) { factor = 120; } else if (CHIP_REV_IS_EMUL(sc)) { factor = 200; } else { factor = 1; } /* disable inputs of parser neighbor blocks */ REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0); REG_WR(sc, TCM_REG_PRS_IFEN, 0x0); REG_WR(sc, CFC_REG_DEBUG0, 0x1); REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0); /* write 0 to parser credits for CFC search request */ REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); /* send Ethernet packet */ bxe_lb_pckt(sc); /* TODO do i reset NIG statistic? */ /* Wait until NIG register shows 1 packet of size 0x10 */ count = 1000 * factor; while (count) { bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2); val = *BXE_SP(sc, wb_data[0]); if (val == 0x10) { break; } DELAY(10000); count--; } if (val != 0x10) { BLOGE(sc, "NIG timeout val=0x%x\n", val); return (-1); } /* wait until PRS register shows 1 packet */ count = (1000 * factor); while (count) { val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS); if (val == 1) { break; } DELAY(10000); count--; } if (val != 0x1) { BLOGE(sc, "PRS timeout val=0x%x\n", val); return (-2); } /* Reset and init BRB, PRS */ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); DELAY(50000); REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); DELAY(50000); ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); /* Disable inputs of parser neighbor blocks */ REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x0); REG_WR(sc, TCM_REG_PRS_IFEN, 0x0); REG_WR(sc, CFC_REG_DEBUG0, 0x1); REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x0); /* Write 0 to parser credits for CFC search request */ REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0); /* send 10 Ethernet packets */ for (i = 0; i < 10; i++) { bxe_lb_pckt(sc); } /* Wait until NIG register shows 10+1 packets of size 11*0x10 = 0xb0 */ count = (1000 * factor); while (count) { bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2); val = *BXE_SP(sc, wb_data[0]); if (val == 0xb0) { break; } DELAY(10000); count--; } if (val != 0xb0) { BLOGE(sc, "NIG timeout val=0x%x\n", val); return (-3); } /* Wait until PRS register shows 2 packets */ val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS); if (val != 2) { BLOGE(sc, "PRS timeout val=0x%x\n", val); } /* Write 1 to parser credits for CFC search request */ REG_WR(sc, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1); /* Wait until PRS register shows 3 packets */ DELAY(10000 * factor); /* Wait until NIG register shows 1 packet of size 
0x10 */ val = REG_RD(sc, PRS_REG_NUM_OF_PACKETS); if (val != 3) { BLOGE(sc, "PRS timeout val=0x%x\n", val); } /* clear NIG EOP FIFO */ for (i = 0; i < 11; i++) { REG_RD(sc, NIG_REG_INGRESS_EOP_LB_FIFO); } val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY); if (val != 1) { BLOGE(sc, "clear of NIG failed val=0x%x\n", val); return (-4); } /* Reset and init BRB, PRS, NIG */ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03); DELAY(50000); REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03); DELAY(50000); ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); if (!CNIC_SUPPORT(sc)) { /* set NIC mode */ REG_WR(sc, PRS_REG_NIC_MODE, 1); } /* Enable inputs of parser neighbor blocks */ REG_WR(sc, TSDM_REG_ENABLE_IN1, 0x7fffffff); REG_WR(sc, TCM_REG_PRS_IFEN, 0x1); REG_WR(sc, CFC_REG_DEBUG0, 0x0); REG_WR(sc, NIG_REG_PRS_REQ_IN_EN, 0x1); return (0); } static void bxe_setup_fan_failure_detection(struct bxe_softc *sc) { int is_required; uint32_t val; int port; is_required = 0; val = (SHMEM_RD(sc, dev_info.shared_hw_config.config2) & SHARED_HW_CFG_FAN_FAILURE_MASK); if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED) { is_required = 1; } /* * The fan failure mechanism is usually related to the PHY type since * the power consumption of the board is affected by the PHY. Currently, * fan is required for most designs with SFX7101, BCM8727 and BCM8481. */ else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE) { for (port = PORT_0; port < PORT_MAX; port++) { is_required |= elink_fan_failure_det_req(sc, sc->devinfo.shmem_base, sc->devinfo.shmem2_base, port); } } BLOGD(sc, DBG_LOAD, "fan detection setting: %d\n", is_required); if (is_required == 0) { return; } /* Fan failure is indicated by SPIO 5 */ bxe_set_spio(sc, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z); /* set to active low mode */ val = REG_RD(sc, MISC_REG_SPIO_INT); val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS); REG_WR(sc, MISC_REG_SPIO_INT, val); /* enable interrupt to signal the IGU */ val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN); val |= MISC_SPIO_SPIO5; REG_WR(sc, MISC_REG_SPIO_EVENT_EN, val); } static void bxe_enable_blocks_attention(struct bxe_softc *sc) { uint32_t val; REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0); if (!CHIP_IS_E1x(sc)) { REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0x40); } else { REG_WR(sc, PXP_REG_PXP_INT_MASK_1, 0); } REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); /* * mask read length error interrupts in brb for parser * (parsing unit and 'checksum and crc' unit) * these errors are legal (PU reads fixed length and CAC can cause * read length error on truncated packets) */ REG_WR(sc, BRB1_REG_BRB1_INT_MASK, 0xFC00); REG_WR(sc, QM_REG_QM_INT_MASK, 0); REG_WR(sc, TM_REG_TM_INT_MASK, 0); REG_WR(sc, XSDM_REG_XSDM_INT_MASK_0, 0); REG_WR(sc, XSDM_REG_XSDM_INT_MASK_1, 0); REG_WR(sc, XCM_REG_XCM_INT_MASK, 0); /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_0, 0); */ /* REG_WR(sc, XSEM_REG_XSEM_INT_MASK_1, 0); */ REG_WR(sc, USDM_REG_USDM_INT_MASK_0, 0); REG_WR(sc, USDM_REG_USDM_INT_MASK_1, 0); REG_WR(sc, UCM_REG_UCM_INT_MASK, 0); /* REG_WR(sc, USEM_REG_USEM_INT_MASK_0, 0); */ /* REG_WR(sc, USEM_REG_USEM_INT_MASK_1, 0); */ REG_WR(sc, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0); REG_WR(sc, CSDM_REG_CSDM_INT_MASK_0, 0); REG_WR(sc, CSDM_REG_CSDM_INT_MASK_1, 0); REG_WR(sc, CCM_REG_CCM_INT_MASK, 0); /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_0, 0); */ /* REG_WR(sc, CSEM_REG_CSEM_INT_MASK_1, 0); */ val = (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF | 
PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN); if (!CHIP_IS_E1x(sc)) { val |= (PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED); } REG_WR(sc, PXP2_REG_PXP2_INT_MASK_0, val); REG_WR(sc, TSDM_REG_TSDM_INT_MASK_0, 0); REG_WR(sc, TSDM_REG_TSDM_INT_MASK_1, 0); REG_WR(sc, TCM_REG_TCM_INT_MASK, 0); /* REG_WR(sc, TSEM_REG_TSEM_INT_MASK_0, 0); */ if (!CHIP_IS_E1x(sc)) { /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */ REG_WR(sc, TSEM_REG_TSEM_INT_MASK_1, 0x07ff); } REG_WR(sc, CDU_REG_CDU_INT_MASK, 0); REG_WR(sc, DMAE_REG_DMAE_INT_MASK, 0); /* REG_WR(sc, MISC_REG_MISC_INT_MASK, 0); */ REG_WR(sc, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */ } /** * bxe_init_hw_common - initialize the HW at the COMMON phase. * * @sc: driver handle */ static int bxe_init_hw_common(struct bxe_softc *sc) { uint8_t abs_func_id; uint32_t val; BLOGD(sc, DBG_LOAD, "starting common init for func %d\n", SC_ABS_FUNC(sc)); /* * take the RESET lock to protect undi_unload flow from accessing * registers while we are resetting the chip */ bxe_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET); bxe_reset_common(sc); REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET), 0xffffffff); val = 0xfffc; if (CHIP_IS_E3(sc)) { val |= MISC_REGISTERS_RESET_REG_2_MSTAT0; val |= MISC_REGISTERS_RESET_REG_2_MSTAT1; } REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET), val); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET); ecore_init_block(sc, BLOCK_MISC, PHASE_COMMON); BLOGD(sc, DBG_LOAD, "after misc block init\n"); if (!CHIP_IS_E1x(sc)) { /* * 4-port mode or 2-port mode we need to turn off master-enable for * everyone. After that we turn it back on for self. So, we disregard * multi-function, and always disable all functions on the given path, * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1 */ for (abs_func_id = SC_PATH(sc); abs_func_id < (E2_FUNC_MAX * 2); abs_func_id += 2) { if (abs_func_id == SC_ABS_FUNC(sc)) { REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); continue; } bxe_pretend_func(sc, abs_func_id); /* clear pf enable */ bxe_pf_disable(sc); bxe_pretend_func(sc, SC_ABS_FUNC(sc)); } } BLOGD(sc, DBG_LOAD, "after pf disable\n"); ecore_init_block(sc, BLOCK_PXP, PHASE_COMMON); if (CHIP_IS_E1(sc)) { /* * enable HW interrupt from PXP on USDM overflow * bit 16 on INT_MASK_0 */ REG_WR(sc, PXP_REG_PXP_INT_MASK_0, 0); } ecore_init_block(sc, BLOCK_PXP2, PHASE_COMMON); bxe_init_pxp(sc); #ifdef __BIG_ENDIAN REG_WR(sc, PXP2_REG_RQ_QM_ENDIAN_M, 1); REG_WR(sc, PXP2_REG_RQ_TM_ENDIAN_M, 1); REG_WR(sc, PXP2_REG_RQ_SRC_ENDIAN_M, 1); REG_WR(sc, PXP2_REG_RQ_CDU_ENDIAN_M, 1); REG_WR(sc, PXP2_REG_RQ_DBG_ENDIAN_M, 1); /* make sure this value is 0 */ REG_WR(sc, PXP2_REG_RQ_HC_ENDIAN_M, 0); //REG_WR(sc, PXP2_REG_RD_PBF_SWAP_MODE, 1); REG_WR(sc, PXP2_REG_RD_QM_SWAP_MODE, 1); REG_WR(sc, PXP2_REG_RD_TM_SWAP_MODE, 1); REG_WR(sc, PXP2_REG_RD_SRC_SWAP_MODE, 1); REG_WR(sc, PXP2_REG_RD_CDURD_SWAP_MODE, 1); #endif ecore_ilt_init_page_size(sc, INITOP_SET); if (CHIP_REV_IS_FPGA(sc) && CHIP_IS_E1H(sc)) { REG_WR(sc, PXP2_REG_PGL_TAGS_LIMIT, 0x1); } /* let the HW do it's magic... */ DELAY(100000); /* finish PXP init */ val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE); if (val != 1) { BLOGE(sc, "PXP2 CFG failed PXP2_REG_RQ_CFG_DONE val = 0x%x\n", val); return (-1); } val = REG_RD(sc, PXP2_REG_RD_INIT_DONE); if (val != 1) { BLOGE(sc, "PXP2 RD_INIT failed val = 0x%x\n", val); return (-1); } BLOGD(sc, DBG_LOAD, "after pxp init\n"); /* * Timer bug workaround for E2 only. 
We need to set the entire ILT to have * entries with value "0" and valid bit on. This needs to be done by the * first PF that is loaded in a path (i.e. common phase) */ if (!CHIP_IS_E1x(sc)) { /* * In E2 there is a bug in the timers block that can cause function 6 / 7 * (i.e. vnic3) to start even if it is marked as "scan-off". * This occurs when a different function (func2,3) is being marked * as "scan-off". Real-life scenario for example: if a driver is being * load-unloaded while func6,7 are down. This will cause the timer to access * the ilt, translate to a logical address and send a request to read/write. * Since the ilt for the function that is down is not valid, this will cause * a translation error which is unrecoverable. * The Workaround is intended to make sure that when this happens nothing * fatal will occur. The workaround: * 1. First PF driver which loads on a path will: * a. After taking the chip out of reset, by using pretend, * it will write "0" to the following registers of * the other vnics. * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0); * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0); * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0); * And for itself it will write '1' to * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable * dmae-operations (writing to pram for example.) * note: can be done for only function 6,7 but cleaner this * way. * b. Write zero+valid to the entire ILT. * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of * VNIC3 (of that port). The range allocated will be the * entire ILT. This is needed to prevent ILT range error. * 2. Any PF driver load flow: * a. ILT update with the physical addresses of the allocated * logical pages. * b. Wait 20msec. - note that this timeout is needed to make * sure there are no requests in one of the PXP internal * queues with "old" ILT addresses. * c. PF enable in the PGLC. * d. Clear the was_error of the PF in the PGLC. (could have * occurred while driver was down) * e. PF enable in the CFC (WEAK + STRONG) * f. Timers scan enable * 3. PF driver unload flow: * a. Clear the Timers scan_en. * b. Polling for scan_on=0 for that PF. * c. Clear the PF enable bit in the PXP. * d. Clear the PF enable in the CFC (WEAK + STRONG) * e. Write zero+valid to all ILT entries (The valid bit must * stay set) * f. If this is VNIC 3 of a port then also init * first_timers_ilt_entry to zero and last_timers_ilt_entry * to the last enrty in the ILT. * * Notes: * Currently the PF error in the PGLC is non recoverable. * In the future the there will be a recovery routine for this error. * Currently attention is masked. * Having an MCP lock on the load/unload process does not guarantee that * there is no Timer disable during Func6/7 enable. This is because the * Timers scan is currently being cleared by the MCP on FLR. * Step 2.d can be done only for PF6/7 and the driver can also check if * there is error before clearing it. But the flow above is simpler and * more general. * All ILT entries are written by zero+valid and not just PF6/7 * ILT entries since in the future the ILT entries allocation for * PF-s might be dynamic. 
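 *
 * The block that follows implements the zero+valid ILT fill and the
 * timers first/last ILT entry setup by initializing a dummy TM client
 * covering the whole ILT while pretending to be function 6/7 of this path.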
*/ struct ilt_client_info ilt_cli; struct ecore_ilt ilt; memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); memset(&ilt, 0, sizeof(struct ecore_ilt)); /* initialize dummy TM client */ ilt_cli.start = 0; ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; ilt_cli.client_num = ILT_CLIENT_TM; /* * Step 1: set zeroes to all ilt page entries with valid bit on * Step 2: set the timers first/last ilt entry to point * to the entire range to prevent ILT range error for 3rd/4th * vnic (this code assumes existence of the vnic) * * both steps performed by call to ecore_ilt_client_init_op() * with dummy TM client * * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT * and his brother are split registers */ bxe_pretend_func(sc, (SC_PATH(sc) + 6)); ecore_ilt_client_init_op_ilt(sc, &ilt, &ilt_cli, INITOP_CLEAR); bxe_pretend_func(sc, SC_ABS_FUNC(sc)); REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN, BXE_PXP_DRAM_ALIGN); REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_RD, BXE_PXP_DRAM_ALIGN); REG_WR(sc, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1); } REG_WR(sc, PXP2_REG_RQ_DISABLE_INPUTS, 0); REG_WR(sc, PXP2_REG_RD_DISABLE_INPUTS, 0); if (!CHIP_IS_E1x(sc)) { int factor = CHIP_REV_IS_EMUL(sc) ? 1000 : (CHIP_REV_IS_FPGA(sc) ? 400 : 0); ecore_init_block(sc, BLOCK_PGLUE_B, PHASE_COMMON); ecore_init_block(sc, BLOCK_ATC, PHASE_COMMON); /* let the HW do it's magic... */ do { DELAY(200000); val = REG_RD(sc, ATC_REG_ATC_INIT_DONE); } while (factor-- && (val != 1)); if (val != 1) { BLOGE(sc, "ATC_INIT failed val = 0x%x\n", val); return (-1); } } BLOGD(sc, DBG_LOAD, "after pglue and atc init\n"); ecore_init_block(sc, BLOCK_DMAE, PHASE_COMMON); bxe_iov_init_dmae(sc); /* clean the DMAE memory */ sc->dmae_ready = 1; ecore_init_fill(sc, TSEM_REG_PRAM, 0, 8, 1); ecore_init_block(sc, BLOCK_TCM, PHASE_COMMON); ecore_init_block(sc, BLOCK_UCM, PHASE_COMMON); ecore_init_block(sc, BLOCK_CCM, PHASE_COMMON); ecore_init_block(sc, BLOCK_XCM, PHASE_COMMON); bxe_read_dmae(sc, XSEM_REG_PASSIVE_BUFFER, 3); bxe_read_dmae(sc, CSEM_REG_PASSIVE_BUFFER, 3); bxe_read_dmae(sc, TSEM_REG_PASSIVE_BUFFER, 3); bxe_read_dmae(sc, USEM_REG_PASSIVE_BUFFER, 3); ecore_init_block(sc, BLOCK_QM, PHASE_COMMON); /* QM queues pointers table */ ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET); /* soft reset pulse */ REG_WR(sc, QM_REG_SOFT_RESET, 1); REG_WR(sc, QM_REG_SOFT_RESET, 0); if (CNIC_SUPPORT(sc)) ecore_init_block(sc, BLOCK_TM, PHASE_COMMON); ecore_init_block(sc, BLOCK_DORQ, PHASE_COMMON); REG_WR(sc, DORQ_REG_DPM_CID_OFST, BXE_DB_SHIFT); if (!CHIP_REV_IS_SLOW(sc)) { /* enable hw interrupt from doorbell Q */ REG_WR(sc, DORQ_REG_DORQ_INT_MASK, 0); } ecore_init_block(sc, BLOCK_BRB1, PHASE_COMMON); ecore_init_block(sc, BLOCK_PRS, PHASE_COMMON); REG_WR(sc, PRS_REG_A_PRSU_20, 0xf); if (!CHIP_IS_E1(sc)) { REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan); } if (!CHIP_IS_E1x(sc) && !CHIP_IS_E3B0(sc)) { if (IS_MF_AFEX(sc)) { /* * configure that AFEX and VLAN headers must be * received in AFEX mode */ REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, 0xE); REG_WR(sc, PRS_REG_MUST_HAVE_HDRS, 0xA); REG_WR(sc, PRS_REG_HDRS_AFTER_TAG_0, 0x6); REG_WR(sc, PRS_REG_TAG_ETHERTYPE_0, 0x8926); REG_WR(sc, PRS_REG_TAG_LEN_0, 0x4); } else { /* * Bit-map indicating which L2 hdrs may appear * after the basic Ethernet header */ REG_WR(sc, PRS_REG_HDRS_AFTER_BASIC, sc->devinfo.mf_info.path_has_ovlan ? 
7 : 6); } } ecore_init_block(sc, BLOCK_TSDM, PHASE_COMMON); ecore_init_block(sc, BLOCK_CSDM, PHASE_COMMON); ecore_init_block(sc, BLOCK_USDM, PHASE_COMMON); ecore_init_block(sc, BLOCK_XSDM, PHASE_COMMON); if (!CHIP_IS_E1x(sc)) { /* reset VFC memories */ REG_WR(sc, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, VFC_MEMORIES_RST_REG_CAM_RST | VFC_MEMORIES_RST_REG_RAM_RST); REG_WR(sc, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST, VFC_MEMORIES_RST_REG_CAM_RST | VFC_MEMORIES_RST_REG_RAM_RST); DELAY(20000); } ecore_init_block(sc, BLOCK_TSEM, PHASE_COMMON); ecore_init_block(sc, BLOCK_USEM, PHASE_COMMON); ecore_init_block(sc, BLOCK_CSEM, PHASE_COMMON); ecore_init_block(sc, BLOCK_XSEM, PHASE_COMMON); /* sync semi rtc */ REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x80000000); REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x80000000); ecore_init_block(sc, BLOCK_UPB, PHASE_COMMON); ecore_init_block(sc, BLOCK_XPB, PHASE_COMMON); ecore_init_block(sc, BLOCK_PBF, PHASE_COMMON); if (!CHIP_IS_E1x(sc)) { if (IS_MF_AFEX(sc)) { /* * configure that AFEX and VLAN headers must be * sent in AFEX mode */ REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, 0xE); REG_WR(sc, PBF_REG_MUST_HAVE_HDRS, 0xA); REG_WR(sc, PBF_REG_HDRS_AFTER_TAG_0, 0x6); REG_WR(sc, PBF_REG_TAG_ETHERTYPE_0, 0x8926); REG_WR(sc, PBF_REG_TAG_LEN_0, 0x4); } else { REG_WR(sc, PBF_REG_HDRS_AFTER_BASIC, sc->devinfo.mf_info.path_has_ovlan ? 7 : 6); } } REG_WR(sc, SRC_REG_SOFT_RST, 1); ecore_init_block(sc, BLOCK_SRC, PHASE_COMMON); if (CNIC_SUPPORT(sc)) { REG_WR(sc, SRC_REG_KEYSEARCH_0, 0x63285672); REG_WR(sc, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); REG_WR(sc, SRC_REG_KEYSEARCH_2, 0x223aef9b); REG_WR(sc, SRC_REG_KEYSEARCH_3, 0x26001e3a); REG_WR(sc, SRC_REG_KEYSEARCH_4, 0x7ae91116); REG_WR(sc, SRC_REG_KEYSEARCH_5, 0x5ce5230b); REG_WR(sc, SRC_REG_KEYSEARCH_6, 0x298d8adf); REG_WR(sc, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); REG_WR(sc, SRC_REG_KEYSEARCH_8, 0x1830f82f); REG_WR(sc, SRC_REG_KEYSEARCH_9, 0x01e46be7); } REG_WR(sc, SRC_REG_SOFT_RST, 0); if (sizeof(union cdu_context) != 1024) { /* we currently assume that a context is 1024 bytes */ BLOGE(sc, "please adjust the size of cdu_context(%ld)\n", (long)sizeof(union cdu_context)); } ecore_init_block(sc, BLOCK_CDU, PHASE_COMMON); val = (4 << 24) + (0 << 12) + 1024; REG_WR(sc, CDU_REG_CDU_GLOBAL_PARAMS, val); ecore_init_block(sc, BLOCK_CFC, PHASE_COMMON); REG_WR(sc, CFC_REG_INIT_REG, 0x7FF); /* enable context validation interrupt from CFC */ REG_WR(sc, CFC_REG_CFC_INT_MASK, 0); /* set the thresholds to prevent CFC/CDU race */ REG_WR(sc, CFC_REG_DEBUG0, 0x20020000); ecore_init_block(sc, BLOCK_HC, PHASE_COMMON); if (!CHIP_IS_E1x(sc) && BXE_NOMCP(sc)) { REG_WR(sc, IGU_REG_RESET_MEMORIES, 0x36); } ecore_init_block(sc, BLOCK_IGU, PHASE_COMMON); ecore_init_block(sc, BLOCK_MISC_AEU, PHASE_COMMON); /* Reset PCIE errors for debug */ REG_WR(sc, 0x2814, 0xffffffff); REG_WR(sc, 0x3820, 0xffffffff); if (!CHIP_IS_E1x(sc)) { REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_CONTROL_5, (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 | PXPCS_TL_CONTROL_5_ERR_UNSPPORT)); REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT, (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 | PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 | PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2)); REG_WR(sc, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT, (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 | PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 | PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5)); } ecore_init_block(sc, BLOCK_NIG, PHASE_COMMON); if (!CHIP_IS_E1(sc)) { /* in E3 this done in per-port section */ if (!CHIP_IS_E3(sc)) REG_WR(sc, 
NIG_REG_LLH_MF_MODE, IS_MF(sc)); } if (CHIP_IS_E1H(sc)) { /* not applicable for E2 (and above ...) */ REG_WR(sc, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(sc)); } if (CHIP_REV_IS_SLOW(sc)) { DELAY(200000); } /* finish CFC init */ val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10); if (val != 1) { BLOGE(sc, "CFC LL_INIT failed val=0x%x\n", val); return (-1); } val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10); if (val != 1) { BLOGE(sc, "CFC AC_INIT failed val=0x%x\n", val); return (-1); } val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10); if (val != 1) { BLOGE(sc, "CFC CAM_INIT failed val=0x%x\n", val); return (-1); } REG_WR(sc, CFC_REG_DEBUG0, 0); if (CHIP_IS_E1(sc)) { /* read NIG statistic to see if this is our first up since powerup */ bxe_read_dmae(sc, NIG_REG_STAT2_BRB_OCTET, 2); val = *BXE_SP(sc, wb_data[0]); /* do internal memory self test */ if ((val == 0) && bxe_int_mem_test(sc)) { BLOGE(sc, "internal mem self test failed val=0x%x\n", val); return (-1); } } bxe_setup_fan_failure_detection(sc); /* clear PXP2 attentions */ REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0); bxe_enable_blocks_attention(sc); if (!CHIP_REV_IS_SLOW(sc)) { ecore_enable_blocks_parity(sc); } if (!BXE_NOMCP(sc)) { if (CHIP_IS_E1x(sc)) { bxe_common_init_phy(sc); } } return (0); } /** * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase. * * @sc: driver handle */ static int bxe_init_hw_common_chip(struct bxe_softc *sc) { int rc = bxe_init_hw_common(sc); if (rc) { BLOGE(sc, "bxe_init_hw_common failed rc=%d\n", rc); return (rc); } /* In E2 2-PORT mode, same ext phy is used for the two paths */ if (!BXE_NOMCP(sc)) { bxe_common_init_phy(sc); } return (0); } static int bxe_init_hw_port(struct bxe_softc *sc) { int port = SC_PORT(sc); int init_phase = port ? PHASE_PORT1 : PHASE_PORT0; uint32_t low, high; uint32_t val; BLOGD(sc, DBG_LOAD, "starting port init for port %d\n", port); REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); ecore_init_block(sc, BLOCK_MISC, init_phase); ecore_init_block(sc, BLOCK_PXP, init_phase); ecore_init_block(sc, BLOCK_PXP2, init_phase); /* * Timers bug workaround: disables the pf_master bit in pglue at * common phase, we need to enable it here before any dmae access are * attempted. Therefore we manually added the enable-master to the * port phase (it also happens in the function phase) */ if (!CHIP_IS_E1x(sc)) { REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); } ecore_init_block(sc, BLOCK_ATC, init_phase); ecore_init_block(sc, BLOCK_DMAE, init_phase); ecore_init_block(sc, BLOCK_PGLUE_B, init_phase); ecore_init_block(sc, BLOCK_QM, init_phase); ecore_init_block(sc, BLOCK_TCM, init_phase); ecore_init_block(sc, BLOCK_UCM, init_phase); ecore_init_block(sc, BLOCK_CCM, init_phase); ecore_init_block(sc, BLOCK_XCM, init_phase); /* QM cid (connection) count */ ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET); if (CNIC_SUPPORT(sc)) { ecore_init_block(sc, BLOCK_TM, init_phase); REG_WR(sc, TM_REG_LIN0_SCAN_TIME + port*4, 20); REG_WR(sc, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); } ecore_init_block(sc, BLOCK_DORQ, init_phase); ecore_init_block(sc, BLOCK_BRB1, init_phase); if (CHIP_IS_E1(sc) || CHIP_IS_E1H(sc)) { if (IS_MF(sc)) { low = (BXE_ONE_PORT(sc) ? 160 : 246); } else if (sc->mtu > 4096) { if (BXE_ONE_PORT(sc)) { low = 160; } else { val = sc->mtu; /* (24*1024 + val*4)/256 */ low = (96 + (val / 64) + ((val % 64) ? 1 : 0)); } } else { low = (BXE_ONE_PORT(sc) ? 
80 : 160); } high = (low + 56); /* 14*1024/256 */ REG_WR(sc, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low); REG_WR(sc, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high); } if (CHIP_IS_MODE_4_PORT(sc)) { REG_WR(sc, SC_PORT(sc) ? BRB1_REG_MAC_GUARANTIED_1 : BRB1_REG_MAC_GUARANTIED_0, 40); } ecore_init_block(sc, BLOCK_PRS, init_phase); if (CHIP_IS_E3B0(sc)) { if (IS_MF_AFEX(sc)) { /* configure headers for AFEX mode */ REG_WR(sc, SC_PORT(sc) ? PRS_REG_HDRS_AFTER_BASIC_PORT_1 : PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE); REG_WR(sc, SC_PORT(sc) ? PRS_REG_HDRS_AFTER_TAG_0_PORT_1 : PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6); REG_WR(sc, SC_PORT(sc) ? PRS_REG_MUST_HAVE_HDRS_PORT_1 : PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA); } else { /* Ovlan exists only if we are in multi-function + * switch-dependent mode, in switch-independent there * is no ovlan headers */ REG_WR(sc, SC_PORT(sc) ? PRS_REG_HDRS_AFTER_BASIC_PORT_1 : PRS_REG_HDRS_AFTER_BASIC_PORT_0, (sc->devinfo.mf_info.path_has_ovlan ? 7 : 6)); } } ecore_init_block(sc, BLOCK_TSDM, init_phase); ecore_init_block(sc, BLOCK_CSDM, init_phase); ecore_init_block(sc, BLOCK_USDM, init_phase); ecore_init_block(sc, BLOCK_XSDM, init_phase); ecore_init_block(sc, BLOCK_TSEM, init_phase); ecore_init_block(sc, BLOCK_USEM, init_phase); ecore_init_block(sc, BLOCK_CSEM, init_phase); ecore_init_block(sc, BLOCK_XSEM, init_phase); ecore_init_block(sc, BLOCK_UPB, init_phase); ecore_init_block(sc, BLOCK_XPB, init_phase); ecore_init_block(sc, BLOCK_PBF, init_phase); if (CHIP_IS_E1x(sc)) { /* configure PBF to work without PAUSE mtu 9000 */ REG_WR(sc, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); /* update threshold */ REG_WR(sc, PBF_REG_P0_ARB_THRSH + port*4, (9040/16)); /* update init credit */ REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22); /* probe changes */ REG_WR(sc, PBF_REG_INIT_P0 + port*4, 1); DELAY(50); REG_WR(sc, PBF_REG_INIT_P0 + port*4, 0); } if (CNIC_SUPPORT(sc)) { ecore_init_block(sc, BLOCK_SRC, init_phase); } ecore_init_block(sc, BLOCK_CDU, init_phase); ecore_init_block(sc, BLOCK_CFC, init_phase); if (CHIP_IS_E1(sc)) { REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0); REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0); } ecore_init_block(sc, BLOCK_HC, init_phase); ecore_init_block(sc, BLOCK_IGU, init_phase); ecore_init_block(sc, BLOCK_MISC_AEU, init_phase); /* init aeu_mask_attn_func_0/1: * - SF mode: bits 3-7 are masked. only bits 0-2 are in use * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF * bits 4-7 are used for "per vn group attention" */ val = IS_MF(sc) ? 0xF7 : 0x7; /* Enable DCBX attention for all but E1 */ val |= CHIP_IS_E1(sc) ? 0 : 0x10; REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val); ecore_init_block(sc, BLOCK_NIG, init_phase); if (!CHIP_IS_E1x(sc)) { /* Bit-map indicating which L2 hdrs may appear after the * basic Ethernet header */ if (IS_MF_AFEX(sc)) { REG_WR(sc, SC_PORT(sc) ? NIG_REG_P1_HDRS_AFTER_BASIC : NIG_REG_P0_HDRS_AFTER_BASIC, 0xE); } else { REG_WR(sc, SC_PORT(sc) ? NIG_REG_P1_HDRS_AFTER_BASIC : NIG_REG_P0_HDRS_AFTER_BASIC, IS_MF_SD(sc) ? 7 : 6); } if (CHIP_IS_E3(sc)) { REG_WR(sc, SC_PORT(sc) ? NIG_REG_LLH1_MF_MODE : NIG_REG_LLH_MF_MODE, IS_MF(sc)); } } if (!CHIP_IS_E3(sc)) { REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); } if (!CHIP_IS_E1(sc)) { /* 0x2 disable mf_ov, 0x1 enable */ REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, (IS_MF_SD(sc) ? 
0x1 : 0x2)); if (!CHIP_IS_E1x(sc)) { val = 0; switch (sc->devinfo.mf_info.mf_mode) { case MULTI_FUNCTION_SD: val = 1; break; case MULTI_FUNCTION_SI: case MULTI_FUNCTION_AFEX: val = 2; break; } REG_WR(sc, (SC_PORT(sc) ? NIG_REG_LLH1_CLS_TYPE : NIG_REG_LLH0_CLS_TYPE), val); } REG_WR(sc, NIG_REG_LLFC_ENABLE_0 + port*4, 0); REG_WR(sc, NIG_REG_LLFC_OUT_EN_0 + port*4, 0); REG_WR(sc, NIG_REG_PAUSE_ENABLE_0 + port*4, 1); } /* If SPIO5 is set to generate interrupts, enable it for this port */ val = REG_RD(sc, MISC_REG_SPIO_EVENT_EN); if (val & MISC_SPIO_SPIO5) { uint32_t reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); val = REG_RD(sc, reg_addr); val |= AEU_INPUTS_ATTN_BITS_SPIO5; REG_WR(sc, reg_addr, val); } return (0); } static uint32_t bxe_flr_clnup_reg_poll(struct bxe_softc *sc, uint32_t reg, uint32_t expected, uint32_t poll_count) { uint32_t cur_cnt = poll_count; uint32_t val; while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) { DELAY(FLR_WAIT_INTERVAL); } return (val); } static int bxe_flr_clnup_poll_hw_counter(struct bxe_softc *sc, uint32_t reg, char *msg, uint32_t poll_cnt) { uint32_t val = bxe_flr_clnup_reg_poll(sc, reg, 0, poll_cnt); if (val != 0) { BLOGE(sc, "%s usage count=%d\n", msg, val); return (1); } return (0); } /* Common routines with VF FLR cleanup */ static uint32_t bxe_flr_clnup_poll_count(struct bxe_softc *sc) { /* adjust polling timeout */ if (CHIP_REV_IS_EMUL(sc)) { return (FLR_POLL_CNT * 2000); } if (CHIP_REV_IS_FPGA(sc)) { return (FLR_POLL_CNT * 120); } return (FLR_POLL_CNT); } static int bxe_poll_hw_usage_counters(struct bxe_softc *sc, uint32_t poll_cnt) { /* wait for CFC PF usage-counter to zero (includes all the VFs) */ if (bxe_flr_clnup_poll_hw_counter(sc, CFC_REG_NUM_LCIDS_INSIDE_PF, "CFC PF usage counter timed out", poll_cnt)) { return (1); } /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */ if (bxe_flr_clnup_poll_hw_counter(sc, DORQ_REG_PF_USAGE_CNT, "DQ PF usage counter timed out", poll_cnt)) { return (1); } /* Wait for QM PF usage-counter to zero (until DQ cleanup) */ if (bxe_flr_clnup_poll_hw_counter(sc, QM_REG_PF_USG_CNT_0 + 4*SC_FUNC(sc), "QM PF usage counter timed out", poll_cnt)) { return (1); } /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */ if (bxe_flr_clnup_poll_hw_counter(sc, TM_REG_LIN0_VNIC_UC + 4*SC_PORT(sc), "Timers VNIC usage counter timed out", poll_cnt)) { return (1); } if (bxe_flr_clnup_poll_hw_counter(sc, TM_REG_LIN0_NUM_SCANS + 4*SC_PORT(sc), "Timers NUM_SCANS usage counter timed out", poll_cnt)) { return (1); } /* Wait DMAE PF usage counter to zero */ if (bxe_flr_clnup_poll_hw_counter(sc, dmae_reg_go_c[INIT_DMAE_C(sc)], "DMAE dommand register timed out", poll_cnt)) { return (1); } return (0); } #define OP_GEN_PARAM(param) \ (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM) #define OP_GEN_TYPE(type) \ (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE) #define OP_GEN_AGG_VECT(index) \ (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX) static int bxe_send_final_clnup(struct bxe_softc *sc, uint8_t clnup_func, uint32_t poll_cnt) { uint32_t op_gen_command = 0; uint32_t comp_addr = (BAR_CSTRORM_INTMEM + CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func)); int ret = 0; if (REG_RD(sc, comp_addr)) { BLOGE(sc, "Cleanup complete was not 0 before sending\n"); return (1); } op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX); op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE); op_gen_command |= 
OP_GEN_AGG_VECT(clnup_func); op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT; BLOGD(sc, DBG_LOAD, "sending FW Final cleanup\n"); REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command); if (bxe_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) { BLOGE(sc, "FW final cleanup did not succeed\n"); BLOGD(sc, DBG_LOAD, "At timeout completion address contained %x\n", (REG_RD(sc, comp_addr))); bxe_panic(sc, ("FLR cleanup failed\n")); return (1); } /* Zero completion for nxt FLR */ REG_WR(sc, comp_addr, 0); return (ret); } static void bxe_pbf_pN_buf_flushed(struct bxe_softc *sc, struct pbf_pN_buf_regs *regs, uint32_t poll_count) { uint32_t init_crd, crd, crd_start, crd_freed, crd_freed_start; uint32_t cur_cnt = poll_count; crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed); crd = crd_start = REG_RD(sc, regs->crd); init_crd = REG_RD(sc, regs->init_crd); BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd); BLOGD(sc, DBG_LOAD, "CREDIT[%d] : s:%x\n", regs->pN, crd); BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed); while ((crd != init_crd) && ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) < (init_crd - crd_start))) { if (cur_cnt--) { DELAY(FLR_WAIT_INTERVAL); crd = REG_RD(sc, regs->crd); crd_freed = REG_RD(sc, regs->crd_freed); } else { BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN); BLOGD(sc, DBG_LOAD, "CREDIT[%d] : c:%x\n", regs->pN, crd); BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed); break; } } BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF tx buffer[%d]\n", poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN); } static void bxe_pbf_pN_cmd_flushed(struct bxe_softc *sc, struct pbf_pN_cmd_regs *regs, uint32_t poll_count) { uint32_t occup, to_free, freed, freed_start; uint32_t cur_cnt = poll_count; occup = to_free = REG_RD(sc, regs->lines_occup); freed = freed_start = REG_RD(sc, regs->lines_freed); BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup); BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed); while (occup && ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) { if (cur_cnt--) { DELAY(FLR_WAIT_INTERVAL); occup = REG_RD(sc, regs->lines_occup); freed = REG_RD(sc, regs->lines_freed); } else { BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN); BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup); BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed); break; } } BLOGD(sc, DBG_LOAD, "Waited %d*%d usec for PBF cmd queue[%d]\n", poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN); } static void bxe_tx_hw_flushed(struct bxe_softc *sc, uint32_t poll_count) { struct pbf_pN_cmd_regs cmd_regs[] = { {0, (CHIP_IS_E3B0(sc)) ? PBF_REG_TQ_OCCUPANCY_Q0 : PBF_REG_P0_TQ_OCCUPANCY, (CHIP_IS_E3B0(sc)) ? PBF_REG_TQ_LINES_FREED_CNT_Q0 : PBF_REG_P0_TQ_LINES_FREED_CNT}, {1, (CHIP_IS_E3B0(sc)) ? PBF_REG_TQ_OCCUPANCY_Q1 : PBF_REG_P1_TQ_OCCUPANCY, (CHIP_IS_E3B0(sc)) ? PBF_REG_TQ_LINES_FREED_CNT_Q1 : PBF_REG_P1_TQ_LINES_FREED_CNT}, {4, (CHIP_IS_E3B0(sc)) ? PBF_REG_TQ_OCCUPANCY_LB_Q : PBF_REG_P4_TQ_OCCUPANCY, (CHIP_IS_E3B0(sc)) ? PBF_REG_TQ_LINES_FREED_CNT_LB_Q : PBF_REG_P4_TQ_LINES_FREED_CNT} }; struct pbf_pN_buf_regs buf_regs[] = { {0, (CHIP_IS_E3B0(sc)) ? PBF_REG_INIT_CRD_Q0 : PBF_REG_P0_INIT_CRD , (CHIP_IS_E3B0(sc)) ? PBF_REG_CREDIT_Q0 : PBF_REG_P0_CREDIT, (CHIP_IS_E3B0(sc)) ? PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 : PBF_REG_P0_INTERNAL_CRD_FREED_CNT}, {1, (CHIP_IS_E3B0(sc)) ? PBF_REG_INIT_CRD_Q1 : PBF_REG_P1_INIT_CRD, (CHIP_IS_E3B0(sc)) ? 
PBF_REG_CREDIT_Q1 : PBF_REG_P1_CREDIT, (CHIP_IS_E3B0(sc)) ? PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 : PBF_REG_P1_INTERNAL_CRD_FREED_CNT}, {4, (CHIP_IS_E3B0(sc)) ? PBF_REG_INIT_CRD_LB_Q : PBF_REG_P4_INIT_CRD, (CHIP_IS_E3B0(sc)) ? PBF_REG_CREDIT_LB_Q : PBF_REG_P4_CREDIT, (CHIP_IS_E3B0(sc)) ? PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q : PBF_REG_P4_INTERNAL_CRD_FREED_CNT}, }; int i; /* Verify the command queues are flushed P0, P1, P4 */ for (i = 0; i < ARRAY_SIZE(cmd_regs); i++) { bxe_pbf_pN_cmd_flushed(sc, &cmd_regs[i], poll_count); } /* Verify the transmission buffers are flushed P0, P1, P4 */ for (i = 0; i < ARRAY_SIZE(buf_regs); i++) { bxe_pbf_pN_buf_flushed(sc, &buf_regs[i], poll_count); } } static void bxe_hw_enable_status(struct bxe_softc *sc) { uint32_t val; val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF); BLOGD(sc, DBG_LOAD, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val); val = REG_RD(sc, PBF_REG_DISABLE_PF); BLOGD(sc, DBG_LOAD, "PBF_REG_DISABLE_PF is 0x%x\n", val); val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN); BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val); val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN); BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val); val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK); BLOGD(sc, DBG_LOAD, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val); val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR); BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val); val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR); BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val); val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); BLOGD(sc, DBG_LOAD, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n", val); } static int bxe_pf_flr_clnup(struct bxe_softc *sc) { uint32_t poll_cnt = bxe_flr_clnup_poll_count(sc); BLOGD(sc, DBG_LOAD, "Cleanup after FLR PF[%d]\n", SC_ABS_FUNC(sc)); /* Re-enable PF target read access */ REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); /* Poll HW usage counters */ BLOGD(sc, DBG_LOAD, "Polling usage counters\n"); if (bxe_poll_hw_usage_counters(sc, poll_cnt)) { return (-1); } /* Zero the igu 'trailing edge' and 'leading edge' */ /* Send the FW cleanup command */ if (bxe_send_final_clnup(sc, (uint8_t)SC_FUNC(sc), poll_cnt)) { return (-1); } /* ATC cleanup */ /* Verify TX hw is flushed */ bxe_tx_hw_flushed(sc, poll_cnt); /* Wait 100ms (not adjusted according to platform) */ DELAY(100000); /* Verify no pending pci transactions */ if (bxe_is_pcie_pending(sc)) { BLOGE(sc, "PCIE Transactions still pending\n"); } /* Debug */ bxe_hw_enable_status(sc); /* * Master enable - Due to WB DMAE writes performed before this * register is re-initialized as part of the regular function init */ REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); return (0); } static int bxe_init_hw_func(struct bxe_softc *sc) { int port = SC_PORT(sc); int func = SC_FUNC(sc); int init_phase = PHASE_PF0 + func; struct ecore_ilt *ilt = sc->ilt; uint16_t cdu_ilt_start; uint32_t addr, val; uint32_t main_mem_base, main_mem_size, main_mem_prty_clr; int i, main_mem_width, rc; BLOGD(sc, DBG_LOAD, "starting func init for func %d\n", func); /* FLR cleanup */ if (!CHIP_IS_E1x(sc)) { rc = bxe_pf_flr_clnup(sc); if (rc) { BLOGE(sc, "FLR cleanup failed!\n"); // XXX bxe_fw_dump(sc); // XXX bxe_idle_chk(sc); return (rc); } } /* set MSI reconfigure capability */ if (sc->devinfo.int_block == INT_BLOCK_HC) { addr = (port ? 
HC_REG_CONFIG_1 : HC_REG_CONFIG_0); val = REG_RD(sc, addr); val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0; REG_WR(sc, addr, val); } ecore_init_block(sc, BLOCK_PXP, init_phase); ecore_init_block(sc, BLOCK_PXP2, init_phase); ilt = sc->ilt; cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; for (i = 0; i < L2_ILT_LINES(sc); i++) { ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt; ilt->lines[cdu_ilt_start + i].page_mapping = sc->context[i].vcxt_dma.paddr; ilt->lines[cdu_ilt_start + i].size = sc->context[i].size; } ecore_ilt_init_op(sc, INITOP_SET); /* Set NIC mode */ REG_WR(sc, PRS_REG_NIC_MODE, 1); BLOGD(sc, DBG_LOAD, "NIC MODE configured\n"); if (!CHIP_IS_E1x(sc)) { uint32_t pf_conf = IGU_PF_CONF_FUNC_EN; /* Turn on a single ISR mode in IGU if driver is going to use * INT#x or MSI */ if (sc->interrupt_mode != INTR_MODE_MSIX) { pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; } /* * Timers workaround bug: function init part. * Need to wait 20msec after initializing ILT, * needed to make sure there are no requests in * one of the PXP internal queues with "old" ILT addresses */ DELAY(20000); /* * Master enable - Due to WB DMAE writes performed before this * register is re-initialized as part of the regular function * init */ REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1); /* Enable the function in IGU */ REG_WR(sc, IGU_REG_PF_CONFIGURATION, pf_conf); } sc->dmae_ready = 1; ecore_init_block(sc, BLOCK_PGLUE_B, init_phase); if (!CHIP_IS_E1x(sc)) REG_WR(sc, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func); ecore_init_block(sc, BLOCK_ATC, init_phase); ecore_init_block(sc, BLOCK_DMAE, init_phase); ecore_init_block(sc, BLOCK_NIG, init_phase); ecore_init_block(sc, BLOCK_SRC, init_phase); ecore_init_block(sc, BLOCK_MISC, init_phase); ecore_init_block(sc, BLOCK_TCM, init_phase); ecore_init_block(sc, BLOCK_UCM, init_phase); ecore_init_block(sc, BLOCK_CCM, init_phase); ecore_init_block(sc, BLOCK_XCM, init_phase); ecore_init_block(sc, BLOCK_TSEM, init_phase); ecore_init_block(sc, BLOCK_USEM, init_phase); ecore_init_block(sc, BLOCK_CSEM, init_phase); ecore_init_block(sc, BLOCK_XSEM, init_phase); if (!CHIP_IS_E1x(sc)) REG_WR(sc, QM_REG_PF_EN, 1); if (!CHIP_IS_E1x(sc)) { REG_WR(sc, TSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); REG_WR(sc, USEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); REG_WR(sc, CSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); REG_WR(sc, XSEM_REG_VFPF_ERR_NUM, BXE_MAX_NUM_OF_VFS + func); } ecore_init_block(sc, BLOCK_QM, init_phase); ecore_init_block(sc, BLOCK_TM, init_phase); ecore_init_block(sc, BLOCK_DORQ, init_phase); bxe_iov_init_dq(sc); ecore_init_block(sc, BLOCK_BRB1, init_phase); ecore_init_block(sc, BLOCK_PRS, init_phase); ecore_init_block(sc, BLOCK_TSDM, init_phase); ecore_init_block(sc, BLOCK_CSDM, init_phase); ecore_init_block(sc, BLOCK_USDM, init_phase); ecore_init_block(sc, BLOCK_XSDM, init_phase); ecore_init_block(sc, BLOCK_UPB, init_phase); ecore_init_block(sc, BLOCK_XPB, init_phase); ecore_init_block(sc, BLOCK_PBF, init_phase); if (!CHIP_IS_E1x(sc)) REG_WR(sc, PBF_REG_DISABLE_PF, 0); ecore_init_block(sc, BLOCK_CDU, init_phase); ecore_init_block(sc, BLOCK_CFC, init_phase); if (!CHIP_IS_E1x(sc)) REG_WR(sc, CFC_REG_WEAK_ENABLE_PF, 1); if (IS_MF(sc)) { REG_WR(sc, NIG_REG_LLH0_FUNC_EN + port*8, 1); REG_WR(sc, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, OVLAN(sc)); } ecore_init_block(sc, BLOCK_MISC_AEU, init_phase); /* HC init per function */ if (sc->devinfo.int_block == INT_BLOCK_HC) { if (CHIP_IS_E1H(sc)) { REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); REG_WR(sc, HC_REG_LEADING_EDGE_0 
+ port*8, 0); REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0); } ecore_init_block(sc, BLOCK_HC, init_phase); } else { int num_segs, sb_idx, prod_offset; REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); if (!CHIP_IS_E1x(sc)) { REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0); REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0); } ecore_init_block(sc, BLOCK_IGU, init_phase); if (!CHIP_IS_E1x(sc)) { int dsb_idx = 0; /** * Producer memory: * E2 mode: address 0-135 match to the mapping memory; * 136 - PF0 default prod; 137 - PF1 default prod; * 138 - PF2 default prod; 139 - PF3 default prod; * 140 - PF0 attn prod; 141 - PF1 attn prod; * 142 - PF2 attn prod; 143 - PF3 attn prod; * 144-147 reserved. * * E1.5 mode - In backward compatible mode; * for non default SB; each even line in the memory * holds the U producer and each odd line hold * the C producer. The first 128 producers are for * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20 * producers are for the DSB for each PF. * Each PF has five segments: (the order inside each * segment is PF0; PF1; PF2; PF3) - 128-131 U prods; * 132-135 C prods; 136-139 X prods; 140-143 T prods; * 144-147 attn prods; */ /* non-default-status-blocks */ num_segs = CHIP_INT_MODE_IS_BC(sc) ? IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS; for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) { prod_offset = (sc->igu_base_sb + sb_idx) * num_segs; for (i = 0; i < num_segs; i++) { addr = IGU_REG_PROD_CONS_MEMORY + (prod_offset + i) * 4; REG_WR(sc, addr, 0); } /* send consumer update with value 0 */ bxe_ack_sb(sc, sc->igu_base_sb + sb_idx, USTORM_ID, 0, IGU_INT_NOP, 1); bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx); } /* default-status-blocks */ num_segs = CHIP_INT_MODE_IS_BC(sc) ? IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS; if (CHIP_IS_MODE_4_PORT(sc)) dsb_idx = SC_FUNC(sc); else dsb_idx = SC_VN(sc); prod_offset = (CHIP_INT_MODE_IS_BC(sc) ? IGU_BC_BASE_DSB_PROD + dsb_idx : IGU_NORM_BASE_DSB_PROD + dsb_idx); /* * igu prods come in chunks of E1HVN_MAX (4) - * does not matters what is the current chip mode */ for (i = 0; i < (num_segs * E1HVN_MAX); i += E1HVN_MAX) { addr = IGU_REG_PROD_CONS_MEMORY + (prod_offset + i)*4; REG_WR(sc, addr, 0); } /* send consumer update with 0 */ if (CHIP_INT_MODE_IS_BC(sc)) { bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_NOP, 1); bxe_ack_sb(sc, sc->igu_dsb_id, CSTORM_ID, 0, IGU_INT_NOP, 1); bxe_ack_sb(sc, sc->igu_dsb_id, XSTORM_ID, 0, IGU_INT_NOP, 1); bxe_ack_sb(sc, sc->igu_dsb_id, TSTORM_ID, 0, IGU_INT_NOP, 1); bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID, 0, IGU_INT_NOP, 1); } else { bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_NOP, 1); bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID, 0, IGU_INT_NOP, 1); } bxe_igu_clear_sb(sc, sc->igu_dsb_id); /* !!! 
these should become driver const once rf-tool supports split-68 const */ REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0); REG_WR(sc, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0); REG_WR(sc, IGU_REG_SB_MASK_LSB, 0); REG_WR(sc, IGU_REG_SB_MASK_MSB, 0); REG_WR(sc, IGU_REG_PBA_STATUS_LSB, 0); REG_WR(sc, IGU_REG_PBA_STATUS_MSB, 0); } } /* Reset PCIE errors for debug */ REG_WR(sc, 0x2114, 0xffffffff); REG_WR(sc, 0x2120, 0xffffffff); if (CHIP_IS_E1x(sc)) { main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/ main_mem_base = HC_REG_MAIN_MEMORY + SC_PORT(sc) * (main_mem_size * 4); main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR; main_mem_width = 8; val = REG_RD(sc, main_mem_prty_clr); if (val) { BLOGD(sc, DBG_LOAD, "Parity errors in HC block during function init (0x%x)!\n", val); } /* Clear "false" parity errors in MSI-X table */ for (i = main_mem_base; i < main_mem_base + main_mem_size * 4; i += main_mem_width) { bxe_read_dmae(sc, i, main_mem_width / 4); bxe_write_dmae(sc, BXE_SP_MAPPING(sc, wb_data), i, main_mem_width / 4); } /* Clear HC parity attention */ REG_RD(sc, main_mem_prty_clr); } #if 1 /* Enable STORMs SP logging */ REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_RECORD_SLOW_PATH_OFFSET(SC_FUNC(sc)), 1); #endif elink_phy_probe(&sc->link_params); return (0); } static void bxe_link_reset(struct bxe_softc *sc) { if (!BXE_NOMCP(sc)) { bxe_acquire_phy_lock(sc); elink_lfa_reset(&sc->link_params, &sc->link_vars); bxe_release_phy_lock(sc); } else { if (!CHIP_REV_IS_SLOW(sc)) { BLOGW(sc, "Bootcode is missing - cannot reset link\n"); } } } static void bxe_reset_port(struct bxe_softc *sc) { int port = SC_PORT(sc); uint32_t val; ELINK_DEBUG_P0(sc, "bxe_reset_port called\n"); /* reset physical Link */ bxe_link_reset(sc); REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); /* Do not rcv packets to BRB */ REG_WR(sc, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0); /* Do not direct rcv packets that are not for MCP to the BRB */ REG_WR(sc, (port ? NIG_REG_LLH1_BRB1_NOT_MCP : NIG_REG_LLH0_BRB1_NOT_MCP), 0x0); /* Configure AEU */ REG_WR(sc, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0); DELAY(100000); /* Check for BRB port occupancy */ val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4); if (val) { BLOGD(sc, DBG_LOAD, "BRB1 is not empty, %d blocks are occupied\n", val); } /* TODO: Close Doorbell port? 
*/ } static void bxe_ilt_wr(struct bxe_softc *sc, uint32_t index, bus_addr_t addr) { int reg; uint32_t wb_write[2]; if (CHIP_IS_E1(sc)) { reg = PXP2_REG_RQ_ONCHIP_AT + index*8; } else { reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8; } wb_write[0] = ONCHIP_ADDR1(addr); wb_write[1] = ONCHIP_ADDR2(addr); REG_WR_DMAE(sc, reg, wb_write, 2); } static void bxe_clear_func_ilt(struct bxe_softc *sc, uint32_t func) { uint32_t i, base = FUNC_ILT_BASE(func); for (i = base; i < base + ILT_PER_FUNC; i++) { bxe_ilt_wr(sc, i, 0); } } static void bxe_reset_func(struct bxe_softc *sc) { struct bxe_fastpath *fp; int port = SC_PORT(sc); int func = SC_FUNC(sc); int i; /* Disable the function in the FW */ REG_WR8(sc, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0); REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0); REG_WR8(sc, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0); REG_WR8(sc, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0); /* FP SBs */ FOR_EACH_ETH_QUEUE(sc, i) { fp = &sc->fp[i]; REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id), SB_DISABLED); } /* SP SB */ REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), SB_DISABLED); for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) { REG_WR(sc, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func), 0); } /* Configure IGU */ if (sc->devinfo.int_block == INT_BLOCK_HC) { REG_WR(sc, HC_REG_LEADING_EDGE_0 + port*8, 0); REG_WR(sc, HC_REG_TRAILING_EDGE_0 + port*8, 0); } else { REG_WR(sc, IGU_REG_LEADING_EDGE_LATCH, 0); REG_WR(sc, IGU_REG_TRAILING_EDGE_LATCH, 0); } if (CNIC_LOADED(sc)) { /* Disable Timer scan */ REG_WR(sc, TM_REG_EN_LINEAR0_TIMER + port*4, 0); /* * Wait for at least 10ms and up to 2 second for the timers * scan to complete */ for (i = 0; i < 200; i++) { DELAY(10000); if (!REG_RD(sc, TM_REG_LIN0_SCAN_ON + port*4)) break; } } /* Clear ILT */ bxe_clear_func_ilt(sc, func); /* * Timers workaround bug for E2: if this is vnic-3, * we need to set the entire ilt range for this timers. */ if (!CHIP_IS_E1x(sc) && SC_VN(sc) == 3) { struct ilt_client_info ilt_cli; /* use dummy TM client */ memset(&ilt_cli, 0, sizeof(struct ilt_client_info)); ilt_cli.start = 0; ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1; ilt_cli.client_num = ILT_CLIENT_TM; ecore_ilt_boundry_init_op(sc, &ilt_cli, 0, INITOP_CLEAR); } /* this assumes that reset_port() called before reset_func()*/ if (!CHIP_IS_E1x(sc)) { bxe_pf_disable(sc); } sc->dmae_ready = 0; } static int bxe_gunzip_init(struct bxe_softc *sc) { return (0); } static void bxe_gunzip_end(struct bxe_softc *sc) { return; } static int bxe_init_firmware(struct bxe_softc *sc) { if (CHIP_IS_E1(sc)) { ecore_init_e1_firmware(sc); sc->iro_array = e1_iro_arr; } else if (CHIP_IS_E1H(sc)) { ecore_init_e1h_firmware(sc); sc->iro_array = e1h_iro_arr; } else if (!CHIP_IS_E1x(sc)) { ecore_init_e2_firmware(sc); sc->iro_array = e2_iro_arr; } else { BLOGE(sc, "Unsupported chip revision\n"); return (-1); } return (0); } static void bxe_release_firmware(struct bxe_softc *sc) { /* Do nothing */ return; } static int ecore_gunzip(struct bxe_softc *sc, const uint8_t *zbuf, int len) { /* XXX : Implement... 
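 * In this port the hook is a stub: it only logs a debug message and
 * returns FALSE.  Should compressed init data ever be used, the
 * expected behaviour (by analogy with the Linux bnx2x counterpart,
 * an assumption rather than anything this driver does today) would
 * be to inflate 'zbuf' into the preallocated sc->gz_buf DMA buffer
 * before the ecore init code consumes it.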
*/ BLOGD(sc, DBG_LOAD, "ECORE_GUNZIP NOT IMPLEMENTED\n"); return (FALSE); } static void ecore_reg_wr_ind(struct bxe_softc *sc, uint32_t addr, uint32_t val) { bxe_reg_wr_ind(sc, addr, val); } static void ecore_write_dmae_phys_len(struct bxe_softc *sc, bus_addr_t phys_addr, uint32_t addr, uint32_t len) { bxe_write_dmae_phys_len(sc, phys_addr, addr, len); } void ecore_storm_memset_struct(struct bxe_softc *sc, uint32_t addr, size_t size, uint32_t *data) { uint8_t i; for (i = 0; i < size/4; i++) { REG_WR(sc, addr + (i * 4), data[i]); } } /* * character device - ioctl interface definitions */ #include "bxe_dump.h" #include "bxe_ioctl.h" #include static int bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td); static struct cdevsw bxe_cdevsw = { .d_version = D_VERSION, .d_ioctl = bxe_eioctl, .d_name = "bxecnic", }; #define BXE_PATH(sc) (CHIP_IS_E1x(sc) ? 0 : (sc->pcie_func & 1)) #define DUMP_ALL_PRESETS 0x1FFF #define DUMP_MAX_PRESETS 13 #define IS_E1_REG(chips) ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1) #define IS_E1H_REG(chips) ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H) #define IS_E2_REG(chips) ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2) #define IS_E3A0_REG(chips) ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0) #define IS_E3B0_REG(chips) ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0) #define IS_REG_IN_PRESET(presets, idx) \ ((presets & (1 << (idx-1))) == (1 << (idx-1))) static int bxe_get_preset_regs_len(struct bxe_softc *sc, uint32_t preset) { if (CHIP_IS_E1(sc)) return dump_num_registers[0][preset-1]; else if (CHIP_IS_E1H(sc)) return dump_num_registers[1][preset-1]; else if (CHIP_IS_E2(sc)) return dump_num_registers[2][preset-1]; else if (CHIP_IS_E3A0(sc)) return dump_num_registers[3][preset-1]; else if (CHIP_IS_E3B0(sc)) return dump_num_registers[4][preset-1]; else return 0; } static int bxe_get_total_regs_len32(struct bxe_softc *sc) { uint32_t preset_idx; int regdump_len32 = 0; /* Calculate the total preset regs length */ for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) { regdump_len32 += bxe_get_preset_regs_len(sc, preset_idx); } return regdump_len32; } static const uint32_t * __bxe_get_page_addr_ar(struct bxe_softc *sc) { if (CHIP_IS_E2(sc)) return page_vals_e2; else if (CHIP_IS_E3(sc)) return page_vals_e3; else return NULL; } static uint32_t __bxe_get_page_reg_num(struct bxe_softc *sc) { if (CHIP_IS_E2(sc)) return PAGE_MODE_VALUES_E2; else if (CHIP_IS_E3(sc)) return PAGE_MODE_VALUES_E3; else return 0; } static const uint32_t * __bxe_get_page_write_ar(struct bxe_softc *sc) { if (CHIP_IS_E2(sc)) return page_write_regs_e2; else if (CHIP_IS_E3(sc)) return page_write_regs_e3; else return NULL; } static uint32_t __bxe_get_page_write_num(struct bxe_softc *sc) { if (CHIP_IS_E2(sc)) return PAGE_WRITE_REGS_E2; else if (CHIP_IS_E3(sc)) return PAGE_WRITE_REGS_E3; else return 0; } static const struct reg_addr * __bxe_get_page_read_ar(struct bxe_softc *sc) { if (CHIP_IS_E2(sc)) return page_read_regs_e2; else if (CHIP_IS_E3(sc)) return page_read_regs_e3; else return NULL; } static uint32_t __bxe_get_page_read_num(struct bxe_softc *sc) { if (CHIP_IS_E2(sc)) return PAGE_READ_REGS_E2; else if (CHIP_IS_E3(sc)) return PAGE_READ_REGS_E3; else return 0; } static bool bxe_is_reg_in_chip(struct bxe_softc *sc, const struct reg_addr *reg_info) { if (CHIP_IS_E1(sc)) return IS_E1_REG(reg_info->chips); else if (CHIP_IS_E1H(sc)) return IS_E1H_REG(reg_info->chips); else if (CHIP_IS_E2(sc)) return IS_E2_REG(reg_info->chips); else if (CHIP_IS_E3A0(sc)) return 
IS_E3A0_REG(reg_info->chips); else if (CHIP_IS_E3B0(sc)) return IS_E3B0_REG(reg_info->chips); else return 0; } static bool bxe_is_wreg_in_chip(struct bxe_softc *sc, const struct wreg_addr *wreg_info) { if (CHIP_IS_E1(sc)) return IS_E1_REG(wreg_info->chips); else if (CHIP_IS_E1H(sc)) return IS_E1H_REG(wreg_info->chips); else if (CHIP_IS_E2(sc)) return IS_E2_REG(wreg_info->chips); else if (CHIP_IS_E3A0(sc)) return IS_E3A0_REG(wreg_info->chips); else if (CHIP_IS_E3B0(sc)) return IS_E3B0_REG(wreg_info->chips); else return 0; } /** * bxe_read_pages_regs - read "paged" registers * * @bp device handle * @p output buffer * * Reads "paged" memories: memories that may only be read by first writing to a * specific address ("write address") and then reading from a specific address * ("read address"). There may be more than one write address per "page" and * more than one read address per write address. */ static void bxe_read_pages_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset) { uint32_t i, j, k, n; /* addresses of the paged registers */ const uint32_t *page_addr = __bxe_get_page_addr_ar(sc); /* number of paged registers */ int num_pages = __bxe_get_page_reg_num(sc); /* write addresses */ const uint32_t *write_addr = __bxe_get_page_write_ar(sc); /* number of write addresses */ int write_num = __bxe_get_page_write_num(sc); /* read addresses info */ const struct reg_addr *read_addr = __bxe_get_page_read_ar(sc); /* number of read addresses */ int read_num = __bxe_get_page_read_num(sc); uint32_t addr, size; for (i = 0; i < num_pages; i++) { for (j = 0; j < write_num; j++) { REG_WR(sc, write_addr[j], page_addr[i]); for (k = 0; k < read_num; k++) { if (IS_REG_IN_PRESET(read_addr[k].presets, preset)) { size = read_addr[k].size; for (n = 0; n < size; n++) { addr = read_addr[k].addr + n*4; *p++ = REG_RD(sc, addr); } } } } } return; } static int bxe_get_preset_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset) { uint32_t i, j, addr; const struct wreg_addr *wreg_addr_p = NULL; if (CHIP_IS_E1(sc)) wreg_addr_p = &wreg_addr_e1; else if (CHIP_IS_E1H(sc)) wreg_addr_p = &wreg_addr_e1h; else if (CHIP_IS_E2(sc)) wreg_addr_p = &wreg_addr_e2; else if (CHIP_IS_E3A0(sc)) wreg_addr_p = &wreg_addr_e3; else if (CHIP_IS_E3B0(sc)) wreg_addr_p = &wreg_addr_e3b0; else return (-1); /* Read the idle_chk registers */ for (i = 0; i < IDLE_REGS_COUNT; i++) { if (bxe_is_reg_in_chip(sc, &idle_reg_addrs[i]) && IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) { for (j = 0; j < idle_reg_addrs[i].size; j++) *p++ = REG_RD(sc, idle_reg_addrs[i].addr + j*4); } } /* Read the regular registers */ for (i = 0; i < REGS_COUNT; i++) { if (bxe_is_reg_in_chip(sc, ®_addrs[i]) && IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) { for (j = 0; j < reg_addrs[i].size; j++) *p++ = REG_RD(sc, reg_addrs[i].addr + j*4); } } /* Read the CAM registers */ if (bxe_is_wreg_in_chip(sc, wreg_addr_p) && IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) { for (i = 0; i < wreg_addr_p->size; i++) { *p++ = REG_RD(sc, wreg_addr_p->addr + i*4); /* In case of wreg_addr register, read additional registers from read_regs array */ for (j = 0; j < wreg_addr_p->read_regs_count; j++) { addr = *(wreg_addr_p->read_regs); *p++ = REG_RD(sc, addr + j*4); } } } /* Paged registers are supported in E2 & E3 only */ if (CHIP_IS_E2(sc) || CHIP_IS_E3(sc)) { /* Read "paged" registers */ bxe_read_pages_regs(sc, p, preset); } return 0; } int bxe_grc_dump(struct bxe_softc *sc) { int rval = 0; uint32_t preset_idx; uint8_t *buf; uint32_t size; struct dump_header *d_hdr; uint32_t i; 
uint32_t reg_val; uint32_t reg_addr; uint32_t cmd_offset; struct ecore_ilt *ilt = SC_ILT(sc); struct bxe_fastpath *fp; struct ilt_client_info *ilt_cli; int grc_dump_size; if (sc->grcdump_done || sc->grcdump_started) return (rval); sc->grcdump_started = 1; BLOGI(sc, "Started collecting grcdump\n"); grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) + sizeof(struct dump_header); sc->grc_dump = malloc(grc_dump_size, M_DEVBUF, M_NOWAIT); if (sc->grc_dump == NULL) { BLOGW(sc, "Unable to allocate memory for grcdump collection\n"); return(ENOMEM); } /* Disable parity attentions as long as following dump may * cause false alarms by reading never written registers. We * will re-enable parity attentions right after the dump. */ /* Disable parity on path 0 */ bxe_pretend_func(sc, 0); ecore_disable_blocks_parity(sc); /* Disable parity on path 1 */ bxe_pretend_func(sc, 1); ecore_disable_blocks_parity(sc); /* Return to current function */ bxe_pretend_func(sc, SC_ABS_FUNC(sc)); buf = sc->grc_dump; d_hdr = sc->grc_dump; d_hdr->header_size = (sizeof(struct dump_header) >> 2) - 1; d_hdr->version = BNX2X_DUMP_VERSION; d_hdr->preset = DUMP_ALL_PRESETS; if (CHIP_IS_E1(sc)) { d_hdr->dump_meta_data = DUMP_CHIP_E1; } else if (CHIP_IS_E1H(sc)) { d_hdr->dump_meta_data = DUMP_CHIP_E1H; } else if (CHIP_IS_E2(sc)) { d_hdr->dump_meta_data = DUMP_CHIP_E2 | (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0); } else if (CHIP_IS_E3A0(sc)) { d_hdr->dump_meta_data = DUMP_CHIP_E3A0 | (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0); } else if (CHIP_IS_E3B0(sc)) { d_hdr->dump_meta_data = DUMP_CHIP_E3B0 | (BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0); } buf += sizeof(struct dump_header); for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) { /* Skip presets with IOR */ if ((preset_idx == 2) || (preset_idx == 5) || (preset_idx == 8) || (preset_idx == 11)) continue; rval = bxe_get_preset_regs(sc, (uint32_t *)buf, preset_idx); if (rval) break; size = bxe_get_preset_regs_len(sc, preset_idx) * (sizeof (uint32_t)); buf += size; } bxe_pretend_func(sc, 0); ecore_clear_blocks_parity(sc); ecore_enable_blocks_parity(sc); bxe_pretend_func(sc, 1); ecore_clear_blocks_parity(sc); ecore_enable_blocks_parity(sc); /* Return to current function */ bxe_pretend_func(sc, SC_ABS_FUNC(sc)); if(sc->state == BXE_STATE_OPEN) { if(sc->fw_stats_req != NULL) { BLOGI(sc, "fw stats start_paddr %#jx end_paddr %#jx vaddr %p size 0x%x\n", (uintmax_t)sc->fw_stats_req_mapping, (uintmax_t)sc->fw_stats_data_mapping, sc->fw_stats_req, (sc->fw_stats_req_size + sc->fw_stats_data_size)); } if(sc->def_sb != NULL) { BLOGI(sc, "def_status_block paddr %p vaddr %p size 0x%zx\n", (void *)sc->def_sb_dma.paddr, sc->def_sb, sizeof(struct host_sp_status_block)); } if(sc->eq_dma.vaddr != NULL) { BLOGI(sc, "event_queue paddr %#jx vaddr %p size 0x%x\n", (uintmax_t)sc->eq_dma.paddr, sc->eq_dma.vaddr, BCM_PAGE_SIZE); } if(sc->sp_dma.vaddr != NULL) { BLOGI(sc, "slow path paddr %#jx vaddr %p size 0x%zx\n", (uintmax_t)sc->sp_dma.paddr, sc->sp_dma.vaddr, sizeof(struct bxe_slowpath)); } if(sc->spq_dma.vaddr != NULL) { BLOGI(sc, "slow path queue paddr %#jx vaddr %p size 0x%x\n", (uintmax_t)sc->spq_dma.paddr, sc->spq_dma.vaddr, BCM_PAGE_SIZE); } if(sc->gz_buf_dma.vaddr != NULL) { BLOGI(sc, "fw_buf paddr %#jx vaddr %p size 0x%x\n", (uintmax_t)sc->gz_buf_dma.paddr, sc->gz_buf_dma.vaddr, FW_BUF_SIZE); } for (i = 0; i < sc->num_queues; i++) { fp = &sc->fp[i]; if(fp->sb_dma.vaddr != NULL && fp->tx_dma.vaddr != NULL && fp->rx_dma.vaddr != NULL && fp->rcq_dma.vaddr != NULL && 
fp->rx_sge_dma.vaddr != NULL) { BLOGI(sc, "FP status block fp %d paddr %#jx vaddr %p size 0x%zx\n", i, (uintmax_t)fp->sb_dma.paddr, fp->sb_dma.vaddr, sizeof(union bxe_host_hc_status_block)); BLOGI(sc, "TX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i, (uintmax_t)fp->tx_dma.paddr, fp->tx_dma.vaddr, (BCM_PAGE_SIZE * TX_BD_NUM_PAGES)); BLOGI(sc, "RX BD CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i, (uintmax_t)fp->rx_dma.paddr, fp->rx_dma.vaddr, (BCM_PAGE_SIZE * RX_BD_NUM_PAGES)); BLOGI(sc, "RX RCQ CHAIN fp %d paddr %#jx vaddr %p size 0x%zx\n", i, (uintmax_t)fp->rcq_dma.paddr, fp->rcq_dma.vaddr, (BCM_PAGE_SIZE * RCQ_NUM_PAGES)); BLOGI(sc, "RX SGE CHAIN fp %d paddr %#jx vaddr %p size 0x%x\n", i, (uintmax_t)fp->rx_sge_dma.paddr, fp->rx_sge_dma.vaddr, (BCM_PAGE_SIZE * RX_SGE_NUM_PAGES)); } } if(ilt != NULL ) { ilt_cli = &ilt->clients[1]; if(ilt->lines != NULL) { for (i = ilt_cli->start; i <= ilt_cli->end; i++) { BLOGI(sc, "ECORE_ILT paddr %#jx vaddr %p size 0x%x\n", (uintmax_t)(((struct bxe_dma *)((&ilt->lines[i])->page))->paddr), ((struct bxe_dma *)((&ilt->lines[i])->page))->vaddr, BCM_PAGE_SIZE); } } } cmd_offset = DMAE_REG_CMD_MEM; for (i = 0; i < 224; i++) { reg_addr = (cmd_offset +(i * 4)); reg_val = REG_RD(sc, reg_addr); BLOGI(sc, "DMAE_REG_CMD_MEM i=%d reg_addr 0x%x reg_val 0x%08x\n",i, reg_addr, reg_val); } } BLOGI(sc, "Collection of grcdump done\n"); sc->grcdump_done = 1; return(rval); } static int bxe_add_cdev(struct bxe_softc *sc) { sc->eeprom = malloc(BXE_EEPROM_MAX_DATA_LEN, M_DEVBUF, M_NOWAIT); if (sc->eeprom == NULL) { BLOGW(sc, "Unable to alloc for eeprom size buffer\n"); return (-1); } sc->ioctl_dev = make_dev(&bxe_cdevsw, sc->ifp->if_dunit, UID_ROOT, GID_WHEEL, 0600, "%s", if_name(sc->ifp)); if (sc->ioctl_dev == NULL) { free(sc->eeprom, M_DEVBUF); sc->eeprom = NULL; return (-1); } sc->ioctl_dev->si_drv1 = sc; return (0); } static void bxe_del_cdev(struct bxe_softc *sc) { if (sc->ioctl_dev != NULL) destroy_dev(sc->ioctl_dev); if (sc->eeprom != NULL) { free(sc->eeprom, M_DEVBUF); sc->eeprom = NULL; } sc->ioctl_dev = NULL; return; } static bool bxe_is_nvram_accessible(struct bxe_softc *sc) { if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0) return FALSE; return TRUE; } static int bxe_wr_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len) { int rval = 0; if(!bxe_is_nvram_accessible(sc)) { BLOGW(sc, "Cannot access eeprom when interface is down\n"); return (-EAGAIN); } rval = bxe_nvram_write(sc, offset, (uint8_t *)data, len); return (rval); } static int bxe_rd_eeprom(struct bxe_softc *sc, void *data, uint32_t offset, uint32_t len) { int rval = 0; if(!bxe_is_nvram_accessible(sc)) { BLOGW(sc, "Cannot access eeprom when interface is down\n"); return (-EAGAIN); } rval = bxe_nvram_read(sc, offset, (uint8_t *)data, len); return (rval); } static int bxe_eeprom_rd_wr(struct bxe_softc *sc, bxe_eeprom_t *eeprom) { int rval = 0; switch (eeprom->eeprom_cmd) { case BXE_EEPROM_CMD_SET_EEPROM: rval = copyin(eeprom->eeprom_data, sc->eeprom, eeprom->eeprom_data_len); if (rval) break; rval = bxe_wr_eeprom(sc, sc->eeprom, eeprom->eeprom_offset, eeprom->eeprom_data_len); break; case BXE_EEPROM_CMD_GET_EEPROM: rval = bxe_rd_eeprom(sc, sc->eeprom, eeprom->eeprom_offset, eeprom->eeprom_data_len); if (rval) { break; } rval = copyout(sc->eeprom, eeprom->eeprom_data, eeprom->eeprom_data_len); break; default: rval = EINVAL; break; } if (rval) { BLOGW(sc, "ioctl cmd %d failed rval %d\n", eeprom->eeprom_cmd, rval); } return (rval); } static int bxe_get_settings(struct bxe_softc *sc, 
bxe_dev_setting_t *dev_p) { uint32_t ext_phy_config; int port = SC_PORT(sc); int cfg_idx = bxe_get_link_cfg_idx(sc); dev_p->supported = sc->port.supported[cfg_idx] | (sc->port.supported[cfg_idx ^ 1] & (ELINK_SUPPORTED_TP | ELINK_SUPPORTED_FIBRE)); dev_p->advertising = sc->port.advertising[cfg_idx]; if(sc->link_params.phy[bxe_get_cur_phy_idx(sc)].media_type == ELINK_ETH_PHY_SFP_1G_FIBER) { dev_p->supported = ~(ELINK_SUPPORTED_10000baseT_Full); dev_p->advertising &= ~(ADVERTISED_10000baseT_Full); } if ((sc->state == BXE_STATE_OPEN) && sc->link_vars.link_up && !(sc->flags & BXE_MF_FUNC_DIS)) { dev_p->duplex = sc->link_vars.duplex; if (IS_MF(sc) && !BXE_NOMCP(sc)) dev_p->speed = bxe_get_mf_speed(sc); else dev_p->speed = sc->link_vars.line_speed; } else { dev_p->duplex = DUPLEX_UNKNOWN; dev_p->speed = SPEED_UNKNOWN; } dev_p->port = bxe_media_detect(sc); ext_phy_config = SHMEM_RD(sc, dev_info.port_hw_config[port].external_phy_config); if((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) dev_p->phy_address = sc->port.phy_addr; else if(((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) && ((ext_phy_config & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK) != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) dev_p->phy_address = ELINK_XGXS_EXT_PHY_ADDR(ext_phy_config); else dev_p->phy_address = 0; if(sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG) dev_p->autoneg = AUTONEG_ENABLE; else dev_p->autoneg = AUTONEG_DISABLE; return 0; } static int bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { struct bxe_softc *sc; int rval = 0; device_t pci_dev; bxe_grcdump_t *dump = NULL; int grc_dump_size; bxe_drvinfo_t *drv_infop = NULL; bxe_dev_setting_t *dev_p; bxe_dev_setting_t dev_set; bxe_get_regs_t *reg_p; bxe_reg_rdw_t *reg_rdw_p; bxe_pcicfg_rdw_t *cfg_rdw_p; bxe_perm_mac_addr_t *mac_addr_p; if ((sc = (struct bxe_softc *)dev->si_drv1) == NULL) return ENXIO; pci_dev= sc->dev; dump = (bxe_grcdump_t *)data; switch(cmd) { case BXE_GRC_DUMP_SIZE: dump->pci_func = sc->pcie_func; dump->grcdump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) + sizeof(struct dump_header); break; case BXE_GRC_DUMP: grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) + sizeof(struct dump_header); if ((!sc->trigger_grcdump) || (dump->grcdump == NULL) || (dump->grcdump_size < grc_dump_size)) { rval = EINVAL; break; } if((sc->trigger_grcdump) && (!sc->grcdump_done) && (!sc->grcdump_started)) { rval = bxe_grc_dump(sc); } if((!rval) && (sc->grcdump_done) && (sc->grcdump_started) && (sc->grc_dump != NULL)) { dump->grcdump_dwords = grc_dump_size >> 2; rval = copyout(sc->grc_dump, dump->grcdump, grc_dump_size); free(sc->grc_dump, M_DEVBUF); sc->grc_dump = NULL; sc->grcdump_started = 0; sc->grcdump_done = 0; } break; case BXE_DRV_INFO: drv_infop = (bxe_drvinfo_t *)data; snprintf(drv_infop->drv_name, BXE_DRV_NAME_LENGTH, "%s", "bxe"); snprintf(drv_infop->drv_version, BXE_DRV_VERSION_LENGTH, "v:%s", BXE_DRIVER_VERSION); snprintf(drv_infop->mfw_version, BXE_MFW_VERSION_LENGTH, "%s", sc->devinfo.bc_ver_str); snprintf(drv_infop->stormfw_version, BXE_STORMFW_VERSION_LENGTH, "%s", sc->fw_ver_str); drv_infop->eeprom_dump_len = sc->devinfo.flash_size; drv_infop->reg_dump_len = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) + sizeof(struct dump_header); snprintf(drv_infop->bus_info, BXE_BUS_INFO_LENGTH, "%d:%d:%d", sc->pcie_bus, sc->pcie_device, sc->pcie_func); break; case BXE_DEV_SETTING: dev_p = 
(bxe_dev_setting_t *)data; bxe_get_settings(sc, &dev_set); dev_p->supported = dev_set.supported; dev_p->advertising = dev_set.advertising; dev_p->speed = dev_set.speed; dev_p->duplex = dev_set.duplex; dev_p->port = dev_set.port; dev_p->phy_address = dev_set.phy_address; dev_p->autoneg = dev_set.autoneg; break; case BXE_GET_REGS: reg_p = (bxe_get_regs_t *)data; grc_dump_size = reg_p->reg_buf_len; if((!sc->grcdump_done) && (!sc->grcdump_started)) { bxe_grc_dump(sc); } if((sc->grcdump_done) && (sc->grcdump_started) && (sc->grc_dump != NULL)) { rval = copyout(sc->grc_dump, reg_p->reg_buf, grc_dump_size); free(sc->grc_dump, M_DEVBUF); sc->grc_dump = NULL; sc->grcdump_started = 0; sc->grcdump_done = 0; } break; case BXE_RDW_REG: reg_rdw_p = (bxe_reg_rdw_t *)data; if((reg_rdw_p->reg_cmd == BXE_READ_REG_CMD) && (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT)) reg_rdw_p->reg_val = REG_RD(sc, reg_rdw_p->reg_id); if((reg_rdw_p->reg_cmd == BXE_WRITE_REG_CMD) && (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT)) REG_WR(sc, reg_rdw_p->reg_id, reg_rdw_p->reg_val); break; case BXE_RDW_PCICFG: cfg_rdw_p = (bxe_pcicfg_rdw_t *)data; if(cfg_rdw_p->cfg_cmd == BXE_READ_PCICFG) { cfg_rdw_p->cfg_val = pci_read_config(sc->dev, cfg_rdw_p->cfg_id, cfg_rdw_p->cfg_width); } else if(cfg_rdw_p->cfg_cmd == BXE_WRITE_PCICFG) { pci_write_config(sc->dev, cfg_rdw_p->cfg_id, cfg_rdw_p->cfg_val, cfg_rdw_p->cfg_width); } else { BLOGW(sc, "BXE_RDW_PCICFG ioctl wrong cmd passed\n"); } break; case BXE_MAC_ADDR: mac_addr_p = (bxe_perm_mac_addr_t *)data; snprintf(mac_addr_p->mac_addr_str, sizeof(sc->mac_addr_str), "%s", sc->mac_addr_str); break; case BXE_EEPROM: rval = bxe_eeprom_rd_wr(sc, (bxe_eeprom_t *)data); break; default: break; } return (rval); } #ifdef DEBUGNET static void bxe_debugnet_init(struct ifnet *ifp, int *nrxr, int *ncl, int *clsize) { struct bxe_softc *sc; sc = if_getsoftc(ifp); BXE_CORE_LOCK(sc); *nrxr = sc->num_queues; *ncl = DEBUGNET_MAX_IN_FLIGHT; *clsize = sc->fp[0].mbuf_alloc_size; BXE_CORE_UNLOCK(sc); } static void bxe_debugnet_event(struct ifnet *ifp __unused, enum debugnet_ev event __unused) { } static int bxe_debugnet_transmit(struct ifnet *ifp, struct mbuf *m) { struct bxe_softc *sc; int error; sc = if_getsoftc(ifp); if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING || !sc->link_vars.link_up) return (ENOENT); error = bxe_tx_encap(&sc->fp[0], &m); if (error != 0 && m != NULL) m_freem(m); return (error); } static int bxe_debugnet_poll(struct ifnet *ifp, int count) { struct bxe_softc *sc; int i; sc = if_getsoftc(ifp); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 || !sc->link_vars.link_up) return (ENOENT); for (i = 0; i < sc->num_queues; i++) (void)bxe_rxeof(sc, &sc->fp[i]); (void)bxe_txeof(sc, &sc->fp[0]); return (0); } #endif /* DEBUGNET */ diff --git a/sys/dev/cas/if_cas.c b/sys/dev/cas/if_cas.c index 1a20596f2923..653448eea405 100644 --- a/sys/dev/cas/if_cas.c +++ b/sys/dev/cas/if_cas.c @@ -1,2917 +1,2920 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2001 Eduardo Horvath. * Copyright (c) 2001-2003 Thomas Moestl * Copyright (c) 2007-2009 Marius Strobl * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp * from: FreeBSD: if_gem.c 182060 2008-08-23 15:03:26Z marius */ #include __FBSDID("$FreeBSD$"); /* * driver for Sun Cassini/Cassini+ and National Semiconductor DP83065 * Saturn Gigabit Ethernet controllers */ #if 0 #define CAS_DEBUG #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__powerpc__) #include #include #include #endif #include #include #include #include #include #include #include #include "miibus_if.h" #define RINGASSERT(n , min, max) \ CTASSERT(powerof2(n) && (n) >= (min) && (n) <= (max)) RINGASSERT(CAS_NRXCOMP, 128, 32768); RINGASSERT(CAS_NRXDESC, 32, 8192); RINGASSERT(CAS_NRXDESC2, 32, 8192); RINGASSERT(CAS_NTXDESC, 32, 8192); #undef RINGASSERT #define CCDASSERT(m, a) \ CTASSERT((offsetof(struct cas_control_data, m) & ((a) - 1)) == 0) CCDASSERT(ccd_rxcomps, CAS_RX_COMP_ALIGN); CCDASSERT(ccd_rxdescs, CAS_RX_DESC_ALIGN); CCDASSERT(ccd_rxdescs2, CAS_RX_DESC_ALIGN); #undef CCDASSERT #define CAS_TRIES 10000 /* * According to documentation, the hardware has support for basic TCP * checksum offloading only, in practice this can be also used for UDP * however (i.e. the problem of previous Sun NICs that a checksum of 0x0 * is not converted to 0xffff no longer exists). 
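 * On receive the chip reports a raw 16-bit one's complement sum taken
 * from the checksum start offset programmed in the completion ring
 * (ETHER_HDR_LEN plus a base IP header, see CAS_RC3_CSO below), so
 * cas_rxcksum() folds any IP option words back out of that sum in
 * software before marking the mbuf CSUM_DATA_VALID, e.g. for each
 * 16-bit option word 'o':  sum -= o; sum = (sum >> 16) + (sum & 0xffff);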
*/ #define CAS_CSUM_FEATURES (CSUM_TCP | CSUM_UDP) static inline void cas_add_rxdesc(struct cas_softc *sc, u_int idx); static int cas_attach(struct cas_softc *sc); static int cas_bitwait(struct cas_softc *sc, bus_addr_t r, uint32_t clr, uint32_t set); static void cas_cddma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error); static void cas_detach(struct cas_softc *sc); static int cas_disable_rx(struct cas_softc *sc); static int cas_disable_tx(struct cas_softc *sc); static void cas_eint(struct cas_softc *sc, u_int status); static void cas_free(struct mbuf *m); static void cas_init(void *xsc); static void cas_init_locked(struct cas_softc *sc); static void cas_init_regs(struct cas_softc *sc); static int cas_intr(void *v); static void cas_intr_task(void *arg, int pending __unused); static int cas_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data); static int cas_load_txmbuf(struct cas_softc *sc, struct mbuf **m_head); static int cas_mediachange(struct ifnet *ifp); static void cas_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr); static void cas_meminit(struct cas_softc *sc); static void cas_mifinit(struct cas_softc *sc); static int cas_mii_readreg(device_t dev, int phy, int reg); static void cas_mii_statchg(device_t dev); static int cas_mii_writereg(device_t dev, int phy, int reg, int val); static void cas_reset(struct cas_softc *sc); static int cas_reset_rx(struct cas_softc *sc); static int cas_reset_tx(struct cas_softc *sc); static void cas_resume(struct cas_softc *sc); static u_int cas_descsize(u_int sz); static void cas_rint(struct cas_softc *sc); static void cas_rint_timeout(void *arg); static inline void cas_rxcksum(struct mbuf *m, uint16_t cksum); static inline void cas_rxcompinit(struct cas_rx_comp *rxcomp); static u_int cas_rxcompsize(u_int sz); static void cas_rxdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error); static void cas_setladrf(struct cas_softc *sc); static void cas_start(struct ifnet *ifp); static void cas_stop(struct ifnet *ifp); static void cas_suspend(struct cas_softc *sc); static void cas_tick(void *arg); static void cas_tint(struct cas_softc *sc); static void cas_tx_task(void *arg, int pending __unused); static inline void cas_txkick(struct cas_softc *sc); static void cas_watchdog(struct cas_softc *sc); static devclass_t cas_devclass; MODULE_DEPEND(cas, ether, 1, 1, 1); MODULE_DEPEND(cas, miibus, 1, 1, 1); #ifdef CAS_DEBUG #include #define KTR_CAS KTR_SPARE2 #endif static int cas_attach(struct cas_softc *sc) { struct cas_txsoft *txs; struct ifnet *ifp; int error, i; uint32_t v; /* Set up ifnet structure. */ ifp = sc->sc_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) return (ENOSPC); ifp->if_softc = sc; if_initname(ifp, device_get_name(sc->sc_dev), device_get_unit(sc->sc_dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_start = cas_start; ifp->if_ioctl = cas_ioctl; ifp->if_init = cas_init; IFQ_SET_MAXLEN(&ifp->if_snd, CAS_TXQUEUELEN); ifp->if_snd.ifq_drv_maxlen = CAS_TXQUEUELEN; IFQ_SET_READY(&ifp->if_snd); callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0); callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0); /* Create local taskq. 
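 * The interrupt task is registered with NET_TASK_INIT() below so that
 * cas_intr_task() runs within the network epoch when it is dispatched
 * from the taskqueue; the transmit task remains a plain TASK_INIT().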
*/ - TASK_INIT(&sc->sc_intr_task, 0, cas_intr_task, sc); + NET_TASK_INIT(&sc->sc_intr_task, 0, cas_intr_task, sc); TASK_INIT(&sc->sc_tx_task, 1, cas_tx_task, ifp); sc->sc_tq = taskqueue_create_fast("cas_taskq", M_WAITOK, taskqueue_thread_enqueue, &sc->sc_tq); if (sc->sc_tq == NULL) { device_printf(sc->sc_dev, "could not create taskqueue\n"); error = ENXIO; goto fail_ifnet; } error = taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->sc_dev)); if (error != 0) { device_printf(sc->sc_dev, "could not start threads\n"); goto fail_taskq; } /* Make sure the chip is stopped. */ cas_reset(sc); error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, 0, BUS_SPACE_MAXSIZE, 0, NULL, NULL, &sc->sc_pdmatag); if (error != 0) goto fail_taskq; error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, CAS_PAGE_SIZE, 1, CAS_PAGE_SIZE, 0, NULL, NULL, &sc->sc_rdmatag); if (error != 0) goto fail_ptag; error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * CAS_NTXSEGS, CAS_NTXSEGS, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag); if (error != 0) goto fail_rtag; error = bus_dma_tag_create(sc->sc_pdmatag, CAS_TX_DESC_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct cas_control_data), 1, sizeof(struct cas_control_data), 0, NULL, NULL, &sc->sc_cdmatag); if (error != 0) goto fail_ttag; /* * Allocate the control data structures, create and load the * DMA map for it. */ if ((error = bus_dmamem_alloc(sc->sc_cdmatag, (void **)&sc->sc_control_data, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_cddmamap)) != 0) { device_printf(sc->sc_dev, "unable to allocate control data, error = %d\n", error); goto fail_ctag; } sc->sc_cddma = 0; if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap, sc->sc_control_data, sizeof(struct cas_control_data), cas_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) { device_printf(sc->sc_dev, "unable to load control data DMA map, error = %d\n", error); goto fail_cmem; } /* * Initialize the transmit job descriptors. */ STAILQ_INIT(&sc->sc_txfreeq); STAILQ_INIT(&sc->sc_txdirtyq); /* * Create the transmit buffer DMA maps. */ error = ENOMEM; for (i = 0; i < CAS_TXQUEUELEN; i++) { txs = &sc->sc_txsoft[i]; txs->txs_mbuf = NULL; txs->txs_ndescs = 0; if ((error = bus_dmamap_create(sc->sc_tdmatag, 0, &txs->txs_dmamap)) != 0) { device_printf(sc->sc_dev, "unable to create TX DMA map %d, error = %d\n", i, error); goto fail_txd; } STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); } /* * Allocate the receive buffers, create and load the DMA maps * for them. 
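 * Each buffer is a CAS_PAGE_SIZE allocation whose physical address is
 * recorded by cas_rxdma_callback() through the sc_rxdptr index set
 * just before bus_dmamap_load(); a paddr that is still 0 afterwards
 * therefore indicates a failed load.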
*/ for (i = 0; i < CAS_NRXDESC; i++) { if ((error = bus_dmamem_alloc(sc->sc_rdmatag, &sc->sc_rxdsoft[i].rxds_buf, BUS_DMA_WAITOK, &sc->sc_rxdsoft[i].rxds_dmamap)) != 0) { device_printf(sc->sc_dev, "unable to allocate RX buffer %d, error = %d\n", i, error); goto fail_rxmem; } sc->sc_rxdptr = i; sc->sc_rxdsoft[i].rxds_paddr = 0; if ((error = bus_dmamap_load(sc->sc_rdmatag, sc->sc_rxdsoft[i].rxds_dmamap, sc->sc_rxdsoft[i].rxds_buf, CAS_PAGE_SIZE, cas_rxdma_callback, sc, 0)) != 0 || sc->sc_rxdsoft[i].rxds_paddr == 0) { device_printf(sc->sc_dev, "unable to load RX DMA map %d, error = %d\n", i, error); goto fail_rxmap; } } if ((sc->sc_flags & CAS_SERDES) == 0) { CAS_WRITE_4(sc, CAS_PCS_DATAPATH, CAS_PCS_DATAPATH_MII); CAS_BARRIER(sc, CAS_PCS_DATAPATH, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); cas_mifinit(sc); /* * Look for an external PHY. */ error = ENXIO; v = CAS_READ_4(sc, CAS_MIF_CONF); if ((v & CAS_MIF_CONF_MDI1) != 0) { v |= CAS_MIF_CONF_PHY_SELECT; CAS_WRITE_4(sc, CAS_MIF_CONF, v); CAS_BARRIER(sc, CAS_MIF_CONF, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); /* Enable/unfreeze the GMII pins of Saturn. */ if (sc->sc_variant == CAS_SATURN) { CAS_WRITE_4(sc, CAS_SATURN_PCFG, CAS_READ_4(sc, CAS_SATURN_PCFG) & ~CAS_SATURN_PCFG_FSI); CAS_BARRIER(sc, CAS_SATURN_PCFG, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); DELAY(10000); } error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, cas_mediachange, cas_mediastatus, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, MIIF_DOPAUSE); } /* * Fall back on an internal PHY if no external PHY was found. */ if (error != 0 && (v & CAS_MIF_CONF_MDI0) != 0) { v &= ~CAS_MIF_CONF_PHY_SELECT; CAS_WRITE_4(sc, CAS_MIF_CONF, v); CAS_BARRIER(sc, CAS_MIF_CONF, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); /* Freeze the GMII pins of Saturn for saving power. */ if (sc->sc_variant == CAS_SATURN) { CAS_WRITE_4(sc, CAS_SATURN_PCFG, CAS_READ_4(sc, CAS_SATURN_PCFG) | CAS_SATURN_PCFG_FSI); CAS_BARRIER(sc, CAS_SATURN_PCFG, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); DELAY(10000); } error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, cas_mediachange, cas_mediastatus, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, MIIF_DOPAUSE); } } else { /* * Use the external PCS SERDES. */ CAS_WRITE_4(sc, CAS_PCS_DATAPATH, CAS_PCS_DATAPATH_SERDES); CAS_BARRIER(sc, CAS_PCS_DATAPATH, 4, BUS_SPACE_BARRIER_WRITE); /* Enable/unfreeze the SERDES pins of Saturn. */ if (sc->sc_variant == CAS_SATURN) { CAS_WRITE_4(sc, CAS_SATURN_PCFG, 0); CAS_BARRIER(sc, CAS_SATURN_PCFG, 4, BUS_SPACE_BARRIER_WRITE); } CAS_WRITE_4(sc, CAS_PCS_SERDES_CTRL, CAS_PCS_SERDES_CTRL_ESD); CAS_BARRIER(sc, CAS_PCS_SERDES_CTRL, 4, BUS_SPACE_BARRIER_WRITE); CAS_WRITE_4(sc, CAS_PCS_CONF, CAS_PCS_CONF_EN); CAS_BARRIER(sc, CAS_PCS_CONF, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, cas_mediachange, cas_mediastatus, BMSR_DEFCAPMASK, CAS_PHYAD_EXTERNAL, MII_OFFSET_ANY, MIIF_DOPAUSE); } if (error != 0) { device_printf(sc->sc_dev, "attaching PHYs failed\n"); goto fail_rxmap; } sc->sc_mii = device_get_softc(sc->sc_miibus); /* * From this point forward, the attachment cannot fail. A failure * before this point releases all resources that may have been * allocated. */ /* Announce FIFO sizes. */ v = CAS_READ_4(sc, CAS_TX_FIFO_SIZE); device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n", CAS_RX_FIFO_SIZE / 1024, v / 16); /* Attach the interface. 
*/ ether_ifattach(ifp, sc->sc_enaddr); /* * Tell the upper layer(s) we support long frames/checksum offloads. */ ifp->if_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities = IFCAP_VLAN_MTU; if ((sc->sc_flags & CAS_NO_CSUM) == 0) { ifp->if_capabilities |= IFCAP_HWCSUM; ifp->if_hwassist = CAS_CSUM_FEATURES; } ifp->if_capenable = ifp->if_capabilities; return (0); /* * Free any resources we've allocated during the failed attach * attempt. Do this in reverse order and fall through. */ fail_rxmap: for (i = 0; i < CAS_NRXDESC; i++) if (sc->sc_rxdsoft[i].rxds_paddr != 0) bus_dmamap_unload(sc->sc_rdmatag, sc->sc_rxdsoft[i].rxds_dmamap); fail_rxmem: for (i = 0; i < CAS_NRXDESC; i++) if (sc->sc_rxdsoft[i].rxds_buf != NULL) bus_dmamem_free(sc->sc_rdmatag, sc->sc_rxdsoft[i].rxds_buf, sc->sc_rxdsoft[i].rxds_dmamap); fail_txd: for (i = 0; i < CAS_TXQUEUELEN; i++) if (sc->sc_txsoft[i].txs_dmamap != NULL) bus_dmamap_destroy(sc->sc_tdmatag, sc->sc_txsoft[i].txs_dmamap); bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap); fail_cmem: bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, sc->sc_cddmamap); fail_ctag: bus_dma_tag_destroy(sc->sc_cdmatag); fail_ttag: bus_dma_tag_destroy(sc->sc_tdmatag); fail_rtag: bus_dma_tag_destroy(sc->sc_rdmatag); fail_ptag: bus_dma_tag_destroy(sc->sc_pdmatag); fail_taskq: taskqueue_free(sc->sc_tq); fail_ifnet: if_free(ifp); return (error); } static void cas_detach(struct cas_softc *sc) { struct ifnet *ifp = sc->sc_ifp; int i; ether_ifdetach(ifp); CAS_LOCK(sc); cas_stop(ifp); CAS_UNLOCK(sc); callout_drain(&sc->sc_tick_ch); callout_drain(&sc->sc_rx_ch); taskqueue_drain(sc->sc_tq, &sc->sc_intr_task); taskqueue_drain(sc->sc_tq, &sc->sc_tx_task); if_free(ifp); taskqueue_free(sc->sc_tq); device_delete_child(sc->sc_dev, sc->sc_miibus); for (i = 0; i < CAS_NRXDESC; i++) if (sc->sc_rxdsoft[i].rxds_dmamap != NULL) bus_dmamap_sync(sc->sc_rdmatag, sc->sc_rxdsoft[i].rxds_dmamap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); for (i = 0; i < CAS_NRXDESC; i++) if (sc->sc_rxdsoft[i].rxds_paddr != 0) bus_dmamap_unload(sc->sc_rdmatag, sc->sc_rxdsoft[i].rxds_dmamap); for (i = 0; i < CAS_NRXDESC; i++) if (sc->sc_rxdsoft[i].rxds_buf != NULL) bus_dmamem_free(sc->sc_rdmatag, sc->sc_rxdsoft[i].rxds_buf, sc->sc_rxdsoft[i].rxds_dmamap); for (i = 0; i < CAS_TXQUEUELEN; i++) if (sc->sc_txsoft[i].txs_dmamap != NULL) bus_dmamap_destroy(sc->sc_tdmatag, sc->sc_txsoft[i].txs_dmamap); CAS_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap); bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, sc->sc_cddmamap); bus_dma_tag_destroy(sc->sc_cdmatag); bus_dma_tag_destroy(sc->sc_tdmatag); bus_dma_tag_destroy(sc->sc_rdmatag); bus_dma_tag_destroy(sc->sc_pdmatag); } static void cas_suspend(struct cas_softc *sc) { struct ifnet *ifp = sc->sc_ifp; CAS_LOCK(sc); cas_stop(ifp); CAS_UNLOCK(sc); } static void cas_resume(struct cas_softc *sc) { struct ifnet *ifp = sc->sc_ifp; CAS_LOCK(sc); /* * On resume all registers have to be initialized again like * after power-on. 
*/ sc->sc_flags &= ~CAS_INITED; if (ifp->if_flags & IFF_UP) cas_init_locked(sc); CAS_UNLOCK(sc); } static inline void cas_rxcksum(struct mbuf *m, uint16_t cksum) { struct ether_header *eh; struct ip *ip; struct udphdr *uh; uint16_t *opts; int32_t hlen, len, pktlen; uint32_t temp32; pktlen = m->m_pkthdr.len; if (pktlen < sizeof(struct ether_header) + sizeof(struct ip)) return; eh = mtod(m, struct ether_header *); if (eh->ether_type != htons(ETHERTYPE_IP)) return; ip = (struct ip *)(eh + 1); if (ip->ip_v != IPVERSION) return; hlen = ip->ip_hl << 2; pktlen -= sizeof(struct ether_header); if (hlen < sizeof(struct ip)) return; if (ntohs(ip->ip_len) < hlen) return; if (ntohs(ip->ip_len) != pktlen) return; if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) return; /* Cannot handle fragmented packet. */ switch (ip->ip_p) { case IPPROTO_TCP: if (pktlen < (hlen + sizeof(struct tcphdr))) return; break; case IPPROTO_UDP: if (pktlen < (hlen + sizeof(struct udphdr))) return; uh = (struct udphdr *)((uint8_t *)ip + hlen); if (uh->uh_sum == 0) return; /* no checksum */ break; default: return; } cksum = ~cksum; /* checksum fixup for IP options */ len = hlen - sizeof(struct ip); if (len > 0) { opts = (uint16_t *)(ip + 1); for (; len > 0; len -= sizeof(uint16_t), opts++) { temp32 = cksum - *opts; temp32 = (temp32 >> 16) + (temp32 & 65535); cksum = temp32 & 65535; } } m->m_pkthdr.csum_flags |= CSUM_DATA_VALID; m->m_pkthdr.csum_data = cksum; } static void cas_cddma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error) { struct cas_softc *sc = xsc; if (error != 0) return; if (nsegs != 1) panic("%s: bad control buffer segment count", __func__); sc->sc_cddma = segs[0].ds_addr; } static void cas_rxdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error) { struct cas_softc *sc = xsc; if (error != 0) return; if (nsegs != 1) panic("%s: bad RX buffer segment count", __func__); sc->sc_rxdsoft[sc->sc_rxdptr].rxds_paddr = segs[0].ds_addr; } static void cas_tick(void *arg) { struct cas_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; uint32_t v; CAS_LOCK_ASSERT(sc, MA_OWNED); /* * Unload collision and error counters. */ if_inc_counter(ifp, IFCOUNTER_COLLISIONS, CAS_READ_4(sc, CAS_MAC_NORM_COLL_CNT) + CAS_READ_4(sc, CAS_MAC_FIRST_COLL_CNT)); v = CAS_READ_4(sc, CAS_MAC_EXCESS_COLL_CNT) + CAS_READ_4(sc, CAS_MAC_LATE_COLL_CNT); if_inc_counter(ifp, IFCOUNTER_COLLISIONS, v); if_inc_counter(ifp, IFCOUNTER_OERRORS, v); if_inc_counter(ifp, IFCOUNTER_IERRORS, CAS_READ_4(sc, CAS_MAC_RX_LEN_ERR_CNT) + CAS_READ_4(sc, CAS_MAC_RX_ALIGN_ERR) + CAS_READ_4(sc, CAS_MAC_RX_CRC_ERR_CNT) + CAS_READ_4(sc, CAS_MAC_RX_CODE_VIOL)); /* * Then clear the hardware counters. 
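 * Zeroing them here means the next tick picks up only the delta
 * accumulated since this one.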
*/ CAS_WRITE_4(sc, CAS_MAC_NORM_COLL_CNT, 0); CAS_WRITE_4(sc, CAS_MAC_FIRST_COLL_CNT, 0); CAS_WRITE_4(sc, CAS_MAC_EXCESS_COLL_CNT, 0); CAS_WRITE_4(sc, CAS_MAC_LATE_COLL_CNT, 0); CAS_WRITE_4(sc, CAS_MAC_RX_LEN_ERR_CNT, 0); CAS_WRITE_4(sc, CAS_MAC_RX_ALIGN_ERR, 0); CAS_WRITE_4(sc, CAS_MAC_RX_CRC_ERR_CNT, 0); CAS_WRITE_4(sc, CAS_MAC_RX_CODE_VIOL, 0); mii_tick(sc->sc_mii); if (sc->sc_txfree != CAS_MAXTXFREE) cas_tint(sc); cas_watchdog(sc); callout_reset(&sc->sc_tick_ch, hz, cas_tick, sc); } static int cas_bitwait(struct cas_softc *sc, bus_addr_t r, uint32_t clr, uint32_t set) { int i; uint32_t reg; for (i = CAS_TRIES; i--; DELAY(100)) { reg = CAS_READ_4(sc, r); if ((reg & clr) == 0 && (reg & set) == set) return (1); } return (0); } static void cas_reset(struct cas_softc *sc) { #ifdef CAS_DEBUG CTR2(KTR_CAS, "%s: %s", device_get_name(sc->sc_dev), __func__); #endif /* Disable all interrupts in order to avoid spurious ones. */ CAS_WRITE_4(sc, CAS_INTMASK, 0xffffffff); cas_reset_rx(sc); cas_reset_tx(sc); /* * Do a full reset modulo the result of the last auto-negotiation * when using the SERDES. */ CAS_WRITE_4(sc, CAS_RESET, CAS_RESET_RX | CAS_RESET_TX | ((sc->sc_flags & CAS_SERDES) != 0 ? CAS_RESET_PCS_DIS : 0)); CAS_BARRIER(sc, CAS_RESET, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); DELAY(3000); if (!cas_bitwait(sc, CAS_RESET, CAS_RESET_RX | CAS_RESET_TX, 0)) device_printf(sc->sc_dev, "cannot reset device\n"); } static void cas_stop(struct ifnet *ifp) { struct cas_softc *sc = ifp->if_softc; struct cas_txsoft *txs; #ifdef CAS_DEBUG CTR2(KTR_CAS, "%s: %s", device_get_name(sc->sc_dev), __func__); #endif callout_stop(&sc->sc_tick_ch); callout_stop(&sc->sc_rx_ch); /* Disable all interrupts in order to avoid spurious ones. */ CAS_WRITE_4(sc, CAS_INTMASK, 0xffffffff); cas_reset_tx(sc); cas_reset_rx(sc); /* * Release any queued transmit buffers. */ while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); if (txs->txs_ndescs != 0) { bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); if (txs->txs_mbuf != NULL) { m_freem(txs->txs_mbuf); txs->txs_mbuf = NULL; } } STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); } /* * Mark the interface down and cancel the watchdog timer. */ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); sc->sc_flags &= ~CAS_LINK; sc->sc_wdog_timer = 0; } static int cas_reset_rx(struct cas_softc *sc) { /* * Resetting while DMA is in progress can cause a bus hang, so we * disable DMA first. */ (void)cas_disable_rx(sc); CAS_WRITE_4(sc, CAS_RX_CONF, 0); CAS_BARRIER(sc, CAS_RX_CONF, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); if (!cas_bitwait(sc, CAS_RX_CONF, CAS_RX_CONF_RXDMA_EN, 0)) device_printf(sc->sc_dev, "cannot disable RX DMA\n"); /* Finally, reset the ERX. */ CAS_WRITE_4(sc, CAS_RESET, CAS_RESET_RX | ((sc->sc_flags & CAS_SERDES) != 0 ? CAS_RESET_PCS_DIS : 0)); CAS_BARRIER(sc, CAS_RESET, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); if (!cas_bitwait(sc, CAS_RESET, CAS_RESET_RX, 0)) { device_printf(sc->sc_dev, "cannot reset receiver\n"); return (1); } return (0); } static int cas_reset_tx(struct cas_softc *sc) { /* * Resetting while DMA is in progress can cause a bus hang, so we * disable DMA first. 
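 * The sequence mirrors cas_reset_rx(): disable the TX MAC, clear the
 * DMA enable bit and wait for it to drop via cas_bitwait(), and only
 * then assert CAS_RESET_TX and wait for the reset bit to clear again.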
*/ (void)cas_disable_tx(sc); CAS_WRITE_4(sc, CAS_TX_CONF, 0); CAS_BARRIER(sc, CAS_TX_CONF, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); if (!cas_bitwait(sc, CAS_TX_CONF, CAS_TX_CONF_TXDMA_EN, 0)) device_printf(sc->sc_dev, "cannot disable TX DMA\n"); /* Finally, reset the ETX. */ CAS_WRITE_4(sc, CAS_RESET, CAS_RESET_TX | ((sc->sc_flags & CAS_SERDES) != 0 ? CAS_RESET_PCS_DIS : 0)); CAS_BARRIER(sc, CAS_RESET, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); if (!cas_bitwait(sc, CAS_RESET, CAS_RESET_TX, 0)) { device_printf(sc->sc_dev, "cannot reset transmitter\n"); return (1); } return (0); } static int cas_disable_rx(struct cas_softc *sc) { CAS_WRITE_4(sc, CAS_MAC_RX_CONF, CAS_READ_4(sc, CAS_MAC_RX_CONF) & ~CAS_MAC_RX_CONF_EN); CAS_BARRIER(sc, CAS_MAC_RX_CONF, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); if (cas_bitwait(sc, CAS_MAC_RX_CONF, CAS_MAC_RX_CONF_EN, 0)) return (1); if (bootverbose) device_printf(sc->sc_dev, "cannot disable RX MAC\n"); return (0); } static int cas_disable_tx(struct cas_softc *sc) { CAS_WRITE_4(sc, CAS_MAC_TX_CONF, CAS_READ_4(sc, CAS_MAC_TX_CONF) & ~CAS_MAC_TX_CONF_EN); CAS_BARRIER(sc, CAS_MAC_TX_CONF, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); if (cas_bitwait(sc, CAS_MAC_TX_CONF, CAS_MAC_TX_CONF_EN, 0)) return (1); if (bootverbose) device_printf(sc->sc_dev, "cannot disable TX MAC\n"); return (0); } static inline void cas_rxcompinit(struct cas_rx_comp *rxcomp) { rxcomp->crc_word1 = 0; rxcomp->crc_word2 = 0; rxcomp->crc_word3 = htole64(CAS_SET(ETHER_HDR_LEN + sizeof(struct ip), CAS_RC3_CSO)); rxcomp->crc_word4 = htole64(CAS_RC4_ZERO); } static void cas_meminit(struct cas_softc *sc) { int i; CAS_LOCK_ASSERT(sc, MA_OWNED); /* * Initialize the transmit descriptor ring. */ for (i = 0; i < CAS_NTXDESC; i++) { sc->sc_txdescs[i].cd_flags = 0; sc->sc_txdescs[i].cd_buf_ptr = 0; } sc->sc_txfree = CAS_MAXTXFREE; sc->sc_txnext = 0; sc->sc_txwin = 0; /* * Initialize the receive completion ring. */ for (i = 0; i < CAS_NRXCOMP; i++) cas_rxcompinit(&sc->sc_rxcomps[i]); sc->sc_rxcptr = 0; /* * Initialize the first receive descriptor ring. We leave * the second one zeroed as we don't actually use it. */ for (i = 0; i < CAS_NRXDESC; i++) CAS_INIT_RXDESC(sc, i, i); sc->sc_rxdptr = 0; CAS_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } static u_int cas_descsize(u_int sz) { switch (sz) { case 32: return (CAS_DESC_32); case 64: return (CAS_DESC_64); case 128: return (CAS_DESC_128); case 256: return (CAS_DESC_256); case 512: return (CAS_DESC_512); case 1024: return (CAS_DESC_1K); case 2048: return (CAS_DESC_2K); case 4096: return (CAS_DESC_4K); case 8192: return (CAS_DESC_8K); default: printf("%s: invalid descriptor ring size %d\n", __func__, sz); return (CAS_DESC_32); } } static u_int cas_rxcompsize(u_int sz) { switch (sz) { case 128: return (CAS_RX_CONF_COMP_128); case 256: return (CAS_RX_CONF_COMP_256); case 512: return (CAS_RX_CONF_COMP_512); case 1024: return (CAS_RX_CONF_COMP_1K); case 2048: return (CAS_RX_CONF_COMP_2K); case 4096: return (CAS_RX_CONF_COMP_4K); case 8192: return (CAS_RX_CONF_COMP_8K); case 16384: return (CAS_RX_CONF_COMP_16K); case 32768: return (CAS_RX_CONF_COMP_32K); default: printf("%s: invalid dcompletion ring size %d\n", __func__, sz); return (CAS_RX_CONF_COMP_128); } } static void cas_init(void *xsc) { struct cas_softc *sc = xsc; CAS_LOCK(sc); cas_init_locked(sc); CAS_UNLOCK(sc); } /* * Initialization of interface; set up initialization block * and transmit/receive descriptor rings. 
*/ static void cas_init_locked(struct cas_softc *sc) { struct ifnet *ifp = sc->sc_ifp; uint32_t v; CAS_LOCK_ASSERT(sc, MA_OWNED); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) return; #ifdef CAS_DEBUG CTR2(KTR_CAS, "%s: %s: calling stop", device_get_name(sc->sc_dev), __func__); #endif /* * Initialization sequence. The numbered steps below correspond * to the sequence outlined in section 6.3.5.1 in the Ethernet * Channel Engine manual (part of the PCIO manual). * See also the STP2002-STQ document from Sun Microsystems. */ /* step 1 & 2. Reset the Ethernet Channel. */ cas_stop(ifp); cas_reset(sc); #ifdef CAS_DEBUG CTR2(KTR_CAS, "%s: %s: restarting", device_get_name(sc->sc_dev), __func__); #endif if ((sc->sc_flags & CAS_SERDES) == 0) /* Re-initialize the MIF. */ cas_mifinit(sc); /* step 3. Setup data structures in host memory. */ cas_meminit(sc); /* step 4. TX MAC registers & counters */ cas_init_regs(sc); /* step 5. RX MAC registers & counters */ /* step 6 & 7. Program Ring Base Addresses. */ CAS_WRITE_4(sc, CAS_TX_DESC3_BASE_HI, (((uint64_t)CAS_CDTXDADDR(sc, 0)) >> 32)); CAS_WRITE_4(sc, CAS_TX_DESC3_BASE_LO, CAS_CDTXDADDR(sc, 0) & 0xffffffff); CAS_WRITE_4(sc, CAS_RX_COMP_BASE_HI, (((uint64_t)CAS_CDRXCADDR(sc, 0)) >> 32)); CAS_WRITE_4(sc, CAS_RX_COMP_BASE_LO, CAS_CDRXCADDR(sc, 0) & 0xffffffff); CAS_WRITE_4(sc, CAS_RX_DESC_BASE_HI, (((uint64_t)CAS_CDRXDADDR(sc, 0)) >> 32)); CAS_WRITE_4(sc, CAS_RX_DESC_BASE_LO, CAS_CDRXDADDR(sc, 0) & 0xffffffff); if ((sc->sc_flags & CAS_REG_PLUS) != 0) { CAS_WRITE_4(sc, CAS_RX_DESC2_BASE_HI, (((uint64_t)CAS_CDRXD2ADDR(sc, 0)) >> 32)); CAS_WRITE_4(sc, CAS_RX_DESC2_BASE_LO, CAS_CDRXD2ADDR(sc, 0) & 0xffffffff); } #ifdef CAS_DEBUG CTR5(KTR_CAS, "loading TXDR %lx, RXCR %lx, RXDR %lx, RXD2R %lx, cddma %lx", CAS_CDTXDADDR(sc, 0), CAS_CDRXCADDR(sc, 0), CAS_CDRXDADDR(sc, 0), CAS_CDRXD2ADDR(sc, 0), sc->sc_cddma); #endif /* step 8. Global Configuration & Interrupt Masks */ /* Disable weighted round robin. */ CAS_WRITE_4(sc, CAS_CAW, CAS_CAW_RR_DIS); /* * Enable infinite bursts for revisions without PCI issues if * applicable. Doing so greatly improves the TX performance. */ CAS_WRITE_4(sc, CAS_INF_BURST, (sc->sc_flags & CAS_TABORT) == 0 ? CAS_INF_BURST_EN : 0); /* Set up interrupts. */ CAS_WRITE_4(sc, CAS_INTMASK, ~(CAS_INTR_TX_INT_ME | CAS_INTR_TX_TAG_ERR | CAS_INTR_RX_DONE | CAS_INTR_RX_BUF_NA | CAS_INTR_RX_TAG_ERR | CAS_INTR_RX_COMP_FULL | CAS_INTR_RX_BUF_AEMPTY | CAS_INTR_RX_COMP_AFULL | CAS_INTR_RX_LEN_MMATCH | CAS_INTR_PCI_ERROR_INT #ifdef CAS_DEBUG | CAS_INTR_PCS_INT | CAS_INTR_MIF #endif )); /* Don't clear top level interrupts when CAS_STATUS_ALIAS is read. */ CAS_WRITE_4(sc, CAS_CLEAR_ALIAS, 0); CAS_WRITE_4(sc, CAS_MAC_RX_MASK, ~CAS_MAC_RX_OVERFLOW); CAS_WRITE_4(sc, CAS_MAC_TX_MASK, ~(CAS_MAC_TX_UNDERRUN | CAS_MAC_TX_MAX_PKT_ERR)); #ifdef CAS_DEBUG CAS_WRITE_4(sc, CAS_MAC_CTRL_MASK, ~(CAS_MAC_CTRL_PAUSE_RCVD | CAS_MAC_CTRL_PAUSE | CAS_MAC_CTRL_NON_PAUSE)); #else CAS_WRITE_4(sc, CAS_MAC_CTRL_MASK, CAS_MAC_CTRL_PAUSE_RCVD | CAS_MAC_CTRL_PAUSE | CAS_MAC_CTRL_NON_PAUSE); #endif /* Enable PCI error interrupts. */ CAS_WRITE_4(sc, CAS_ERROR_MASK, ~(CAS_ERROR_DTRTO | CAS_ERROR_OTHER | CAS_ERROR_DMAW_ZERO | CAS_ERROR_DMAR_ZERO | CAS_ERROR_RTRTO)); /* Enable PCI error interrupts in BIM configuration. */ CAS_WRITE_4(sc, CAS_BIM_CONF, CAS_BIM_CONF_DPAR_EN | CAS_BIM_CONF_RMA_EN | CAS_BIM_CONF_RTA_EN); /* * step 9. ETX Configuration: encode receive descriptor ring size, * enable DMA and disable pre-interrupt writeback completion. 
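 *
 * Illustrative aside, not part of the driver: steps 6 & 7 above split each
 * 64-bit ring base address across a HI/LO register pair.  A minimal sketch
 * of that idiom (write_ring_base() is a hypothetical helper; the register
 * macros are the ones already used above):
 *
 *	static void
 *	write_ring_base(struct cas_softc *sc, bus_addr_t base,
 *	    bus_size_t hi_reg, bus_size_t lo_reg)
 *	{
 *		CAS_WRITE_4(sc, hi_reg, (uint32_t)((uint64_t)base >> 32));
 *		CAS_WRITE_4(sc, lo_reg, (uint32_t)(base & 0xffffffff));
 *	}
 *
 * e.g. write_ring_base(sc, CAS_CDTXDADDR(sc, 0), CAS_TX_DESC3_BASE_HI,
 * CAS_TX_DESC3_BASE_LO) would reproduce the first pair of writes above.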
*/ v = cas_descsize(CAS_NTXDESC) << CAS_TX_CONF_DESC3_SHFT; CAS_WRITE_4(sc, CAS_TX_CONF, v | CAS_TX_CONF_TXDMA_EN | CAS_TX_CONF_RDPP_DIS | CAS_TX_CONF_PICWB_DIS); /* step 10. ERX Configuration */ /* * Encode receive completion and descriptor ring sizes, set the * swivel offset. */ v = cas_rxcompsize(CAS_NRXCOMP) << CAS_RX_CONF_COMP_SHFT; v |= cas_descsize(CAS_NRXDESC) << CAS_RX_CONF_DESC_SHFT; if ((sc->sc_flags & CAS_REG_PLUS) != 0) v |= cas_descsize(CAS_NRXDESC2) << CAS_RX_CONF_DESC2_SHFT; CAS_WRITE_4(sc, CAS_RX_CONF, v | (ETHER_ALIGN << CAS_RX_CONF_SOFF_SHFT)); /* Set the PAUSE thresholds. We use the maximum OFF threshold. */ CAS_WRITE_4(sc, CAS_RX_PTHRS, (111 << CAS_RX_PTHRS_XOFF_SHFT) | (15 << CAS_RX_PTHRS_XON_SHFT)); /* RX blanking */ CAS_WRITE_4(sc, CAS_RX_BLANK, (15 << CAS_RX_BLANK_TIME_SHFT) | (5 << CAS_RX_BLANK_PKTS_SHFT)); /* Set RX_COMP_AFULL threshold to half of the RX completions. */ CAS_WRITE_4(sc, CAS_RX_AEMPTY_THRS, (CAS_NRXCOMP / 2) << CAS_RX_AEMPTY_COMP_SHFT); /* Initialize the RX page size register as appropriate for 8k. */ CAS_WRITE_4(sc, CAS_RX_PSZ, (CAS_RX_PSZ_8K << CAS_RX_PSZ_SHFT) | (4 << CAS_RX_PSZ_MB_CNT_SHFT) | (CAS_RX_PSZ_MB_STRD_2K << CAS_RX_PSZ_MB_STRD_SHFT) | (CAS_RX_PSZ_MB_OFF_64 << CAS_RX_PSZ_MB_OFF_SHFT)); /* Disable RX random early detection. */ CAS_WRITE_4(sc, CAS_RX_RED, 0); /* Zero the RX reassembly DMA table. */ for (v = 0; v <= CAS_RX_REAS_DMA_ADDR_LC; v++) { CAS_WRITE_4(sc, CAS_RX_REAS_DMA_ADDR, v); CAS_WRITE_4(sc, CAS_RX_REAS_DMA_DATA_LO, 0); CAS_WRITE_4(sc, CAS_RX_REAS_DMA_DATA_MD, 0); CAS_WRITE_4(sc, CAS_RX_REAS_DMA_DATA_HI, 0); } /* Ensure the RX control FIFO and RX IPP FIFO addresses are zero. */ CAS_WRITE_4(sc, CAS_RX_CTRL_FIFO, 0); CAS_WRITE_4(sc, CAS_RX_IPP_ADDR, 0); /* Finally, enable RX DMA. */ CAS_WRITE_4(sc, CAS_RX_CONF, CAS_READ_4(sc, CAS_RX_CONF) | CAS_RX_CONF_RXDMA_EN); /* step 11. Configure Media. */ /* step 12. RX_MAC Configuration Register */ v = CAS_READ_4(sc, CAS_MAC_RX_CONF); v &= ~(CAS_MAC_RX_CONF_STRPPAD | CAS_MAC_RX_CONF_EN); v |= CAS_MAC_RX_CONF_STRPFCS; sc->sc_mac_rxcfg = v; /* * Clear the RX filter and reprogram it. This will also set the * current RX MAC configuration and enable it. */ cas_setladrf(sc); /* step 13. TX_MAC Configuration Register */ v = CAS_READ_4(sc, CAS_MAC_TX_CONF); v |= CAS_MAC_TX_CONF_EN; (void)cas_disable_tx(sc); CAS_WRITE_4(sc, CAS_MAC_TX_CONF, v); /* step 14. Issue Transmit Pending command. */ /* step 15. Give the receiver a swift kick. */ CAS_WRITE_4(sc, CAS_RX_KICK, CAS_NRXDESC - 4); CAS_WRITE_4(sc, CAS_RX_COMP_TAIL, 0); if ((sc->sc_flags & CAS_REG_PLUS) != 0) CAS_WRITE_4(sc, CAS_RX_KICK2, CAS_NRXDESC2 - 4); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; mii_mediachg(sc->sc_mii); /* Start the one second timer. */ sc->sc_wdog_timer = 0; callout_reset(&sc->sc_tick_ch, hz, cas_tick, sc); } static int cas_load_txmbuf(struct cas_softc *sc, struct mbuf **m_head) { bus_dma_segment_t txsegs[CAS_NTXSEGS]; struct cas_txsoft *txs; struct ip *ip; struct mbuf *m; uint64_t cflags; int error, nexttx, nsegs, offset, seg; CAS_LOCK_ASSERT(sc, MA_OWNED); /* Get a work queue entry. */ if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) { /* Ran out of descriptors. 
*/ return (ENOBUFS); } cflags = 0; if (((*m_head)->m_pkthdr.csum_flags & CAS_CSUM_FEATURES) != 0) { if (M_WRITABLE(*m_head) == 0) { m = m_dup(*m_head, M_NOWAIT); m_freem(*m_head); *m_head = m; if (m == NULL) return (ENOBUFS); } offset = sizeof(struct ether_header); m = m_pullup(*m_head, offset + sizeof(struct ip)); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } ip = (struct ip *)(mtod(m, caddr_t) + offset); offset += (ip->ip_hl << 2); cflags = (offset << CAS_TD_CKSUM_START_SHFT) | ((offset + m->m_pkthdr.csum_data) << CAS_TD_CKSUM_STUFF_SHFT) | CAS_TD_CKSUM_EN; *m_head = m; } error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); if (error == EFBIG) { m = m_collapse(*m_head, M_NOWAIT, CAS_NTXSEGS); if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } *m_head = m; error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { m_freem(*m_head); *m_head = NULL; return (error); } } else if (error != 0) return (error); /* If nsegs is wrong then the stack is corrupt. */ KASSERT(nsegs <= CAS_NTXSEGS, ("%s: too many DMA segments (%d)", __func__, nsegs)); if (nsegs == 0) { m_freem(*m_head); *m_head = NULL; return (EIO); } /* * Ensure we have enough descriptors free to describe * the packet. Note, we always reserve one descriptor * at the end of the ring as a termination point, in * order to prevent wrap-around. */ if (nsegs > sc->sc_txfree - 1) { txs->txs_ndescs = 0; bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); return (ENOBUFS); } txs->txs_ndescs = nsegs; txs->txs_firstdesc = sc->sc_txnext; nexttx = txs->txs_firstdesc; for (seg = 0; seg < nsegs; seg++, nexttx = CAS_NEXTTX(nexttx)) { #ifdef CAS_DEBUG CTR6(KTR_CAS, "%s: mapping seg %d (txd %d), len %lx, addr %#lx (%#lx)", __func__, seg, nexttx, txsegs[seg].ds_len, txsegs[seg].ds_addr, htole64(txsegs[seg].ds_addr)); #endif sc->sc_txdescs[nexttx].cd_buf_ptr = htole64(txsegs[seg].ds_addr); KASSERT(txsegs[seg].ds_len < CAS_TD_BUF_LEN_MASK >> CAS_TD_BUF_LEN_SHFT, ("%s: segment size too large!", __func__)); sc->sc_txdescs[nexttx].cd_flags = htole64(txsegs[seg].ds_len << CAS_TD_BUF_LEN_SHFT); txs->txs_lastdesc = nexttx; } /* Set EOF on the last descriptor. */ #ifdef CAS_DEBUG CTR3(KTR_CAS, "%s: end of frame at segment %d, TX %d", __func__, seg, nexttx); #endif sc->sc_txdescs[txs->txs_lastdesc].cd_flags |= htole64(CAS_TD_END_OF_FRAME); /* Lastly set SOF on the first descriptor. */ #ifdef CAS_DEBUG CTR3(KTR_CAS, "%s: start of frame at segment %d, TX %d", __func__, seg, nexttx); #endif if (sc->sc_txwin += nsegs > CAS_MAXTXFREE * 2 / 3) { sc->sc_txwin = 0; sc->sc_txdescs[txs->txs_firstdesc].cd_flags |= htole64(cflags | CAS_TD_START_OF_FRAME | CAS_TD_INT_ME); } else sc->sc_txdescs[txs->txs_firstdesc].cd_flags |= htole64(cflags | CAS_TD_START_OF_FRAME); /* Sync the DMA map. */ bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, BUS_DMASYNC_PREWRITE); #ifdef CAS_DEBUG CTR4(KTR_CAS, "%s: setting firstdesc=%d, lastdesc=%d, ndescs=%d", __func__, txs->txs_firstdesc, txs->txs_lastdesc, txs->txs_ndescs); #endif STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q); STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q); txs->txs_mbuf = *m_head; sc->sc_txnext = CAS_NEXTTX(txs->txs_lastdesc); sc->sc_txfree -= txs->txs_ndescs; return (0); } static void cas_init_regs(struct cas_softc *sc) { int i; const u_char *laddr = IF_LLADDR(sc->sc_ifp); CAS_LOCK_ASSERT(sc, MA_OWNED); /* These registers are not cleared on reset. 
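 *
 * Illustrative aside, not part of the driver: the cflags value built in
 * cas_load_txmbuf() above packs two byte offsets into the first TX
 * descriptor.  Worked example for a TCP segment in an IPv4 packet with no
 * IP options (offsets counted from the start of the Ethernet frame):
 *
 *	checksum start = ETHER_HDR_LEN + IP header = 14 + 20 = 34
 *	checksum stuff = start + csum_data         = 34 + 16 = 50
 *
 * (the stack sets m_pkthdr.csum_data to the offset of th_sum within the
 * TCP header, i.e. 16), so the descriptor flags would be:
 *
 *	cflags = (34ULL << CAS_TD_CKSUM_START_SHFT) |
 *	    (50ULL << CAS_TD_CKSUM_STUFF_SHFT) | CAS_TD_CKSUM_EN;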
*/ if ((sc->sc_flags & CAS_INITED) == 0) { /* magic values */ CAS_WRITE_4(sc, CAS_MAC_IPG0, 0); CAS_WRITE_4(sc, CAS_MAC_IPG1, 8); CAS_WRITE_4(sc, CAS_MAC_IPG2, 4); /* min frame length */ CAS_WRITE_4(sc, CAS_MAC_MIN_FRAME, ETHER_MIN_LEN); /* max frame length and max burst size */ CAS_WRITE_4(sc, CAS_MAC_MAX_BF, ((ETHER_MAX_LEN_JUMBO + ETHER_VLAN_ENCAP_LEN) << CAS_MAC_MAX_BF_FRM_SHFT) | (0x2000 << CAS_MAC_MAX_BF_BST_SHFT)); /* more magic values */ CAS_WRITE_4(sc, CAS_MAC_PREAMBLE_LEN, 0x7); CAS_WRITE_4(sc, CAS_MAC_JAM_SIZE, 0x4); CAS_WRITE_4(sc, CAS_MAC_ATTEMPT_LIMIT, 0x10); CAS_WRITE_4(sc, CAS_MAC_CTRL_TYPE, 0x8808); /* random number seed */ CAS_WRITE_4(sc, CAS_MAC_RANDOM_SEED, ((laddr[5] << 8) | laddr[4]) & 0x3ff); /* secondary MAC addresses: 0:0:0:0:0:0 */ for (i = CAS_MAC_ADDR3; i <= CAS_MAC_ADDR41; i += CAS_MAC_ADDR4 - CAS_MAC_ADDR3) CAS_WRITE_4(sc, i, 0); /* MAC control address: 01:80:c2:00:00:01 */ CAS_WRITE_4(sc, CAS_MAC_ADDR42, 0x0001); CAS_WRITE_4(sc, CAS_MAC_ADDR43, 0xc200); CAS_WRITE_4(sc, CAS_MAC_ADDR44, 0x0180); /* MAC filter address: 0:0:0:0:0:0 */ CAS_WRITE_4(sc, CAS_MAC_AFILTER0, 0); CAS_WRITE_4(sc, CAS_MAC_AFILTER1, 0); CAS_WRITE_4(sc, CAS_MAC_AFILTER2, 0); CAS_WRITE_4(sc, CAS_MAC_AFILTER_MASK1_2, 0); CAS_WRITE_4(sc, CAS_MAC_AFILTER_MASK0, 0); /* Zero the hash table. */ for (i = CAS_MAC_HASH0; i <= CAS_MAC_HASH15; i += CAS_MAC_HASH1 - CAS_MAC_HASH0) CAS_WRITE_4(sc, i, 0); sc->sc_flags |= CAS_INITED; } /* Counters need to be zeroed. */ CAS_WRITE_4(sc, CAS_MAC_NORM_COLL_CNT, 0); CAS_WRITE_4(sc, CAS_MAC_FIRST_COLL_CNT, 0); CAS_WRITE_4(sc, CAS_MAC_EXCESS_COLL_CNT, 0); CAS_WRITE_4(sc, CAS_MAC_LATE_COLL_CNT, 0); CAS_WRITE_4(sc, CAS_MAC_DEFER_TMR_CNT, 0); CAS_WRITE_4(sc, CAS_MAC_PEAK_ATTEMPTS, 0); CAS_WRITE_4(sc, CAS_MAC_RX_FRAME_COUNT, 0); CAS_WRITE_4(sc, CAS_MAC_RX_LEN_ERR_CNT, 0); CAS_WRITE_4(sc, CAS_MAC_RX_ALIGN_ERR, 0); CAS_WRITE_4(sc, CAS_MAC_RX_CRC_ERR_CNT, 0); CAS_WRITE_4(sc, CAS_MAC_RX_CODE_VIOL, 0); /* Set XOFF PAUSE time. */ CAS_WRITE_4(sc, CAS_MAC_SPC, 0x1BF0 << CAS_MAC_SPC_TIME_SHFT); /* Set the station address. */ CAS_WRITE_4(sc, CAS_MAC_ADDR0, (laddr[4] << 8) | laddr[5]); CAS_WRITE_4(sc, CAS_MAC_ADDR1, (laddr[2] << 8) | laddr[3]); CAS_WRITE_4(sc, CAS_MAC_ADDR2, (laddr[0] << 8) | laddr[1]); /* Enable MII outputs. */ CAS_WRITE_4(sc, CAS_MAC_XIF_CONF, CAS_MAC_XIF_CONF_TX_OE); } static void cas_tx_task(void *arg, int pending __unused) { struct ifnet *ifp; ifp = (struct ifnet *)arg; cas_start(ifp); } static inline void cas_txkick(struct cas_softc *sc) { /* * Update the TX kick register. This register has to point to the * descriptor after the last valid one and for optimum performance * should be incremented in multiples of 4 (the DMA engine fetches/ * updates descriptors in batches of 4). 
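 *
 * Illustrative aside, not part of the driver: because the DMA engine works
 * on batches of 4 descriptors, cas_start() below only kicks on batch
 * boundaries and issues one final catch-up kick after its loop.  A
 * hypothetical condensed sketch of that rule (maybe_kick() is a made-up
 * name):
 *
 *	static void
 *	maybe_kick(struct cas_softc *sc, u_int prod)
 *	{
 *		if ((prod & 3) == 0)	// producer index on a batch boundary
 *			CAS_WRITE_4(sc, CAS_TX_KICK3, prod);
 *	}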
*/ #ifdef CAS_DEBUG CTR3(KTR_CAS, "%s: %s: kicking TX %d", device_get_name(sc->sc_dev), __func__, sc->sc_txnext); #endif CAS_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); CAS_WRITE_4(sc, CAS_TX_KICK3, sc->sc_txnext); } static void cas_start(struct ifnet *ifp) { struct cas_softc *sc = ifp->if_softc; struct mbuf *m; int kicked, ntx; CAS_LOCK(sc); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING || (sc->sc_flags & CAS_LINK) == 0) { CAS_UNLOCK(sc); return; } if (sc->sc_txfree < CAS_MAXTXFREE / 4) cas_tint(sc); #ifdef CAS_DEBUG CTR4(KTR_CAS, "%s: %s: txfree %d, txnext %d", device_get_name(sc->sc_dev), __func__, sc->sc_txfree, sc->sc_txnext); #endif ntx = 0; kicked = 0; for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->sc_txfree > 1;) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; if (cas_load_txmbuf(sc, &m) != 0) { if (m == NULL) break; ifp->if_drv_flags |= IFF_DRV_OACTIVE; IFQ_DRV_PREPEND(&ifp->if_snd, m); break; } if ((sc->sc_txnext % 4) == 0) { cas_txkick(sc); kicked = 1; } else kicked = 0; ntx++; BPF_MTAP(ifp, m); } if (ntx > 0) { if (kicked == 0) cas_txkick(sc); #ifdef CAS_DEBUG CTR2(KTR_CAS, "%s: packets enqueued, OWN on %d", device_get_name(sc->sc_dev), sc->sc_txnext); #endif /* Set a watchdog timer in case the chip flakes out. */ sc->sc_wdog_timer = 5; #ifdef CAS_DEBUG CTR3(KTR_CAS, "%s: %s: watchdog %d", device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer); #endif } CAS_UNLOCK(sc); } static void cas_tint(struct cas_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct cas_txsoft *txs; int progress; uint32_t txlast; #ifdef CAS_DEBUG int i; CAS_LOCK_ASSERT(sc, MA_OWNED); CTR2(KTR_CAS, "%s: %s", device_get_name(sc->sc_dev), __func__); #endif /* * Go through our TX list and free mbufs for those * frames that have been transmitted. */ progress = 0; CAS_CDSYNC(sc, BUS_DMASYNC_POSTREAD); while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { #ifdef CAS_DEBUG if ((ifp->if_flags & IFF_DEBUG) != 0) { printf(" txsoft %p transmit chain:\n", txs); for (i = txs->txs_firstdesc;; i = CAS_NEXTTX(i)) { printf("descriptor %d: ", i); printf("cd_flags: 0x%016llx\t", (long long)le64toh( sc->sc_txdescs[i].cd_flags)); printf("cd_buf_ptr: 0x%016llx\n", (long long)le64toh( sc->sc_txdescs[i].cd_buf_ptr)); if (i == txs->txs_lastdesc) break; } } #endif /* * In theory, we could harvest some descriptors before * the ring is empty, but that's a bit complicated. * * CAS_TX_COMPn points to the last descriptor * processed + 1. */ txlast = CAS_READ_4(sc, CAS_TX_COMP3); #ifdef CAS_DEBUG CTR4(KTR_CAS, "%s: txs->txs_firstdesc = %d, " "txs->txs_lastdesc = %d, txlast = %d", __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast); #endif if (txs->txs_firstdesc <= txs->txs_lastdesc) { if ((txlast >= txs->txs_firstdesc) && (txlast <= txs->txs_lastdesc)) break; } else { /* Ick -- this command wraps. 
*/ if ((txlast >= txs->txs_firstdesc) || (txlast <= txs->txs_lastdesc)) break; } #ifdef CAS_DEBUG CTR1(KTR_CAS, "%s: releasing a descriptor", __func__); #endif STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); sc->sc_txfree += txs->txs_ndescs; bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); if (txs->txs_mbuf != NULL) { m_freem(txs->txs_mbuf); txs->txs_mbuf = NULL; } STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); progress = 1; } #ifdef CAS_DEBUG CTR5(KTR_CAS, "%s: CAS_TX_SM1 %x CAS_TX_SM2 %x CAS_TX_DESC_BASE %llx " "CAS_TX_COMP3 %x", __func__, CAS_READ_4(sc, CAS_TX_SM1), CAS_READ_4(sc, CAS_TX_SM2), ((long long)CAS_READ_4(sc, CAS_TX_DESC3_BASE_HI) << 32) | CAS_READ_4(sc, CAS_TX_DESC3_BASE_LO), CAS_READ_4(sc, CAS_TX_COMP3)); #endif if (progress) { /* We freed some descriptors, so reset IFF_DRV_OACTIVE. */ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if (STAILQ_EMPTY(&sc->sc_txdirtyq)) sc->sc_wdog_timer = 0; } #ifdef CAS_DEBUG CTR3(KTR_CAS, "%s: %s: watchdog %d", device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer); #endif } static void cas_rint_timeout(void *arg) { + struct epoch_tracker et; struct cas_softc *sc = arg; CAS_LOCK_ASSERT(sc, MA_OWNED); + NET_EPOCH_ENTER(et); cas_rint(sc); + NET_EPOCH_EXIT(et); } static void cas_rint(struct cas_softc *sc) { struct cas_rxdsoft *rxds, *rxds2; struct ifnet *ifp = sc->sc_ifp; struct mbuf *m, *m2; uint64_t word1, word2, word3, word4; uint32_t rxhead; u_int idx, idx2, len, off, skip; CAS_LOCK_ASSERT(sc, MA_OWNED); callout_stop(&sc->sc_rx_ch); #ifdef CAS_DEBUG CTR2(KTR_CAS, "%s: %s", device_get_name(sc->sc_dev), __func__); #endif #define PRINTWORD(n, delimiter) \ printf("word ## n: 0x%016llx%c", (long long)word ## n, delimiter) #define SKIPASSERT(n) \ KASSERT(sc->sc_rxcomps[sc->sc_rxcptr].crc_word ## n == 0, \ ("%s: word ## n not 0", __func__)) #define WORDTOH(n) \ word ## n = le64toh(sc->sc_rxcomps[sc->sc_rxcptr].crc_word ## n) /* * Read the completion head register once. This limits * how long the following loop can execute. */ rxhead = CAS_READ_4(sc, CAS_RX_COMP_HEAD); #ifdef CAS_DEBUG CTR4(KTR_CAS, "%s: sc->sc_rxcptr %d, sc->sc_rxdptr %d, head %d", __func__, sc->sc_rxcptr, sc->sc_rxdptr, rxhead); #endif skip = 0; CAS_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); for (; sc->sc_rxcptr != rxhead; sc->sc_rxcptr = CAS_NEXTRXCOMP(sc->sc_rxcptr)) { if (skip != 0) { SKIPASSERT(1); SKIPASSERT(2); SKIPASSERT(3); --skip; goto skip; } WORDTOH(1); WORDTOH(2); WORDTOH(3); WORDTOH(4); #ifdef CAS_DEBUG if ((ifp->if_flags & IFF_DEBUG) != 0) { printf(" completion %d: ", sc->sc_rxcptr); PRINTWORD(1, '\t'); PRINTWORD(2, '\t'); PRINTWORD(3, '\t'); PRINTWORD(4, '\n'); } #endif if (__predict_false( (word1 & CAS_RC1_TYPE_MASK) == CAS_RC1_TYPE_HW || (word4 & CAS_RC4_ZERO) != 0)) { /* * The descriptor is still marked as owned, although * it is supposed to have completed. This has been * observed on some machines. Just exiting here * might leave the packet sitting around until another * one arrives to trigger a new interrupt, which is * generally undesirable, so set up a timeout. 
*/ callout_reset(&sc->sc_rx_ch, CAS_RXOWN_TICKS, cas_rint_timeout, sc); break; } if (__predict_false( (word4 & (CAS_RC4_BAD | CAS_RC4_LEN_MMATCH)) != 0)) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); device_printf(sc->sc_dev, "receive error: CRC error\n"); continue; } KASSERT(CAS_GET(word1, CAS_RC1_DATA_SIZE) == 0 || CAS_GET(word2, CAS_RC2_HDR_SIZE) == 0, ("%s: data and header present", __func__)); KASSERT((word1 & CAS_RC1_SPLIT_PKT) == 0 || CAS_GET(word2, CAS_RC2_HDR_SIZE) == 0, ("%s: split and header present", __func__)); KASSERT(CAS_GET(word1, CAS_RC1_DATA_SIZE) == 0 || (word1 & CAS_RC1_RELEASE_HDR) == 0, ("%s: data present but header release", __func__)); KASSERT(CAS_GET(word2, CAS_RC2_HDR_SIZE) == 0 || (word1 & CAS_RC1_RELEASE_DATA) == 0, ("%s: header present but data release", __func__)); if ((len = CAS_GET(word2, CAS_RC2_HDR_SIZE)) != 0) { idx = CAS_GET(word2, CAS_RC2_HDR_INDEX); off = CAS_GET(word2, CAS_RC2_HDR_OFF); #ifdef CAS_DEBUG CTR4(KTR_CAS, "%s: hdr at idx %d, off %d, len %d", __func__, idx, off, len); #endif rxds = &sc->sc_rxdsoft[idx]; MGETHDR(m, M_NOWAIT, MT_DATA); if (m != NULL) { refcount_acquire(&rxds->rxds_refcount); bus_dmamap_sync(sc->sc_rdmatag, rxds->rxds_dmamap, BUS_DMASYNC_POSTREAD); m_extadd(m, (char *)rxds->rxds_buf + off * 256 + ETHER_ALIGN, len, cas_free, sc, (void *)(uintptr_t)idx, M_RDONLY, EXT_NET_DRV); if ((m->m_flags & M_EXT) == 0) { m_freem(m); m = NULL; } } if (m != NULL) { m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = len; if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) cas_rxcksum(m, CAS_GET(word4, CAS_RC4_TCP_CSUM)); /* Pass it on. */ CAS_UNLOCK(sc); (*ifp->if_input)(ifp, m); CAS_LOCK(sc); } else if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); if ((word1 & CAS_RC1_RELEASE_HDR) != 0 && refcount_release(&rxds->rxds_refcount) != 0) cas_add_rxdesc(sc, idx); } else if ((len = CAS_GET(word1, CAS_RC1_DATA_SIZE)) != 0) { idx = CAS_GET(word1, CAS_RC1_DATA_INDEX); off = CAS_GET(word1, CAS_RC1_DATA_OFF); #ifdef CAS_DEBUG CTR4(KTR_CAS, "%s: data at idx %d, off %d, len %d", __func__, idx, off, len); #endif rxds = &sc->sc_rxdsoft[idx]; MGETHDR(m, M_NOWAIT, MT_DATA); if (m != NULL) { refcount_acquire(&rxds->rxds_refcount); off += ETHER_ALIGN; m->m_len = min(CAS_PAGE_SIZE - off, len); bus_dmamap_sync(sc->sc_rdmatag, rxds->rxds_dmamap, BUS_DMASYNC_POSTREAD); m_extadd(m, (char *)rxds->rxds_buf + off, m->m_len, cas_free, sc, (void *)(uintptr_t)idx, M_RDONLY, EXT_NET_DRV); if ((m->m_flags & M_EXT) == 0) { m_freem(m); m = NULL; } } idx2 = 0; m2 = NULL; rxds2 = NULL; if ((word1 & CAS_RC1_SPLIT_PKT) != 0) { KASSERT((word1 & CAS_RC1_RELEASE_NEXT) != 0, ("%s: split but no release next", __func__)); idx2 = CAS_GET(word2, CAS_RC2_NEXT_INDEX); #ifdef CAS_DEBUG CTR2(KTR_CAS, "%s: split at idx %d", __func__, idx2); #endif rxds2 = &sc->sc_rxdsoft[idx2]; if (m != NULL) { MGET(m2, M_NOWAIT, MT_DATA); if (m2 != NULL) { refcount_acquire( &rxds2->rxds_refcount); m2->m_len = len - m->m_len; bus_dmamap_sync( sc->sc_rdmatag, rxds2->rxds_dmamap, BUS_DMASYNC_POSTREAD); m_extadd(m2, (char *)rxds2->rxds_buf, m2->m_len, cas_free, sc, (void *)(uintptr_t)idx2, M_RDONLY, EXT_NET_DRV); if ((m2->m_flags & M_EXT) == 0) { m_freem(m2); m2 = NULL; } } } if (m2 != NULL) m->m_next = m2; else if (m != NULL) { m_freem(m); m = NULL; } } if (m != NULL) { m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = len; if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) cas_rxcksum(m, CAS_GET(word4, CAS_RC4_TCP_CSUM)); /* Pass it on. 
*/ CAS_UNLOCK(sc); (*ifp->if_input)(ifp, m); CAS_LOCK(sc); } else if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); if ((word1 & CAS_RC1_RELEASE_DATA) != 0 && refcount_release(&rxds->rxds_refcount) != 0) cas_add_rxdesc(sc, idx); if ((word1 & CAS_RC1_SPLIT_PKT) != 0 && refcount_release(&rxds2->rxds_refcount) != 0) cas_add_rxdesc(sc, idx2); } skip = CAS_GET(word1, CAS_RC1_SKIP); skip: cas_rxcompinit(&sc->sc_rxcomps[sc->sc_rxcptr]); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) break; } CAS_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); CAS_WRITE_4(sc, CAS_RX_COMP_TAIL, sc->sc_rxcptr); #undef PRINTWORD #undef SKIPASSERT #undef WORDTOH #ifdef CAS_DEBUG CTR4(KTR_CAS, "%s: done sc->sc_rxcptr %d, sc->sc_rxdptr %d, head %d", __func__, sc->sc_rxcptr, sc->sc_rxdptr, CAS_READ_4(sc, CAS_RX_COMP_HEAD)); #endif } static void cas_free(struct mbuf *m) { struct cas_rxdsoft *rxds; struct cas_softc *sc; u_int idx, locked; sc = m->m_ext.ext_arg1; idx = (uintptr_t)m->m_ext.ext_arg2; rxds = &sc->sc_rxdsoft[idx]; if (refcount_release(&rxds->rxds_refcount) == 0) return; /* * NB: this function can be called via m_freem(9) within * this driver! */ if ((locked = CAS_LOCK_OWNED(sc)) == 0) CAS_LOCK(sc); cas_add_rxdesc(sc, idx); if (locked == 0) CAS_UNLOCK(sc); } static inline void cas_add_rxdesc(struct cas_softc *sc, u_int idx) { CAS_LOCK_ASSERT(sc, MA_OWNED); bus_dmamap_sync(sc->sc_rdmatag, sc->sc_rxdsoft[idx].rxds_dmamap, BUS_DMASYNC_PREREAD); CAS_UPDATE_RXDESC(sc, sc->sc_rxdptr, idx); sc->sc_rxdptr = CAS_NEXTRXDESC(sc->sc_rxdptr); /* * Update the RX kick register. This register has to point to the * descriptor after the last valid one (before the current batch) * and for optimum performance should be incremented in multiples * of 4 (the DMA engine fetches/updates descriptors in batches of 4). */ if ((sc->sc_rxdptr % 4) == 0) { CAS_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); CAS_WRITE_4(sc, CAS_RX_KICK, (sc->sc_rxdptr + CAS_NRXDESC - 4) & CAS_NRXDESC_MASK); } } static void cas_eint(struct cas_softc *sc, u_int status) { struct ifnet *ifp = sc->sc_ifp; CAS_LOCK_ASSERT(sc, MA_OWNED); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); device_printf(sc->sc_dev, "%s: status 0x%x", __func__, status); if ((status & CAS_INTR_PCI_ERROR_INT) != 0) { status = CAS_READ_4(sc, CAS_ERROR_STATUS); printf(", PCI bus error 0x%x", status); if ((status & CAS_ERROR_OTHER) != 0) { status = pci_read_config(sc->sc_dev, PCIR_STATUS, 2); printf(", PCI status 0x%x", status); pci_write_config(sc->sc_dev, PCIR_STATUS, status, 2); } } printf("\n"); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; cas_init_locked(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task); } static int cas_intr(void *v) { struct cas_softc *sc = v; if (__predict_false((CAS_READ_4(sc, CAS_STATUS_ALIAS) & CAS_INTR_SUMMARY) == 0)) return (FILTER_STRAY); /* Disable interrupts. 
*/ CAS_WRITE_4(sc, CAS_INTMASK, 0xffffffff); taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task); return (FILTER_HANDLED); } static void cas_intr_task(void *arg, int pending __unused) { struct cas_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; uint32_t status, status2; CAS_LOCK_ASSERT(sc, MA_NOTOWNED); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; status = CAS_READ_4(sc, CAS_STATUS); if (__predict_false((status & CAS_INTR_SUMMARY) == 0)) goto done; CAS_LOCK(sc); #ifdef CAS_DEBUG CTR4(KTR_CAS, "%s: %s: cplt %x, status %x", device_get_name(sc->sc_dev), __func__, (status >> CAS_STATUS_TX_COMP3_SHFT), (u_int)status); /* * PCS interrupts must be cleared, otherwise no traffic is passed! */ if ((status & CAS_INTR_PCS_INT) != 0) { status2 = CAS_READ_4(sc, CAS_PCS_INTR_STATUS) | CAS_READ_4(sc, CAS_PCS_INTR_STATUS); if ((status2 & CAS_PCS_INTR_LINK) != 0) device_printf(sc->sc_dev, "%s: PCS link status changed\n", __func__); } if ((status & CAS_MAC_CTRL_STATUS) != 0) { status2 = CAS_READ_4(sc, CAS_MAC_CTRL_STATUS); if ((status2 & CAS_MAC_CTRL_PAUSE) != 0) device_printf(sc->sc_dev, "%s: PAUSE received (PAUSE time %d slots)\n", __func__, (status2 & CAS_MAC_CTRL_STATUS_PT_MASK) >> CAS_MAC_CTRL_STATUS_PT_SHFT); if ((status2 & CAS_MAC_CTRL_PAUSE) != 0) device_printf(sc->sc_dev, "%s: transited to PAUSE state\n", __func__); if ((status2 & CAS_MAC_CTRL_NON_PAUSE) != 0) device_printf(sc->sc_dev, "%s: transited to non-PAUSE state\n", __func__); } if ((status & CAS_INTR_MIF) != 0) device_printf(sc->sc_dev, "%s: MIF interrupt\n", __func__); #endif if (__predict_false((status & (CAS_INTR_TX_TAG_ERR | CAS_INTR_RX_TAG_ERR | CAS_INTR_RX_LEN_MMATCH | CAS_INTR_PCI_ERROR_INT)) != 0)) { cas_eint(sc, status); CAS_UNLOCK(sc); return; } if (__predict_false(status & CAS_INTR_TX_MAC_INT)) { status2 = CAS_READ_4(sc, CAS_MAC_TX_STATUS); if ((status2 & (CAS_MAC_TX_UNDERRUN | CAS_MAC_TX_MAX_PKT_ERR)) != 0) if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); else if ((status2 & ~CAS_MAC_TX_FRAME_XMTD) != 0) device_printf(sc->sc_dev, "MAC TX fault, status %x\n", status2); } if (__predict_false(status & CAS_INTR_RX_MAC_INT)) { status2 = CAS_READ_4(sc, CAS_MAC_RX_STATUS); if ((status2 & CAS_MAC_RX_OVERFLOW) != 0) if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); else if ((status2 & ~CAS_MAC_RX_FRAME_RCVD) != 0) device_printf(sc->sc_dev, "MAC RX fault, status %x\n", status2); } if ((status & (CAS_INTR_RX_DONE | CAS_INTR_RX_BUF_NA | CAS_INTR_RX_COMP_FULL | CAS_INTR_RX_BUF_AEMPTY | CAS_INTR_RX_COMP_AFULL)) != 0) { cas_rint(sc); #ifdef CAS_DEBUG if (__predict_false((status & (CAS_INTR_RX_BUF_NA | CAS_INTR_RX_COMP_FULL | CAS_INTR_RX_BUF_AEMPTY | CAS_INTR_RX_COMP_AFULL)) != 0)) device_printf(sc->sc_dev, "RX fault, status %x\n", status); #endif } if ((status & (CAS_INTR_TX_INT_ME | CAS_INTR_TX_ALL | CAS_INTR_TX_DONE)) != 0) cas_tint(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { CAS_UNLOCK(sc); return; } else if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task); CAS_UNLOCK(sc); status = CAS_READ_4(sc, CAS_STATUS_ALIAS); if (__predict_false((status & CAS_INTR_SUMMARY) != 0)) { taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task); return; } done: /* Re-enable interrupts. 
*/ CAS_WRITE_4(sc, CAS_INTMASK, ~(CAS_INTR_TX_INT_ME | CAS_INTR_TX_TAG_ERR | CAS_INTR_RX_DONE | CAS_INTR_RX_BUF_NA | CAS_INTR_RX_TAG_ERR | CAS_INTR_RX_COMP_FULL | CAS_INTR_RX_BUF_AEMPTY | CAS_INTR_RX_COMP_AFULL | CAS_INTR_RX_LEN_MMATCH | CAS_INTR_PCI_ERROR_INT #ifdef CAS_DEBUG | CAS_INTR_PCS_INT | CAS_INTR_MIF #endif )); } static void cas_watchdog(struct cas_softc *sc) { struct ifnet *ifp = sc->sc_ifp; CAS_LOCK_ASSERT(sc, MA_OWNED); #ifdef CAS_DEBUG CTR4(KTR_CAS, "%s: CAS_RX_CONF %x CAS_MAC_RX_STATUS %x CAS_MAC_RX_CONF %x", __func__, CAS_READ_4(sc, CAS_RX_CONF), CAS_READ_4(sc, CAS_MAC_RX_STATUS), CAS_READ_4(sc, CAS_MAC_RX_CONF)); CTR4(KTR_CAS, "%s: CAS_TX_CONF %x CAS_MAC_TX_STATUS %x CAS_MAC_TX_CONF %x", __func__, CAS_READ_4(sc, CAS_TX_CONF), CAS_READ_4(sc, CAS_MAC_TX_STATUS), CAS_READ_4(sc, CAS_MAC_TX_CONF)); #endif if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0) return; if ((sc->sc_flags & CAS_LINK) != 0) device_printf(sc->sc_dev, "device timeout\n"); else if (bootverbose) device_printf(sc->sc_dev, "device timeout (no link)\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); /* Try to get more packets going. */ ifp->if_drv_flags &= ~IFF_DRV_RUNNING; cas_init_locked(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task); } static void cas_mifinit(struct cas_softc *sc) { /* Configure the MIF in frame mode. */ CAS_WRITE_4(sc, CAS_MIF_CONF, CAS_READ_4(sc, CAS_MIF_CONF) & ~CAS_MIF_CONF_BB_MODE); CAS_BARRIER(sc, CAS_MIF_CONF, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); } /* * MII interface * * The MII interface supports at least three different operating modes: * * Bitbang mode is implemented using data, clock and output enable registers. * * Frame mode is implemented by loading a complete frame into the frame * register and polling the valid bit for completion. * * Polling mode uses the frame register but completion is indicated by * an interrupt. * */ static int cas_mii_readreg(device_t dev, int phy, int reg) { struct cas_softc *sc; int n; uint32_t v; #ifdef CAS_DEBUG_PHY printf("%s: phy %d reg %d\n", __func__, phy, reg); #endif sc = device_get_softc(dev); if ((sc->sc_flags & CAS_SERDES) != 0) { switch (reg) { case MII_BMCR: reg = CAS_PCS_CTRL; break; case MII_BMSR: reg = CAS_PCS_STATUS; break; case MII_PHYIDR1: case MII_PHYIDR2: return (0); case MII_ANAR: reg = CAS_PCS_ANAR; break; case MII_ANLPAR: reg = CAS_PCS_ANLPAR; break; case MII_EXTSR: return (EXTSR_1000XFDX | EXTSR_1000XHDX); default: device_printf(sc->sc_dev, "%s: unhandled register %d\n", __func__, reg); return (0); } return (CAS_READ_4(sc, reg)); } /* Construct the frame command. 
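 *
 * Illustrative aside, not part of the driver: in frame mode a clause 22
 * MII access packs the opcode plus the 5-bit PHY and register addresses
 * into a single word.  Hypothetical example building the frame word, as
 * done just below, for reading MII_BMSR (register 1) of the PHY at
 * address 0:
 *
 *	uint32_t frame;
 *
 *	frame = CAS_MIF_FRAME_READ |
 *	    (0 << CAS_MIF_FRAME_PHY_SHFT) |
 *	    (MII_BMSR << CAS_MIF_FRAME_REG_SHFT);
 *	CAS_WRITE_4(sc, CAS_MIF_FRAME, frame);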
*/ v = CAS_MIF_FRAME_READ | (phy << CAS_MIF_FRAME_PHY_SHFT) | (reg << CAS_MIF_FRAME_REG_SHFT); CAS_WRITE_4(sc, CAS_MIF_FRAME, v); CAS_BARRIER(sc, CAS_MIF_FRAME, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); for (n = 0; n < 100; n++) { DELAY(1); v = CAS_READ_4(sc, CAS_MIF_FRAME); if (v & CAS_MIF_FRAME_TA_LSB) return (v & CAS_MIF_FRAME_DATA); } device_printf(sc->sc_dev, "%s: timed out\n", __func__); return (0); } static int cas_mii_writereg(device_t dev, int phy, int reg, int val) { struct cas_softc *sc; int n; uint32_t v; #ifdef CAS_DEBUG_PHY printf("%s: phy %d reg %d val %x\n", phy, reg, val, __func__); #endif sc = device_get_softc(dev); if ((sc->sc_flags & CAS_SERDES) != 0) { switch (reg) { case MII_BMSR: reg = CAS_PCS_STATUS; break; case MII_BMCR: reg = CAS_PCS_CTRL; if ((val & CAS_PCS_CTRL_RESET) == 0) break; CAS_WRITE_4(sc, CAS_PCS_CTRL, val); CAS_BARRIER(sc, CAS_PCS_CTRL, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); if (!cas_bitwait(sc, CAS_PCS_CTRL, CAS_PCS_CTRL_RESET, 0)) device_printf(sc->sc_dev, "cannot reset PCS\n"); /* FALLTHROUGH */ case MII_ANAR: CAS_WRITE_4(sc, CAS_PCS_CONF, 0); CAS_BARRIER(sc, CAS_PCS_CONF, 4, BUS_SPACE_BARRIER_WRITE); CAS_WRITE_4(sc, CAS_PCS_ANAR, val); CAS_BARRIER(sc, CAS_PCS_ANAR, 4, BUS_SPACE_BARRIER_WRITE); CAS_WRITE_4(sc, CAS_PCS_SERDES_CTRL, CAS_PCS_SERDES_CTRL_ESD); CAS_BARRIER(sc, CAS_PCS_CONF, 4, BUS_SPACE_BARRIER_WRITE); CAS_WRITE_4(sc, CAS_PCS_CONF, CAS_PCS_CONF_EN); CAS_BARRIER(sc, CAS_PCS_CONF, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); return (0); case MII_ANLPAR: reg = CAS_PCS_ANLPAR; break; default: device_printf(sc->sc_dev, "%s: unhandled register %d\n", __func__, reg); return (0); } CAS_WRITE_4(sc, reg, val); CAS_BARRIER(sc, reg, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); return (0); } /* Construct the frame command. */ v = CAS_MIF_FRAME_WRITE | (phy << CAS_MIF_FRAME_PHY_SHFT) | (reg << CAS_MIF_FRAME_REG_SHFT) | (val & CAS_MIF_FRAME_DATA); CAS_WRITE_4(sc, CAS_MIF_FRAME, v); CAS_BARRIER(sc, CAS_MIF_FRAME, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); for (n = 0; n < 100; n++) { DELAY(1); v = CAS_READ_4(sc, CAS_MIF_FRAME); if (v & CAS_MIF_FRAME_TA_LSB) return (1); } device_printf(sc->sc_dev, "%s: timed out\n", __func__); return (0); } static void cas_mii_statchg(device_t dev) { struct cas_softc *sc; struct ifnet *ifp; int gigabit; uint32_t rxcfg, txcfg, v; sc = device_get_softc(dev); ifp = sc->sc_ifp; CAS_LOCK_ASSERT(sc, MA_OWNED); #ifdef CAS_DEBUG if ((ifp->if_flags & IFF_DEBUG) != 0) device_printf(sc->sc_dev, "%s: status changen", __func__); #endif if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 && IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE) sc->sc_flags |= CAS_LINK; else sc->sc_flags &= ~CAS_LINK; switch (IFM_SUBTYPE(sc->sc_mii->mii_media_active)) { case IFM_1000_SX: case IFM_1000_LX: case IFM_1000_CX: case IFM_1000_T: gigabit = 1; break; default: gigabit = 0; } /* * The configuration done here corresponds to the steps F) and * G) and as far as enabling of RX and TX MAC goes also step H) * of the initialization sequence outlined in section 11.2.1 of * the Cassini+ ASIC Specification. 
*/ rxcfg = sc->sc_mac_rxcfg; rxcfg &= ~CAS_MAC_RX_CONF_CARR; txcfg = CAS_MAC_TX_CONF_EN_IPG0 | CAS_MAC_TX_CONF_NGU | CAS_MAC_TX_CONF_NGUL; if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) txcfg |= CAS_MAC_TX_CONF_ICARR | CAS_MAC_TX_CONF_ICOLLIS; else if (gigabit != 0) { rxcfg |= CAS_MAC_RX_CONF_CARR; txcfg |= CAS_MAC_TX_CONF_CARR; } (void)cas_disable_tx(sc); CAS_WRITE_4(sc, CAS_MAC_TX_CONF, txcfg); (void)cas_disable_rx(sc); CAS_WRITE_4(sc, CAS_MAC_RX_CONF, rxcfg); v = CAS_READ_4(sc, CAS_MAC_CTRL_CONF) & ~(CAS_MAC_CTRL_CONF_TXP | CAS_MAC_CTRL_CONF_RXP); if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) v |= CAS_MAC_CTRL_CONF_RXP; if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) v |= CAS_MAC_CTRL_CONF_TXP; CAS_WRITE_4(sc, CAS_MAC_CTRL_CONF, v); /* * All supported chips have a bug causing incorrect checksum * to be calculated when letting them strip the FCS in half- * duplex mode. In theory we could disable FCS stripping and * manually adjust the checksum accordingly. It seems to make * more sense to optimze for the common case and just disable * hardware checksumming in half-duplex mode though. */ if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0) { ifp->if_capenable &= ~IFCAP_HWCSUM; ifp->if_hwassist = 0; } else if ((sc->sc_flags & CAS_NO_CSUM) == 0) { ifp->if_capenable = ifp->if_capabilities; ifp->if_hwassist = CAS_CSUM_FEATURES; } if (sc->sc_variant == CAS_SATURN) { if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0) /* silicon bug workaround */ CAS_WRITE_4(sc, CAS_MAC_PREAMBLE_LEN, 0x41); else CAS_WRITE_4(sc, CAS_MAC_PREAMBLE_LEN, 0x7); } if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0 && gigabit != 0) CAS_WRITE_4(sc, CAS_MAC_SLOT_TIME, CAS_MAC_SLOT_TIME_CARR); else CAS_WRITE_4(sc, CAS_MAC_SLOT_TIME, CAS_MAC_SLOT_TIME_NORM); /* XIF Configuration */ v = CAS_MAC_XIF_CONF_TX_OE | CAS_MAC_XIF_CONF_LNKLED; if ((sc->sc_flags & CAS_SERDES) == 0) { if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0) v |= CAS_MAC_XIF_CONF_NOECHO; v |= CAS_MAC_XIF_CONF_BUF_OE; } if (gigabit != 0) v |= CAS_MAC_XIF_CONF_GMII; if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) v |= CAS_MAC_XIF_CONF_FDXLED; CAS_WRITE_4(sc, CAS_MAC_XIF_CONF, v); sc->sc_mac_rxcfg = rxcfg; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 && (sc->sc_flags & CAS_LINK) != 0) { CAS_WRITE_4(sc, CAS_MAC_TX_CONF, txcfg | CAS_MAC_TX_CONF_EN); CAS_WRITE_4(sc, CAS_MAC_RX_CONF, rxcfg | CAS_MAC_RX_CONF_EN); } } static int cas_mediachange(struct ifnet *ifp) { struct cas_softc *sc = ifp->if_softc; int error; /* XXX add support for serial media. 
*/ CAS_LOCK(sc); error = mii_mediachg(sc->sc_mii); CAS_UNLOCK(sc); return (error); } static void cas_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) { struct cas_softc *sc = ifp->if_softc; CAS_LOCK(sc); if ((ifp->if_flags & IFF_UP) == 0) { CAS_UNLOCK(sc); return; } mii_pollstat(sc->sc_mii); ifmr->ifm_active = sc->sc_mii->mii_media_active; ifmr->ifm_status = sc->sc_mii->mii_media_status; CAS_UNLOCK(sc); } static int cas_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct cas_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; int error; error = 0; switch (cmd) { case SIOCSIFFLAGS: CAS_LOCK(sc); if ((ifp->if_flags & IFF_UP) != 0) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 && ((ifp->if_flags ^ sc->sc_ifflags) & (IFF_ALLMULTI | IFF_PROMISC)) != 0) cas_setladrf(sc); else cas_init_locked(sc); } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) cas_stop(ifp); sc->sc_ifflags = ifp->if_flags; CAS_UNLOCK(sc); break; case SIOCSIFCAP: CAS_LOCK(sc); if ((sc->sc_flags & CAS_NO_CSUM) != 0) { error = EINVAL; CAS_UNLOCK(sc); break; } ifp->if_capenable = ifr->ifr_reqcap; if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) ifp->if_hwassist = CAS_CSUM_FEATURES; else ifp->if_hwassist = 0; CAS_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: CAS_LOCK(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) cas_setladrf(sc); CAS_UNLOCK(sc); break; case SIOCSIFMTU: if ((ifr->ifr_mtu < ETHERMIN) || (ifr->ifr_mtu > ETHERMTU_JUMBO)) error = EINVAL; else ifp->if_mtu = ifr->ifr_mtu; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd); break; default: error = ether_ioctl(ifp, cmd, data); break; } return (error); } static u_int cas_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { uint32_t crc, *hash = arg; crc = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN); /* We just want the 8 most significant bits. */ crc >>= 24; /* Set the corresponding bit in the filter. */ hash[crc >> 4] |= 1 << (15 - (crc & 15)); return (1); } static void cas_setladrf(struct cas_softc *sc) { struct ifnet *ifp = sc->sc_ifp; int i; uint32_t hash[16]; uint32_t v; CAS_LOCK_ASSERT(sc, MA_OWNED); /* * Turn off the RX MAC and the hash filter as required by the Sun * Cassini programming restrictions. */ v = sc->sc_mac_rxcfg & ~(CAS_MAC_RX_CONF_HFILTER | CAS_MAC_RX_CONF_EN); CAS_WRITE_4(sc, CAS_MAC_RX_CONF, v); CAS_BARRIER(sc, CAS_MAC_RX_CONF, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); if (!cas_bitwait(sc, CAS_MAC_RX_CONF, CAS_MAC_RX_CONF_HFILTER | CAS_MAC_RX_CONF_EN, 0)) device_printf(sc->sc_dev, "cannot disable RX MAC or hash filter\n"); v &= ~(CAS_MAC_RX_CONF_PROMISC | CAS_MAC_RX_CONF_PGRP); if ((ifp->if_flags & IFF_PROMISC) != 0) { v |= CAS_MAC_RX_CONF_PROMISC; goto chipit; } if ((ifp->if_flags & IFF_ALLMULTI) != 0) { v |= CAS_MAC_RX_CONF_PGRP; goto chipit; } /* * Set up multicast address filter by passing all multicast * addresses through a crc generator, and then using the high * order 8 bits as an index into the 256 bit logical address * filter. The high order 4 bits selects the word, while the * other 4 bits select the bit within the word (where bit 0 * is the MSB). */ memset(hash, 0, sizeof(hash)); if_foreach_llmaddr(ifp, cas_hash_maddr, &hash); v |= CAS_MAC_RX_CONF_HFILTER; /* Now load the hash table into the chip (if we are using it). 
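 *
 * Illustrative aside, not part of the driver: worked example for
 * cas_hash_maddr() above.  If the top 8 bits of the CRC are 0xa7, the word
 * index is 0xa7 >> 4 = 10 and the bit set is 1 << (15 - (0xa7 & 15)) =
 * 1 << 8, so the filter update is:
 *
 *	uint32_t hash[16] = { 0 };
 *	uint8_t msb = 0xa7;				// crc >> 24
 *
 *	hash[msb >> 4] |= 1 << (15 - (msb & 15));	// hash[10] |= 0x0100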
*/ for (i = 0; i < 16; i++) CAS_WRITE_4(sc, CAS_MAC_HASH0 + i * (CAS_MAC_HASH1 - CAS_MAC_HASH0), hash[i]); chipit: sc->sc_mac_rxcfg = v; CAS_WRITE_4(sc, CAS_MAC_RX_CONF, v | CAS_MAC_RX_CONF_EN); } static int cas_pci_attach(device_t dev); static int cas_pci_detach(device_t dev); static int cas_pci_probe(device_t dev); static int cas_pci_resume(device_t dev); static int cas_pci_suspend(device_t dev); static device_method_t cas_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, cas_pci_probe), DEVMETHOD(device_attach, cas_pci_attach), DEVMETHOD(device_detach, cas_pci_detach), DEVMETHOD(device_suspend, cas_pci_suspend), DEVMETHOD(device_resume, cas_pci_resume), /* Use the suspend handler here, it is all that is required. */ DEVMETHOD(device_shutdown, cas_pci_suspend), /* MII interface */ DEVMETHOD(miibus_readreg, cas_mii_readreg), DEVMETHOD(miibus_writereg, cas_mii_writereg), DEVMETHOD(miibus_statchg, cas_mii_statchg), DEVMETHOD_END }; static driver_t cas_pci_driver = { "cas", cas_pci_methods, sizeof(struct cas_softc) }; static const struct cas_pci_dev { uint32_t cpd_devid; uint8_t cpd_revid; int cpd_variant; const char *cpd_desc; } cas_pci_devlist[] = { { 0x0035100b, 0x0, CAS_SATURN, "NS DP83065 Saturn Gigabit Ethernet" }, { 0xabba108e, 0x10, CAS_CASPLUS, "Sun Cassini+ Gigabit Ethernet" }, { 0xabba108e, 0x0, CAS_CAS, "Sun Cassini Gigabit Ethernet" }, { 0, 0, 0, NULL } }; DRIVER_MODULE(cas, pci, cas_pci_driver, cas_devclass, 0, 0); MODULE_PNP_INFO("W32:vendor/device", pci, cas, cas_pci_devlist, nitems(cas_pci_devlist) - 1); DRIVER_MODULE(miibus, cas, miibus_driver, miibus_devclass, 0, 0); MODULE_DEPEND(cas, pci, 1, 1, 1); static int cas_pci_probe(device_t dev) { int i; for (i = 0; cas_pci_devlist[i].cpd_desc != NULL; i++) { if (pci_get_devid(dev) == cas_pci_devlist[i].cpd_devid && pci_get_revid(dev) >= cas_pci_devlist[i].cpd_revid) { device_set_desc(dev, cas_pci_devlist[i].cpd_desc); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } static struct resource_spec cas_pci_res_spec[] = { { SYS_RES_IRQ, 0, RF_SHAREABLE | RF_ACTIVE }, /* CAS_RES_INTR */ { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE }, /* CAS_RES_MEM */ { -1, 0 } }; #define CAS_LOCAL_MAC_ADDRESS "local-mac-address" #define CAS_PHY_INTERFACE "phy-interface" #define CAS_PHY_TYPE "phy-type" #define CAS_PHY_TYPE_PCS "pcs" static int cas_pci_attach(device_t dev) { char buf[sizeof(CAS_LOCAL_MAC_ADDRESS)]; struct cas_softc *sc; int i; #if !defined(__powerpc__) u_char enaddr[4][ETHER_ADDR_LEN]; u_int j, k, lma, pcs[4], phy; #endif sc = device_get_softc(dev); sc->sc_variant = CAS_UNKNOWN; for (i = 0; cas_pci_devlist[i].cpd_desc != NULL; i++) { if (pci_get_devid(dev) == cas_pci_devlist[i].cpd_devid && pci_get_revid(dev) >= cas_pci_devlist[i].cpd_revid) { sc->sc_variant = cas_pci_devlist[i].cpd_variant; break; } } if (sc->sc_variant == CAS_UNKNOWN) { device_printf(dev, "unknown adaptor\n"); return (ENXIO); } /* PCI configuration */ pci_write_config(dev, PCIR_COMMAND, pci_read_config(dev, PCIR_COMMAND, 2) | PCIM_CMD_BUSMASTEREN | PCIM_CMD_MWRICEN | PCIM_CMD_PERRESPEN | PCIM_CMD_SERRESPEN, 2); sc->sc_dev = dev; if (sc->sc_variant == CAS_CAS && pci_get_devid(dev) < 0x02) /* Hardware checksumming may hang TX. 
*/ sc->sc_flags |= CAS_NO_CSUM; if (sc->sc_variant == CAS_CASPLUS || sc->sc_variant == CAS_SATURN) sc->sc_flags |= CAS_REG_PLUS; if (sc->sc_variant == CAS_CAS || (sc->sc_variant == CAS_CASPLUS && pci_get_revid(dev) < 0x11)) sc->sc_flags |= CAS_TABORT; if (bootverbose) device_printf(dev, "flags=0x%x\n", sc->sc_flags); if (bus_alloc_resources(dev, cas_pci_res_spec, sc->sc_res)) { device_printf(dev, "failed to allocate resources\n"); bus_release_resources(dev, cas_pci_res_spec, sc->sc_res); return (ENXIO); } CAS_LOCK_INIT(sc, device_get_nameunit(dev)); #if defined(__powerpc__) OF_getetheraddr(dev, sc->sc_enaddr); if (OF_getprop(ofw_bus_get_node(dev), CAS_PHY_INTERFACE, buf, sizeof(buf)) > 0 || OF_getprop(ofw_bus_get_node(dev), CAS_PHY_TYPE, buf, sizeof(buf)) > 0) { buf[sizeof(buf) - 1] = '\0'; if (strcmp(buf, CAS_PHY_TYPE_PCS) == 0) sc->sc_flags |= CAS_SERDES; } #else /* * Dig out VPD (vital product data) and read the MAC address as well * as the PHY type. The VPD resides in the PCI Expansion ROM (PCI * FCode) and can't be accessed via the PCI capability pointer. * SUNW,pci-ce and SUNW,pci-qge use the Enhanced VPD format described * in the free US Patent 7149820. */ #define PCI_ROMHDR_SIZE 0x1c #define PCI_ROMHDR_SIG 0x00 #define PCI_ROMHDR_SIG_MAGIC 0xaa55 /* little endian */ #define PCI_ROMHDR_PTR_DATA 0x18 #define PCI_ROM_SIZE 0x18 #define PCI_ROM_SIG 0x00 #define PCI_ROM_SIG_MAGIC 0x52494350 /* "PCIR", endian */ /* reversed */ #define PCI_ROM_VENDOR 0x04 #define PCI_ROM_DEVICE 0x06 #define PCI_ROM_PTR_VPD 0x08 #define PCI_VPDRES_BYTE0 0x00 #define PCI_VPDRES_ISLARGE(x) ((x) & 0x80) #define PCI_VPDRES_LARGE_NAME(x) ((x) & 0x7f) #define PCI_VPDRES_LARGE_LEN_LSB 0x01 #define PCI_VPDRES_LARGE_LEN_MSB 0x02 #define PCI_VPDRES_LARGE_SIZE 0x03 #define PCI_VPDRES_TYPE_ID_STRING 0x02 /* large */ #define PCI_VPDRES_TYPE_VPD 0x10 /* large */ #define PCI_VPD_KEY0 0x00 #define PCI_VPD_KEY1 0x01 #define PCI_VPD_LEN 0x02 #define PCI_VPD_SIZE 0x03 #define CAS_ROM_READ_1(sc, offs) \ CAS_READ_1((sc), CAS_PCI_ROM_OFFSET + (offs)) #define CAS_ROM_READ_2(sc, offs) \ CAS_READ_2((sc), CAS_PCI_ROM_OFFSET + (offs)) #define CAS_ROM_READ_4(sc, offs) \ CAS_READ_4((sc), CAS_PCI_ROM_OFFSET + (offs)) lma = phy = 0; memset(enaddr, 0, sizeof(enaddr)); memset(pcs, 0, sizeof(pcs)); /* Enable PCI Expansion ROM access. */ CAS_WRITE_4(sc, CAS_BIM_LDEV_OEN, CAS_BIM_LDEV_OEN_PAD | CAS_BIM_LDEV_OEN_PROM); /* Read PCI Expansion ROM header. */ if (CAS_ROM_READ_2(sc, PCI_ROMHDR_SIG) != PCI_ROMHDR_SIG_MAGIC || (i = CAS_ROM_READ_2(sc, PCI_ROMHDR_PTR_DATA)) < PCI_ROMHDR_SIZE) { device_printf(dev, "unexpected PCI Expansion ROM header\n"); goto fail_prom; } /* Read PCI Expansion ROM data. */ if (CAS_ROM_READ_4(sc, i + PCI_ROM_SIG) != PCI_ROM_SIG_MAGIC || CAS_ROM_READ_2(sc, i + PCI_ROM_VENDOR) != pci_get_vendor(dev) || CAS_ROM_READ_2(sc, i + PCI_ROM_DEVICE) != pci_get_device(dev) || (j = CAS_ROM_READ_2(sc, i + PCI_ROM_PTR_VPD)) < i + PCI_ROM_SIZE) { device_printf(dev, "unexpected PCI Expansion ROM data\n"); goto fail_prom; } /* Read PCI VPD. */ next: if (PCI_VPDRES_ISLARGE(CAS_ROM_READ_1(sc, j + PCI_VPDRES_BYTE0)) == 0) { device_printf(dev, "no large PCI VPD\n"); goto fail_prom; } i = (CAS_ROM_READ_1(sc, j + PCI_VPDRES_LARGE_LEN_MSB) << 8) | CAS_ROM_READ_1(sc, j + PCI_VPDRES_LARGE_LEN_LSB); switch (PCI_VPDRES_LARGE_NAME(CAS_ROM_READ_1(sc, j + PCI_VPDRES_BYTE0))) { case PCI_VPDRES_TYPE_ID_STRING: /* Skip identifier string. 
*/ j += PCI_VPDRES_LARGE_SIZE + i; goto next; case PCI_VPDRES_TYPE_VPD: for (j += PCI_VPDRES_LARGE_SIZE; i > 0; i -= PCI_VPD_SIZE + CAS_ROM_READ_1(sc, j + PCI_VPD_LEN), j += PCI_VPD_SIZE + CAS_ROM_READ_1(sc, j + PCI_VPD_LEN)) { if (CAS_ROM_READ_1(sc, j + PCI_VPD_KEY0) != 'Z') /* no Enhanced VPD */ continue; if (CAS_ROM_READ_1(sc, j + PCI_VPD_SIZE) != 'I') /* no instance property */ continue; if (CAS_ROM_READ_1(sc, j + PCI_VPD_SIZE + 3) == 'B') { /* byte array */ if (CAS_ROM_READ_1(sc, j + PCI_VPD_SIZE + 4) != ETHER_ADDR_LEN) continue; bus_read_region_1(sc->sc_res[CAS_RES_MEM], CAS_PCI_ROM_OFFSET + j + PCI_VPD_SIZE + 5, buf, sizeof(buf)); buf[sizeof(buf) - 1] = '\0'; if (strcmp(buf, CAS_LOCAL_MAC_ADDRESS) != 0) continue; bus_read_region_1(sc->sc_res[CAS_RES_MEM], CAS_PCI_ROM_OFFSET + j + PCI_VPD_SIZE + 5 + sizeof(CAS_LOCAL_MAC_ADDRESS), enaddr[lma], sizeof(enaddr[lma])); lma++; if (lma == 4 && phy == 4) break; } else if (CAS_ROM_READ_1(sc, j + PCI_VPD_SIZE + 3) == 'S') { /* string */ if (CAS_ROM_READ_1(sc, j + PCI_VPD_SIZE + 4) != sizeof(CAS_PHY_TYPE_PCS)) continue; bus_read_region_1(sc->sc_res[CAS_RES_MEM], CAS_PCI_ROM_OFFSET + j + PCI_VPD_SIZE + 5, buf, sizeof(buf)); buf[sizeof(buf) - 1] = '\0'; if (strcmp(buf, CAS_PHY_INTERFACE) == 0) k = sizeof(CAS_PHY_INTERFACE); else if (strcmp(buf, CAS_PHY_TYPE) == 0) k = sizeof(CAS_PHY_TYPE); else continue; bus_read_region_1(sc->sc_res[CAS_RES_MEM], CAS_PCI_ROM_OFFSET + j + PCI_VPD_SIZE + 5 + k, buf, sizeof(buf)); buf[sizeof(buf) - 1] = '\0'; if (strcmp(buf, CAS_PHY_TYPE_PCS) == 0) pcs[phy] = 1; phy++; if (lma == 4 && phy == 4) break; } } break; default: device_printf(dev, "unexpected PCI VPD\n"); goto fail_prom; } fail_prom: CAS_WRITE_4(sc, CAS_BIM_LDEV_OEN, 0); if (lma == 0) { device_printf(dev, "could not determine Ethernet address\n"); goto fail; } i = 0; if (lma > 1 && pci_get_slot(dev) < nitems(enaddr)) i = pci_get_slot(dev); memcpy(sc->sc_enaddr, enaddr[i], ETHER_ADDR_LEN); if (phy == 0) { device_printf(dev, "could not determine PHY type\n"); goto fail; } i = 0; if (phy > 1 && pci_get_slot(dev) < nitems(pcs)) i = pci_get_slot(dev); if (pcs[i] != 0) sc->sc_flags |= CAS_SERDES; #endif if (cas_attach(sc) != 0) { device_printf(dev, "could not be attached\n"); goto fail; } if (bus_setup_intr(dev, sc->sc_res[CAS_RES_INTR], INTR_TYPE_NET | INTR_MPSAFE, cas_intr, NULL, sc, &sc->sc_ih) != 0) { device_printf(dev, "failed to set up interrupt\n"); cas_detach(sc); goto fail; } return (0); fail: CAS_LOCK_DESTROY(sc); bus_release_resources(dev, cas_pci_res_spec, sc->sc_res); return (ENXIO); } static int cas_pci_detach(device_t dev) { struct cas_softc *sc; sc = device_get_softc(dev); bus_teardown_intr(dev, sc->sc_res[CAS_RES_INTR], sc->sc_ih); cas_detach(sc); CAS_LOCK_DESTROY(sc); bus_release_resources(dev, cas_pci_res_spec, sc->sc_res); return (0); } static int cas_pci_suspend(device_t dev) { cas_suspend(device_get_softc(dev)); return (0); } static int cas_pci_resume(device_t dev) { cas_resume(device_get_softc(dev)); return (0); } diff --git a/sys/dev/ena/ena.c b/sys/dev/ena/ena.c index a5687d0213fd..f4a7236339cf 100644 --- a/sys/dev/ena/ena.c +++ b/sys/dev/ena/ena.c @@ -1,3637 +1,3637 @@ /*- * BSD LICENSE * * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ena_datapath.h" #include "ena.h" #include "ena_sysctl.h" #ifdef DEV_NETMAP #include "ena_netmap.h" #endif /* DEV_NETMAP */ /********************************************************* * Function prototypes *********************************************************/ static int ena_probe(device_t); static void ena_intr_msix_mgmnt(void *); static void ena_free_pci_resources(struct ena_adapter *); static int ena_change_mtu(if_t, int); static inline void ena_alloc_counters(counter_u64_t *, int); static inline void ena_free_counters(counter_u64_t *, int); static inline void ena_reset_counters(counter_u64_t *, int); static void ena_init_io_rings_common(struct ena_adapter *, struct ena_ring *, uint16_t); static void ena_init_io_rings(struct ena_adapter *); static void ena_free_io_ring_resources(struct ena_adapter *, unsigned int); static void ena_free_all_io_rings_resources(struct ena_adapter *); static int ena_setup_tx_dma_tag(struct ena_adapter *); static int ena_free_tx_dma_tag(struct ena_adapter *); static int ena_setup_rx_dma_tag(struct ena_adapter *); static int ena_free_rx_dma_tag(struct ena_adapter *); static void ena_release_all_tx_dmamap(struct ena_ring *); static int ena_setup_tx_resources(struct ena_adapter *, int); static void ena_free_tx_resources(struct ena_adapter *, int); static int ena_setup_all_tx_resources(struct ena_adapter *); static void ena_free_all_tx_resources(struct ena_adapter *); static int ena_setup_rx_resources(struct ena_adapter *, unsigned int); static void ena_free_rx_resources(struct ena_adapter *, unsigned int); static int ena_setup_all_rx_resources(struct ena_adapter *); static void ena_free_all_rx_resources(struct ena_adapter *); static inline int ena_alloc_rx_mbuf(struct ena_adapter *, struct ena_ring *, struct ena_rx_buffer *); static void ena_free_rx_mbuf(struct ena_adapter *, struct ena_ring *, struct ena_rx_buffer *); static void ena_free_rx_bufs(struct ena_adapter *, unsigned int); static void ena_refill_all_rx_bufs(struct ena_adapter *); static void 
ena_free_all_rx_bufs(struct ena_adapter *); static void ena_free_tx_bufs(struct ena_adapter *, unsigned int); static void ena_free_all_tx_bufs(struct ena_adapter *); static void ena_destroy_all_tx_queues(struct ena_adapter *); static void ena_destroy_all_rx_queues(struct ena_adapter *); static void ena_destroy_all_io_queues(struct ena_adapter *); static int ena_create_io_queues(struct ena_adapter *); static int ena_handle_msix(void *); static int ena_enable_msix(struct ena_adapter *); static void ena_setup_mgmnt_intr(struct ena_adapter *); static int ena_setup_io_intr(struct ena_adapter *); static int ena_request_mgmnt_irq(struct ena_adapter *); static int ena_request_io_irq(struct ena_adapter *); static void ena_free_mgmnt_irq(struct ena_adapter *); static void ena_free_io_irq(struct ena_adapter *); static void ena_free_irqs(struct ena_adapter*); static void ena_disable_msix(struct ena_adapter *); static void ena_unmask_all_io_irqs(struct ena_adapter *); static int ena_rss_configure(struct ena_adapter *); static int ena_up_complete(struct ena_adapter *); static uint64_t ena_get_counter(if_t, ift_counter); static int ena_media_change(if_t); static void ena_media_status(if_t, struct ifmediareq *); static void ena_init(void *); static int ena_ioctl(if_t, u_long, caddr_t); static int ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *); static void ena_update_host_info(struct ena_admin_host_info *, if_t); static void ena_update_hwassist(struct ena_adapter *); static int ena_setup_ifnet(device_t, struct ena_adapter *, struct ena_com_dev_get_features_ctx *); static int ena_enable_wc(struct resource *); static int ena_set_queues_placement_policy(device_t, struct ena_com_dev *, struct ena_admin_feature_llq_desc *, struct ena_llq_configurations *); static int ena_calc_io_queue_num(struct ena_adapter *, struct ena_com_dev_get_features_ctx *); static int ena_calc_queue_size(struct ena_adapter *, struct ena_calc_queue_size_ctx *); static int ena_handle_updated_queues(struct ena_adapter *, struct ena_com_dev_get_features_ctx *); static int ena_rss_init_default(struct ena_adapter *); static void ena_rss_init_default_deferred(void *); static void ena_config_host_info(struct ena_com_dev *, device_t); static int ena_attach(device_t); static int ena_detach(device_t); static int ena_device_init(struct ena_adapter *, device_t, struct ena_com_dev_get_features_ctx *, int *); static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *, int); static void ena_update_on_link_change(void *, struct ena_admin_aenq_entry *); static void unimplemented_aenq_handler(void *, struct ena_admin_aenq_entry *); static void ena_timer_service(void *); static char ena_version[] = DEVICE_NAME DRV_MODULE_NAME " v" DRV_MODULE_VERSION; static ena_vendor_info_t ena_vendor_info_array[] = { { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_PF, 0}, { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_LLQ_PF, 0}, { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_VF, 0}, { PCI_VENDOR_ID_AMAZON, PCI_DEV_ID_ENA_LLQ_VF, 0}, /* Last entry */ { 0, 0, 0 } }; /* * Contains pointers to event handlers, e.g. link state chage. 
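 * For reference, a hedged sketch of how such a handler table is usually
 * populated (only the handlers prototyped above are used in the sketch;
 * any other AENQ group would be routed to unimplemented_aenq_handler):
 *
 *     static struct ena_aenq_handlers aenq_handlers = {
 *         .handlers = {
 *             [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
 *         },
 *         .unimplemented_handler = unimplemented_aenq_handler,
 *     };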
*/ static struct ena_aenq_handlers aenq_handlers; void ena_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error) { if (error != 0) return; *(bus_addr_t *) arg = segs[0].ds_addr; } int ena_dma_alloc(device_t dmadev, bus_size_t size, ena_mem_handle_t *dma , int mapflags) { struct ena_adapter* adapter = device_get_softc(dmadev); uint32_t maxsize; uint64_t dma_space_addr; int error; maxsize = ((size - 1) / PAGE_SIZE + 1) * PAGE_SIZE; dma_space_addr = ENA_DMA_BIT_MASK(adapter->dma_width); if (unlikely(dma_space_addr == 0)) dma_space_addr = BUS_SPACE_MAXADDR; error = bus_dma_tag_create(bus_get_dma_tag(dmadev), /* parent */ 8, 0, /* alignment, bounds */ dma_space_addr, /* lowaddr of exclusion window */ BUS_SPACE_MAXADDR,/* highaddr of exclusion window */ NULL, NULL, /* filter, filterarg */ maxsize, /* maxsize */ 1, /* nsegments */ maxsize, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &dma->tag); if (unlikely(error != 0)) { ena_trace(ENA_ALERT, "bus_dma_tag_create failed: %d\n", error); goto fail_tag; } error = bus_dmamem_alloc(dma->tag, (void**) &dma->vaddr, BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->map); if (unlikely(error != 0)) { ena_trace(ENA_ALERT, "bus_dmamem_alloc(%ju) failed: %d\n", (uintmax_t)size, error); goto fail_map_create; } dma->paddr = 0; error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, ena_dmamap_callback, &dma->paddr, mapflags); if (unlikely((error != 0) || (dma->paddr == 0))) { ena_trace(ENA_ALERT, ": bus_dmamap_load failed: %d\n", error); goto fail_map_load; } bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); fail_map_load: bus_dmamem_free(dma->tag, dma->vaddr, dma->map); fail_map_create: bus_dma_tag_destroy(dma->tag); fail_tag: dma->tag = NULL; dma->vaddr = NULL; dma->paddr = 0; return (error); } static void ena_free_pci_resources(struct ena_adapter *adapter) { device_t pdev = adapter->pdev; if (adapter->memory != NULL) { bus_release_resource(pdev, SYS_RES_MEMORY, PCIR_BAR(ENA_MEM_BAR), adapter->memory); } if (adapter->registers != NULL) { bus_release_resource(pdev, SYS_RES_MEMORY, PCIR_BAR(ENA_REG_BAR), adapter->registers); } } static int ena_probe(device_t dev) { ena_vendor_info_t *ent; char adapter_name[60]; uint16_t pci_vendor_id = 0; uint16_t pci_device_id = 0; pci_vendor_id = pci_get_vendor(dev); pci_device_id = pci_get_device(dev); ent = ena_vendor_info_array; while (ent->vendor_id != 0) { if ((pci_vendor_id == ent->vendor_id) && (pci_device_id == ent->device_id)) { ena_trace(ENA_DBG, "vendor=%x device=%x\n", pci_vendor_id, pci_device_id); sprintf(adapter_name, DEVICE_DESC); device_set_desc_copy(dev, adapter_name); return (BUS_PROBE_DEFAULT); } ent++; } return (ENXIO); } static int ena_change_mtu(if_t ifp, int new_mtu) { struct ena_adapter *adapter = if_getsoftc(ifp); int rc; if ((new_mtu > adapter->max_mtu) || (new_mtu < ENA_MIN_MTU)) { device_printf(adapter->pdev, "Invalid MTU setting. 
" "new_mtu: %d max mtu: %d min mtu: %d\n", new_mtu, adapter->max_mtu, ENA_MIN_MTU); return (EINVAL); } rc = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu); if (likely(rc == 0)) { ena_trace(ENA_DBG, "set MTU to %d\n", new_mtu); if_setmtu(ifp, new_mtu); } else { device_printf(adapter->pdev, "Failed to set MTU to %d\n", new_mtu); } return (rc); } static inline void ena_alloc_counters(counter_u64_t *begin, int size) { counter_u64_t *end = (counter_u64_t *)((char *)begin + size); for (; begin < end; ++begin) *begin = counter_u64_alloc(M_WAITOK); } static inline void ena_free_counters(counter_u64_t *begin, int size) { counter_u64_t *end = (counter_u64_t *)((char *)begin + size); for (; begin < end; ++begin) counter_u64_free(*begin); } static inline void ena_reset_counters(counter_u64_t *begin, int size) { counter_u64_t *end = (counter_u64_t *)((char *)begin + size); for (; begin < end; ++begin) counter_u64_zero(*begin); } static void ena_init_io_rings_common(struct ena_adapter *adapter, struct ena_ring *ring, uint16_t qid) { ring->qid = qid; ring->adapter = adapter; ring->ena_dev = adapter->ena_dev; ring->first_interrupt = false; ring->no_interrupt_event_cnt = 0; } static void ena_init_io_rings(struct ena_adapter *adapter) { struct ena_com_dev *ena_dev; struct ena_ring *txr, *rxr; struct ena_que *que; int i; ena_dev = adapter->ena_dev; for (i = 0; i < adapter->num_queues; i++) { txr = &adapter->tx_ring[i]; rxr = &adapter->rx_ring[i]; /* TX/RX common ring state */ ena_init_io_rings_common(adapter, txr, i); ena_init_io_rings_common(adapter, rxr, i); /* TX specific ring state */ txr->ring_size = adapter->tx_ring_size; txr->tx_max_header_size = ena_dev->tx_max_header_size; txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type; txr->smoothed_interval = ena_com_get_nonadaptive_moderation_interval_tx(ena_dev); /* Allocate a buf ring */ txr->buf_ring_size = adapter->buf_ring_size; txr->br = buf_ring_alloc(txr->buf_ring_size, M_DEVBUF, M_WAITOK, &txr->ring_mtx); /* Alloc TX statistics. */ ena_alloc_counters((counter_u64_t *)&txr->tx_stats, sizeof(txr->tx_stats)); /* RX specific ring state */ rxr->ring_size = adapter->rx_ring_size; rxr->smoothed_interval = ena_com_get_nonadaptive_moderation_interval_rx(ena_dev); /* Alloc RX statistics. 
*/ ena_alloc_counters((counter_u64_t *)&rxr->rx_stats, sizeof(rxr->rx_stats)); /* Initialize locks */ snprintf(txr->mtx_name, nitems(txr->mtx_name), "%s:tx(%d)", device_get_nameunit(adapter->pdev), i); snprintf(rxr->mtx_name, nitems(rxr->mtx_name), "%s:rx(%d)", device_get_nameunit(adapter->pdev), i); mtx_init(&txr->ring_mtx, txr->mtx_name, NULL, MTX_DEF); que = &adapter->que[i]; que->adapter = adapter; que->id = i; que->tx_ring = txr; que->rx_ring = rxr; txr->que = que; rxr->que = que; rxr->empty_rx_queue = 0; } } static void ena_free_io_ring_resources(struct ena_adapter *adapter, unsigned int qid) { struct ena_ring *txr = &adapter->tx_ring[qid]; struct ena_ring *rxr = &adapter->rx_ring[qid]; ena_free_counters((counter_u64_t *)&txr->tx_stats, sizeof(txr->tx_stats)); ena_free_counters((counter_u64_t *)&rxr->rx_stats, sizeof(rxr->rx_stats)); ENA_RING_MTX_LOCK(txr); drbr_free(txr->br, M_DEVBUF); ENA_RING_MTX_UNLOCK(txr); mtx_destroy(&txr->ring_mtx); } static void ena_free_all_io_rings_resources(struct ena_adapter *adapter) { int i; for (i = 0; i < adapter->num_queues; i++) ena_free_io_ring_resources(adapter, i); } static int ena_setup_tx_dma_tag(struct ena_adapter *adapter) { int ret; /* Create DMA tag for Tx buffers */ ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), 1, 0, /* alignment, bounds */ ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */ BUS_SPACE_MAXADDR, /* highaddr of excl window */ NULL, NULL, /* filter, filterarg */ ENA_TSO_MAXSIZE, /* maxsize */ adapter->max_tx_sgl_size - 1, /* nsegments */ ENA_TSO_MAXSIZE, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &adapter->tx_buf_tag); return (ret); } static int ena_free_tx_dma_tag(struct ena_adapter *adapter) { int ret; ret = bus_dma_tag_destroy(adapter->tx_buf_tag); if (likely(ret == 0)) adapter->tx_buf_tag = NULL; return (ret); } static int ena_setup_rx_dma_tag(struct ena_adapter *adapter) { int ret; /* Create DMA tag for Rx buffers*/ ret = bus_dma_tag_create(bus_get_dma_tag(adapter->pdev), /* parent */ 1, 0, /* alignment, bounds */ ENA_DMA_BIT_MASK(adapter->dma_width), /* lowaddr of excl window */ BUS_SPACE_MAXADDR, /* highaddr of excl window */ NULL, NULL, /* filter, filterarg */ MJUM16BYTES, /* maxsize */ adapter->max_rx_sgl_size, /* nsegments */ MJUM16BYTES, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &adapter->rx_buf_tag); return (ret); } static int ena_free_rx_dma_tag(struct ena_adapter *adapter) { int ret; ret = bus_dma_tag_destroy(adapter->rx_buf_tag); if (likely(ret == 0)) adapter->rx_buf_tag = NULL; return (ret); } static void ena_release_all_tx_dmamap(struct ena_ring *tx_ring) { struct ena_adapter *adapter = tx_ring->adapter; struct ena_tx_buffer *tx_info; bus_dma_tag_t tx_tag = adapter->tx_buf_tag;; int i; #ifdef DEV_NETMAP struct ena_netmap_tx_info *nm_info; int j; #endif /* DEV_NETMAP */ for (i = 0; i < tx_ring->ring_size; ++i) { tx_info = &tx_ring->tx_buffer_info[i]; #ifdef DEV_NETMAP if (adapter->ifp->if_capenable & IFCAP_NETMAP) { nm_info = &tx_info->nm_info; for (j = 0; j < ENA_PKT_MAX_BUFS; ++j) { if (nm_info->map_seg[j] != NULL) { bus_dmamap_destroy(tx_tag, nm_info->map_seg[j]); nm_info->map_seg[j] = NULL; } } } #endif /* DEV_NETMAP */ if (tx_info->map_head != NULL) { bus_dmamap_destroy(tx_tag, tx_info->map_head); tx_info->map_head = NULL; } if (tx_info->map_seg != NULL) { bus_dmamap_destroy(tx_tag, tx_info->map_seg); tx_info->map_seg = NULL; } } } /** * ena_setup_tx_resources - allocate Tx resources (Descriptors) * @adapter: 
network interface device structure * @qid: queue index * * Returns 0 on success, otherwise on failure. **/ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid) { struct ena_que *que = &adapter->que[qid]; struct ena_ring *tx_ring = que->tx_ring; int size, i, err; #ifdef DEV_NETMAP bus_dmamap_t *map; int j; ena_netmap_reset_tx_ring(adapter, qid); #endif /* DEV_NETMAP */ size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size; tx_ring->tx_buffer_info = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); if (unlikely(tx_ring->tx_buffer_info == NULL)) return (ENOMEM); size = sizeof(uint16_t) * tx_ring->ring_size; tx_ring->free_tx_ids = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); if (unlikely(tx_ring->free_tx_ids == NULL)) goto err_buf_info_free; size = tx_ring->tx_max_header_size; tx_ring->push_buf_intermediate_buf = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO); if (unlikely(tx_ring->push_buf_intermediate_buf == NULL)) goto err_tx_ids_free; /* Req id stack for TX OOO completions */ for (i = 0; i < tx_ring->ring_size; i++) tx_ring->free_tx_ids[i] = i; /* Reset TX statistics. */ ena_reset_counters((counter_u64_t *)&tx_ring->tx_stats, sizeof(tx_ring->tx_stats)); tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; tx_ring->acum_pkts = 0; /* Make sure that drbr is empty */ ENA_RING_MTX_LOCK(tx_ring); drbr_flush(adapter->ifp, tx_ring->br); ENA_RING_MTX_UNLOCK(tx_ring); /* ... and create the buffer DMA maps */ for (i = 0; i < tx_ring->ring_size; i++) { err = bus_dmamap_create(adapter->tx_buf_tag, 0, &tx_ring->tx_buffer_info[i].map_head); if (unlikely(err != 0)) { ena_trace(ENA_ALERT, "Unable to create Tx DMA map_head for buffer %d\n", i); goto err_map_release; } tx_ring->tx_buffer_info[i].seg_mapped = false; err = bus_dmamap_create(adapter->tx_buf_tag, 0, &tx_ring->tx_buffer_info[i].map_seg); if (unlikely(err != 0)) { ena_trace(ENA_ALERT, "Unable to create Tx DMA map_seg for buffer %d\n", i); goto err_map_release; } tx_ring->tx_buffer_info[i].head_mapped = false; #ifdef DEV_NETMAP if (adapter->ifp->if_capenable & IFCAP_NETMAP) { map = tx_ring->tx_buffer_info[i].nm_info.map_seg; for (j = 0; j < ENA_PKT_MAX_BUFS; j++) { err = bus_dmamap_create(adapter->tx_buf_tag, 0, &map[j]); if (unlikely(err != 0)) { ena_trace(ENA_ALERT, "Unable to create " "Tx DMA for buffer %d %d\n", i, j); goto err_map_release; } } } #endif /* DEV_NETMAP */ } /* Allocate taskqueues */ TASK_INIT(&tx_ring->enqueue_task, 0, ena_deferred_mq_start, tx_ring); tx_ring->enqueue_tq = taskqueue_create_fast("ena_tx_enque", M_NOWAIT, taskqueue_thread_enqueue, &tx_ring->enqueue_tq); if (unlikely(tx_ring->enqueue_tq == NULL)) { ena_trace(ENA_ALERT, "Unable to create taskqueue for enqueue task\n"); i = tx_ring->ring_size; goto err_map_release; } tx_ring->running = true; taskqueue_start_threads(&tx_ring->enqueue_tq, 1, PI_NET, "%s txeq %d", device_get_nameunit(adapter->pdev), que->cpu); return (0); err_map_release: ena_release_all_tx_dmamap(tx_ring); err_tx_ids_free: free(tx_ring->free_tx_ids, M_DEVBUF); tx_ring->free_tx_ids = NULL; err_buf_info_free: free(tx_ring->tx_buffer_info, M_DEVBUF); tx_ring->tx_buffer_info = NULL; return (ENOMEM); } /** * ena_free_tx_resources - Free Tx Resources per Queue * @adapter: network interface device structure * @qid: queue index * * Free all transmit software resources **/ static void ena_free_tx_resources(struct ena_adapter *adapter, int qid) { struct ena_ring *tx_ring = &adapter->tx_ring[qid]; #ifdef DEV_NETMAP struct ena_netmap_tx_info *nm_info; int j; #endif /* DEV_NETMAP */ while 
(taskqueue_cancel(tx_ring->enqueue_tq, &tx_ring->enqueue_task, NULL)) taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task); taskqueue_free(tx_ring->enqueue_tq); ENA_RING_MTX_LOCK(tx_ring); /* Flush buffer ring, */ drbr_flush(adapter->ifp, tx_ring->br); /* Free buffer DMA maps, */ for (int i = 0; i < tx_ring->ring_size; i++) { if (tx_ring->tx_buffer_info[i].head_mapped == true) { bus_dmamap_sync(adapter->tx_buf_tag, tx_ring->tx_buffer_info[i].map_head, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(adapter->tx_buf_tag, tx_ring->tx_buffer_info[i].map_head); tx_ring->tx_buffer_info[i].head_mapped = false; } bus_dmamap_destroy(adapter->tx_buf_tag, tx_ring->tx_buffer_info[i].map_head); if (tx_ring->tx_buffer_info[i].seg_mapped == true) { bus_dmamap_sync(adapter->tx_buf_tag, tx_ring->tx_buffer_info[i].map_seg, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(adapter->tx_buf_tag, tx_ring->tx_buffer_info[i].map_seg); tx_ring->tx_buffer_info[i].seg_mapped = false; } bus_dmamap_destroy(adapter->tx_buf_tag, tx_ring->tx_buffer_info[i].map_seg); #ifdef DEV_NETMAP if (adapter->ifp->if_capenable & IFCAP_NETMAP) { nm_info = &tx_ring->tx_buffer_info[i].nm_info; for (j = 0; j < ENA_PKT_MAX_BUFS; j++) { if (nm_info->socket_buf_idx[j] != 0) { bus_dmamap_sync(adapter->tx_buf_tag, nm_info->map_seg[j], BUS_DMASYNC_POSTWRITE); ena_netmap_unload(adapter, nm_info->map_seg[j]); } bus_dmamap_destroy(adapter->tx_buf_tag, nm_info->map_seg[j]); nm_info->socket_buf_idx[j] = 0; } } #endif /* DEV_NETMAP */ m_freem(tx_ring->tx_buffer_info[i].mbuf); tx_ring->tx_buffer_info[i].mbuf = NULL; } ENA_RING_MTX_UNLOCK(tx_ring); /* And free allocated memory. */ free(tx_ring->tx_buffer_info, M_DEVBUF); tx_ring->tx_buffer_info = NULL; free(tx_ring->free_tx_ids, M_DEVBUF); tx_ring->free_tx_ids = NULL; ENA_MEM_FREE(adapter->ena_dev->dmadev, tx_ring->push_buf_intermediate_buf); tx_ring->push_buf_intermediate_buf = NULL; } /** * ena_setup_all_tx_resources - allocate all queues Tx resources * @adapter: network interface device structure * * Returns 0 on success, otherwise on failure. **/ static int ena_setup_all_tx_resources(struct ena_adapter *adapter) { int i, rc; for (i = 0; i < adapter->num_queues; i++) { rc = ena_setup_tx_resources(adapter, i); if (rc != 0) { device_printf(adapter->pdev, "Allocation for Tx Queue %u failed\n", i); goto err_setup_tx; } } return (0); err_setup_tx: /* Rewind the index freeing the rings as we go */ while (i--) ena_free_tx_resources(adapter, i); return (rc); } /** * ena_free_all_tx_resources - Free Tx Resources for All Queues * @adapter: network interface device structure * * Free all transmit software resources **/ static void ena_free_all_tx_resources(struct ena_adapter *adapter) { int i; for (i = 0; i < adapter->num_queues; i++) ena_free_tx_resources(adapter, i); } /** * ena_setup_rx_resources - allocate Rx resources (Descriptors) * @adapter: network interface device structure * @qid: queue index * * Returns 0 on success, otherwise on failure. 
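 *
 * A note on allocation behaviour in this path: unlike the Tx setup above,
 * which allocates with M_NOWAIT and unwinds on failure, the Rx buffer-info
 * and free-id arrays below are allocated with M_WAITOK, so this path can
 * sleep and is not safe to call from a non-sleepable context.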
**/ static int ena_setup_rx_resources(struct ena_adapter *adapter, unsigned int qid) { struct ena_que *que = &adapter->que[qid]; struct ena_ring *rx_ring = que->rx_ring; int size, err, i; size = sizeof(struct ena_rx_buffer) * rx_ring->ring_size; #ifdef DEV_NETMAP ena_netmap_reset_rx_ring(adapter, qid); rx_ring->initialized = false; #endif /* DEV_NETMAP */ /* * Alloc extra element so in rx path * we can always prefetch rx_info + 1 */ size += sizeof(struct ena_rx_buffer); rx_ring->rx_buffer_info = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO); size = sizeof(uint16_t) * rx_ring->ring_size; rx_ring->free_rx_ids = malloc(size, M_DEVBUF, M_WAITOK); for (i = 0; i < rx_ring->ring_size; i++) rx_ring->free_rx_ids[i] = i; /* Reset RX statistics. */ ena_reset_counters((counter_u64_t *)&rx_ring->rx_stats, sizeof(rx_ring->rx_stats)); rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; /* ... and create the buffer DMA maps */ for (i = 0; i < rx_ring->ring_size; i++) { err = bus_dmamap_create(adapter->rx_buf_tag, 0, &(rx_ring->rx_buffer_info[i].map)); if (err != 0) { ena_trace(ENA_ALERT, "Unable to create Rx DMA map for buffer %d\n", i); goto err_buf_info_unmap; } } /* Create LRO for the ring */ if ((adapter->ifp->if_capenable & IFCAP_LRO) != 0) { int err = tcp_lro_init(&rx_ring->lro); if (err != 0) { device_printf(adapter->pdev, "LRO[%d] Initialization failed!\n", qid); } else { ena_trace(ENA_INFO, "RX Soft LRO[%d] Initialized\n", qid); rx_ring->lro.ifp = adapter->ifp; } } return (0); err_buf_info_unmap: while (i--) { bus_dmamap_destroy(adapter->rx_buf_tag, rx_ring->rx_buffer_info[i].map); } free(rx_ring->free_rx_ids, M_DEVBUF); rx_ring->free_rx_ids = NULL; free(rx_ring->rx_buffer_info, M_DEVBUF); rx_ring->rx_buffer_info = NULL; return (ENOMEM); } /** * ena_free_rx_resources - Free Rx Resources * @adapter: network interface device structure * @qid: queue index * * Free all receive software resources **/ static void ena_free_rx_resources(struct ena_adapter *adapter, unsigned int qid) { struct ena_ring *rx_ring = &adapter->rx_ring[qid]; /* Free buffer DMA maps, */ for (int i = 0; i < rx_ring->ring_size; i++) { bus_dmamap_sync(adapter->rx_buf_tag, rx_ring->rx_buffer_info[i].map, BUS_DMASYNC_POSTREAD); m_freem(rx_ring->rx_buffer_info[i].mbuf); rx_ring->rx_buffer_info[i].mbuf = NULL; bus_dmamap_unload(adapter->rx_buf_tag, rx_ring->rx_buffer_info[i].map); bus_dmamap_destroy(adapter->rx_buf_tag, rx_ring->rx_buffer_info[i].map); } /* free LRO resources, */ tcp_lro_free(&rx_ring->lro); /* free allocated memory */ free(rx_ring->rx_buffer_info, M_DEVBUF); rx_ring->rx_buffer_info = NULL; free(rx_ring->free_rx_ids, M_DEVBUF); rx_ring->free_rx_ids = NULL; } /** * ena_setup_all_rx_resources - allocate all queues Rx resources * @adapter: network interface device structure * * Returns 0 on success, otherwise on failure. 
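 *
 * On failure, the rings that were already set up are released in reverse
 * order (see the err_setup_rx label below) before the error is propagated.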
**/ static int ena_setup_all_rx_resources(struct ena_adapter *adapter) { int i, rc = 0; for (i = 0; i < adapter->num_queues; i++) { rc = ena_setup_rx_resources(adapter, i); if (rc != 0) { device_printf(adapter->pdev, "Allocation for Rx Queue %u failed\n", i); goto err_setup_rx; } } return (0); err_setup_rx: /* rewind the index freeing the rings as we go */ while (i--) ena_free_rx_resources(adapter, i); return (rc); } /** * ena_free_all_rx_resources - Free Rx resources for all queues * @adapter: network interface device structure * * Free all receive software resources **/ static void ena_free_all_rx_resources(struct ena_adapter *adapter) { int i; for (i = 0; i < adapter->num_queues; i++) ena_free_rx_resources(adapter, i); } static inline int ena_alloc_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info) { struct ena_com_buf *ena_buf; bus_dma_segment_t segs[1]; int nsegs, error; int mlen; /* if previous allocated frag is not used */ if (unlikely(rx_info->mbuf != NULL)) return (0); /* Get mbuf using UMA allocator */ rx_info->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM16BYTES); if (unlikely(rx_info->mbuf == NULL)) { counter_u64_add(rx_ring->rx_stats.mjum_alloc_fail, 1); rx_info->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (unlikely(rx_info->mbuf == NULL)) { counter_u64_add(rx_ring->rx_stats.mbuf_alloc_fail, 1); return (ENOMEM); } mlen = MCLBYTES; } else { mlen = MJUM16BYTES; } /* Set mbuf length*/ rx_info->mbuf->m_pkthdr.len = rx_info->mbuf->m_len = mlen; /* Map packets for DMA */ ena_trace(ENA_DBG | ENA_RSC | ENA_RXPTH, "Using tag %p for buffers' DMA mapping, mbuf %p len: %d\n", adapter->rx_buf_tag,rx_info->mbuf, rx_info->mbuf->m_len); error = bus_dmamap_load_mbuf_sg(adapter->rx_buf_tag, rx_info->map, rx_info->mbuf, segs, &nsegs, BUS_DMA_NOWAIT); if (unlikely((error != 0) || (nsegs != 1))) { ena_trace(ENA_WARNING, "failed to map mbuf, error: %d, " "nsegs: %d\n", error, nsegs); counter_u64_add(rx_ring->rx_stats.dma_mapping_err, 1); goto exit; } bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_PREREAD); ena_buf = &rx_info->ena_buf; ena_buf->paddr = segs[0].ds_addr; ena_buf->len = mlen; ena_trace(ENA_DBG | ENA_RSC | ENA_RXPTH, "ALLOC RX BUF: mbuf %p, rx_info %p, len %d, paddr %#jx\n", rx_info->mbuf, rx_info,ena_buf->len, (uintmax_t)ena_buf->paddr); return (0); exit: m_freem(rx_info->mbuf); rx_info->mbuf = NULL; return (EFAULT); } static void ena_free_rx_mbuf(struct ena_adapter *adapter, struct ena_ring *rx_ring, struct ena_rx_buffer *rx_info) { if (rx_info->mbuf == NULL) { ena_trace(ENA_WARNING, "Trying to free unallocated buffer\n"); return; } bus_dmamap_sync(adapter->rx_buf_tag, rx_info->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(adapter->rx_buf_tag, rx_info->map); m_freem(rx_info->mbuf); rx_info->mbuf = NULL; } /** * ena_refill_rx_bufs - Refills ring with descriptors * @rx_ring: the ring which we want to feed with free descriptors * @num: number of descriptors to refill * Refills the ring with newly allocated DMA-mapped mbufs for receiving **/ int ena_refill_rx_bufs(struct ena_ring *rx_ring, uint32_t num) { struct ena_adapter *adapter = rx_ring->adapter; uint16_t next_to_use, req_id; uint32_t i; int rc; ena_trace(ENA_DBG | ENA_RXPTH | ENA_RSC, "refill qid: %d\n", rx_ring->qid); next_to_use = rx_ring->next_to_use; for (i = 0; i < num; i++) { struct ena_rx_buffer *rx_info; ena_trace(ENA_DBG | ENA_RXPTH | ENA_RSC, "RX buffer - next to use: %d\n", next_to_use); req_id = rx_ring->free_rx_ids[next_to_use]; rx_info = 
&rx_ring->rx_buffer_info[req_id]; #ifdef DEV_NETMAP if (ena_rx_ring_in_netmap(adapter, rx_ring->qid)) rc = ena_netmap_alloc_rx_slot(adapter, rx_ring, rx_info); else #endif /* DEV_NETMAP */ rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info); if (unlikely(rc != 0)) { ena_trace(ENA_WARNING, "failed to alloc buffer for rx queue %d\n", rx_ring->qid); break; } rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq, &rx_info->ena_buf, req_id); if (unlikely(rc != 0)) { ena_trace(ENA_WARNING, "failed to add buffer for rx queue %d\n", rx_ring->qid); break; } next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use, rx_ring->ring_size); } if (unlikely(i < num)) { counter_u64_add(rx_ring->rx_stats.refil_partial, 1); ena_trace(ENA_WARNING, "refilled rx qid %d with only %d mbufs (from %d)\n", rx_ring->qid, i, num); } if (likely(i != 0)) { wmb(); ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq); } rx_ring->next_to_use = next_to_use; return (i); } static void ena_free_rx_bufs(struct ena_adapter *adapter, unsigned int qid) { struct ena_ring *rx_ring = &adapter->rx_ring[qid]; unsigned int i; for (i = 0; i < rx_ring->ring_size; i++) { struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i]; if (rx_info->mbuf != NULL) ena_free_rx_mbuf(adapter, rx_ring, rx_info); #ifdef DEV_NETMAP if (((if_getflags(adapter->ifp) & IFF_DYING) == 0) && (adapter->ifp->if_capenable & IFCAP_NETMAP)) { if (rx_info->netmap_buf_idx != 0) ena_netmap_free_rx_slot(adapter, rx_ring, rx_info); } #endif /* DEV_NETMAP */ } } /** * ena_refill_all_rx_bufs - allocate all queues Rx buffers * @adapter: network interface device structure * */ static void ena_refill_all_rx_bufs(struct ena_adapter *adapter) { struct ena_ring *rx_ring; int i, rc, bufs_num; for (i = 0; i < adapter->num_queues; i++) { rx_ring = &adapter->rx_ring[i]; bufs_num = rx_ring->ring_size - 1; rc = ena_refill_rx_bufs(rx_ring, bufs_num); if (unlikely(rc != bufs_num)) ena_trace(ENA_WARNING, "refilling Queue %d failed. 
" "Allocated %d buffers from: %d\n", i, rc, bufs_num); #ifdef DEV_NETMAP rx_ring->initialized = true; #endif /* DEV_NETMAP */ } } static void ena_free_all_rx_bufs(struct ena_adapter *adapter) { int i; for (i = 0; i < adapter->num_queues; i++) ena_free_rx_bufs(adapter, i); } /** * ena_free_tx_bufs - Free Tx Buffers per Queue * @adapter: network interface device structure * @qid: queue index **/ static void ena_free_tx_bufs(struct ena_adapter *adapter, unsigned int qid) { bool print_once = true; struct ena_ring *tx_ring = &adapter->tx_ring[qid]; ENA_RING_MTX_LOCK(tx_ring); for (int i = 0; i < tx_ring->ring_size; i++) { struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i]; if (tx_info->mbuf == NULL) continue; if (print_once) { device_printf(adapter->pdev, "free uncompleted tx mbuf qid %d idx 0x%x\n", qid, i); print_once = false; } else { ena_trace(ENA_DBG, "free uncompleted tx mbuf qid %d idx 0x%x\n", qid, i); } if (tx_info->head_mapped == true) { bus_dmamap_sync(adapter->tx_buf_tag, tx_info->map_head, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(adapter->tx_buf_tag, tx_info->map_head); tx_info->head_mapped = false; } if (tx_info->seg_mapped == true) { bus_dmamap_sync(adapter->tx_buf_tag, tx_info->map_seg, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(adapter->tx_buf_tag, tx_info->map_seg); tx_info->seg_mapped = false; } m_free(tx_info->mbuf); tx_info->mbuf = NULL; } ENA_RING_MTX_UNLOCK(tx_ring); } static void ena_free_all_tx_bufs(struct ena_adapter *adapter) { for (int i = 0; i < adapter->num_queues; i++) ena_free_tx_bufs(adapter, i); } static void ena_destroy_all_tx_queues(struct ena_adapter *adapter) { uint16_t ena_qid; int i; for (i = 0; i < adapter->num_queues; i++) { ena_qid = ENA_IO_TXQ_IDX(i); ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); } } static void ena_destroy_all_rx_queues(struct ena_adapter *adapter) { uint16_t ena_qid; int i; for (i = 0; i < adapter->num_queues; i++) { ena_qid = ENA_IO_RXQ_IDX(i); ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); } } static void ena_destroy_all_io_queues(struct ena_adapter *adapter) { struct ena_que *queue; int i; for (i = 0; i < adapter->num_queues; i++) { queue = &adapter->que[i]; while (taskqueue_cancel(queue->cleanup_tq, &queue->cleanup_task, NULL)) taskqueue_drain(queue->cleanup_tq, &queue->cleanup_task); taskqueue_free(queue->cleanup_tq); } ena_destroy_all_tx_queues(adapter); ena_destroy_all_rx_queues(adapter); } static int ena_create_io_queues(struct ena_adapter *adapter) { struct ena_com_dev *ena_dev = adapter->ena_dev; struct ena_com_create_io_ctx ctx; struct ena_ring *ring; struct ena_que *queue; uint16_t ena_qid; uint32_t msix_vector; int rc, i; /* Create TX queues */ for (i = 0; i < adapter->num_queues; i++) { msix_vector = ENA_IO_IRQ_IDX(i); ena_qid = ENA_IO_TXQ_IDX(i); ctx.mem_queue_type = ena_dev->tx_mem_queue_type; ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; ctx.queue_size = adapter->tx_ring_size; ctx.msix_vector = msix_vector; ctx.qid = ena_qid; rc = ena_com_create_io_queue(ena_dev, &ctx); if (rc != 0) { device_printf(adapter->pdev, "Failed to create io TX queue #%d rc: %d\n", i, rc); goto err_tx; } ring = &adapter->tx_ring[i]; rc = ena_com_get_io_handlers(ena_dev, ena_qid, &ring->ena_com_io_sq, &ring->ena_com_io_cq); if (rc != 0) { device_printf(adapter->pdev, "Failed to get TX queue handlers. 
TX queue num" " %d rc: %d\n", i, rc); ena_com_destroy_io_queue(ena_dev, ena_qid); goto err_tx; } } /* Create RX queues */ for (i = 0; i < adapter->num_queues; i++) { msix_vector = ENA_IO_IRQ_IDX(i); ena_qid = ENA_IO_RXQ_IDX(i); ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; ctx.queue_size = adapter->rx_ring_size; ctx.msix_vector = msix_vector; ctx.qid = ena_qid; rc = ena_com_create_io_queue(ena_dev, &ctx); if (unlikely(rc != 0)) { device_printf(adapter->pdev, "Failed to create io RX queue[%d] rc: %d\n", i, rc); goto err_rx; } ring = &adapter->rx_ring[i]; rc = ena_com_get_io_handlers(ena_dev, ena_qid, &ring->ena_com_io_sq, &ring->ena_com_io_cq); if (unlikely(rc != 0)) { device_printf(adapter->pdev, "Failed to get RX queue handlers. RX queue num" " %d rc: %d\n", i, rc); ena_com_destroy_io_queue(ena_dev, ena_qid); goto err_rx; } } for (i = 0; i < adapter->num_queues; i++) { queue = &adapter->que[i]; - TASK_INIT(&queue->cleanup_task, 0, ena_cleanup, queue); + NET_TASK_INIT(&queue->cleanup_task, 0, ena_cleanup, queue); queue->cleanup_tq = taskqueue_create_fast("ena cleanup", M_WAITOK, taskqueue_thread_enqueue, &queue->cleanup_tq); taskqueue_start_threads(&queue->cleanup_tq, 1, PI_NET, "%s queue %d cleanup", device_get_nameunit(adapter->pdev), i); } return (0); err_rx: while (i--) ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i)); i = adapter->num_queues; err_tx: while (i--) ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i)); return (ENXIO); } /********************************************************************* * * MSIX & Interrupt Service routine * **********************************************************************/ /** * ena_handle_msix - MSIX Interrupt Handler for admin/async queue * @arg: interrupt number **/ static void ena_intr_msix_mgmnt(void *arg) { struct ena_adapter *adapter = (struct ena_adapter *)arg; ena_com_admin_q_comp_intr_handler(adapter->ena_dev); if (likely(ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter))) ena_com_aenq_intr_handler(adapter->ena_dev, arg); } /** * ena_handle_msix - MSIX Interrupt Handler for Tx/Rx * @arg: queue **/ static int ena_handle_msix(void *arg) { struct ena_que *queue = arg; struct ena_adapter *adapter = queue->adapter; if_t ifp = adapter->ifp; if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)) return (FILTER_STRAY); taskqueue_enqueue(queue->cleanup_tq, &queue->cleanup_task); return (FILTER_HANDLED); } static int ena_enable_msix(struct ena_adapter *adapter) { device_t dev = adapter->pdev; int msix_vecs, msix_req; int i, rc = 0; if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) { device_printf(dev, "Error, MSI-X is already enabled\n"); return (EINVAL); } /* Reserved the max msix vectors we might need */ msix_vecs = ENA_MAX_MSIX_VEC(adapter->num_queues); adapter->msix_entries = malloc(msix_vecs * sizeof(struct msix_entry), M_DEVBUF, M_WAITOK | M_ZERO); ena_trace(ENA_DBG, "trying to enable MSI-X, vectors: %d\n", msix_vecs); for (i = 0; i < msix_vecs; i++) { adapter->msix_entries[i].entry = i; /* Vectors must start from 1 */ adapter->msix_entries[i].vector = i + 1; } msix_req = msix_vecs; rc = pci_alloc_msix(dev, &msix_vecs); if (unlikely(rc != 0)) { device_printf(dev, "Failed to enable MSIX, vectors %d rc %d\n", msix_vecs, rc); rc = ENOSPC; goto err_msix_free; } if (msix_vecs != msix_req) { if (msix_vecs == ENA_ADMIN_MSIX_VEC) { device_printf(dev, "Not enough number of MSI-x allocated: %d\n", msix_vecs); pci_release_msi(dev); rc = ENOSPC; goto err_msix_free; } device_printf(dev, 
"Enable only %d MSI-x (out of %d), reduce " "the number of queues\n", msix_vecs, msix_req); adapter->num_queues = msix_vecs - ENA_ADMIN_MSIX_VEC; } adapter->msix_vecs = msix_vecs; ENA_FLAG_SET_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter); return (0); err_msix_free: free(adapter->msix_entries, M_DEVBUF); adapter->msix_entries = NULL; return (rc); } static void ena_setup_mgmnt_intr(struct ena_adapter *adapter) { snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name, ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s", device_get_nameunit(adapter->pdev)); /* * Handler is NULL on purpose, it will be set * when mgmnt interrupt is acquired */ adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = NULL; adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter; adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector = adapter->msix_entries[ENA_MGMNT_IRQ_IDX].vector; } static int ena_setup_io_intr(struct ena_adapter *adapter) { static int last_bind_cpu = -1; int irq_idx; if (adapter->msix_entries == NULL) return (EINVAL); for (int i = 0; i < adapter->num_queues; i++) { irq_idx = ENA_IO_IRQ_IDX(i); snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE, "%s-TxRx-%d", device_get_nameunit(adapter->pdev), i); adapter->irq_tbl[irq_idx].handler = ena_handle_msix; adapter->irq_tbl[irq_idx].data = &adapter->que[i]; adapter->irq_tbl[irq_idx].vector = adapter->msix_entries[irq_idx].vector; ena_trace(ENA_INFO | ENA_IOQ, "ena_setup_io_intr vector: %d\n", adapter->msix_entries[irq_idx].vector); /* * We want to bind rings to the corresponding cpu * using something similar to the RSS round-robin technique. */ if (unlikely(last_bind_cpu < 0)) last_bind_cpu = CPU_FIRST(); adapter->que[i].cpu = adapter->irq_tbl[irq_idx].cpu = last_bind_cpu; last_bind_cpu = CPU_NEXT(last_bind_cpu); } return (0); } static int ena_request_mgmnt_irq(struct ena_adapter *adapter) { struct ena_irq *irq; unsigned long flags; int rc, rcc; flags = RF_ACTIVE | RF_SHAREABLE; irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ, &irq->vector, flags); if (unlikely(irq->res == NULL)) { device_printf(adapter->pdev, "could not allocate " "irq vector: %d\n", irq->vector); return (ENXIO); } rc = bus_setup_intr(adapter->pdev, irq->res, INTR_TYPE_NET | INTR_MPSAFE, NULL, ena_intr_msix_mgmnt, irq->data, &irq->cookie); if (unlikely(rc != 0)) { device_printf(adapter->pdev, "failed to register " "interrupt handler for irq %ju: %d\n", rman_get_start(irq->res), rc); goto err_res_free; } irq->requested = true; return (rc); err_res_free: ena_trace(ENA_INFO | ENA_ADMQ, "releasing resource for irq %d\n", irq->vector); rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, irq->vector, irq->res); if (unlikely(rcc != 0)) device_printf(adapter->pdev, "dev has no parent while " "releasing res for irq: %d\n", irq->vector); irq->res = NULL; return (rc); } static int ena_request_io_irq(struct ena_adapter *adapter) { struct ena_irq *irq; unsigned long flags = 0; int rc = 0, i, rcc; if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter))) { device_printf(adapter->pdev, "failed to request I/O IRQ: MSI-X is not enabled\n"); return (EINVAL); } else { flags = RF_ACTIVE | RF_SHAREABLE; } for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { irq = &adapter->irq_tbl[i]; if (unlikely(irq->requested)) continue; irq->res = bus_alloc_resource_any(adapter->pdev, SYS_RES_IRQ, &irq->vector, flags); if (unlikely(irq->res == NULL)) { rc = ENOMEM; device_printf(adapter->pdev, "could not allocate " "irq vector: %d\n", irq->vector); goto err; } rc = 
bus_setup_intr(adapter->pdev, irq->res, INTR_TYPE_NET | INTR_MPSAFE, irq->handler, NULL, irq->data, &irq->cookie); if (unlikely(rc != 0)) { device_printf(adapter->pdev, "failed to register " "interrupt handler for irq %ju: %d\n", rman_get_start(irq->res), rc); goto err; } irq->requested = true; ena_trace(ENA_INFO, "queue %d - cpu %d\n", i - ENA_IO_IRQ_FIRST_IDX, irq->cpu); } return (rc); err: for (; i >= ENA_IO_IRQ_FIRST_IDX; i--) { irq = &adapter->irq_tbl[i]; rcc = 0; /* Once we entered err: section and irq->requested is true we free both intr and resources */ if (irq->requested) rcc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie); if (unlikely(rcc != 0)) device_printf(adapter->pdev, "could not release" " irq: %d, error: %d\n", irq->vector, rcc); /* If we entred err: section without irq->requested set we know it was bus_alloc_resource_any() that needs cleanup, provided res is not NULL. In case res is NULL no work in needed in this iteration */ rcc = 0; if (irq->res != NULL) { rcc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, irq->vector, irq->res); } if (unlikely(rcc != 0)) device_printf(adapter->pdev, "dev has no parent while " "releasing res for irq: %d\n", irq->vector); irq->requested = false; irq->res = NULL; } return (rc); } static void ena_free_mgmnt_irq(struct ena_adapter *adapter) { struct ena_irq *irq; int rc; irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; if (irq->requested) { ena_trace(ENA_INFO | ENA_ADMQ, "tear down irq: %d\n", irq->vector); rc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie); if (unlikely(rc != 0)) device_printf(adapter->pdev, "failed to tear " "down irq: %d\n", irq->vector); irq->requested = 0; } if (irq->res != NULL) { ena_trace(ENA_INFO | ENA_ADMQ, "release resource irq: %d\n", irq->vector); rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, irq->vector, irq->res); irq->res = NULL; if (unlikely(rc != 0)) device_printf(adapter->pdev, "dev has no parent while " "releasing res for irq: %d\n", irq->vector); } } static void ena_free_io_irq(struct ena_adapter *adapter) { struct ena_irq *irq; int rc; for (int i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { irq = &adapter->irq_tbl[i]; if (irq->requested) { ena_trace(ENA_INFO | ENA_IOQ, "tear down irq: %d\n", irq->vector); rc = bus_teardown_intr(adapter->pdev, irq->res, irq->cookie); if (unlikely(rc != 0)) { device_printf(adapter->pdev, "failed to tear " "down irq: %d\n", irq->vector); } irq->requested = 0; } if (irq->res != NULL) { ena_trace(ENA_INFO | ENA_IOQ, "release resource irq: %d\n", irq->vector); rc = bus_release_resource(adapter->pdev, SYS_RES_IRQ, irq->vector, irq->res); irq->res = NULL; if (unlikely(rc != 0)) { device_printf(adapter->pdev, "dev has no parent" " while releasing res for irq: %d\n", irq->vector); } } } } static void ena_free_irqs(struct ena_adapter* adapter) { ena_free_io_irq(adapter); ena_free_mgmnt_irq(adapter); ena_disable_msix(adapter); } static void ena_disable_msix(struct ena_adapter *adapter) { if (ENA_FLAG_ISSET(ENA_FLAG_MSIX_ENABLED, adapter)) { ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_MSIX_ENABLED, adapter); pci_release_msi(adapter->pdev); } adapter->msix_vecs = 0; if (adapter->msix_entries != NULL) free(adapter->msix_entries, M_DEVBUF); adapter->msix_entries = NULL; } static void ena_unmask_all_io_irqs(struct ena_adapter *adapter) { struct ena_com_io_cq* io_cq; struct ena_eth_io_intr_reg intr_reg; uint16_t ena_qid; int i; /* Unmask interrupts for all queues */ for (i = 0; i < adapter->num_queues; i++) { ena_qid = ENA_IO_TXQ_IDX(i); io_cq = 
&adapter->ena_dev->io_cq_queues[ena_qid]; ena_com_update_intr_reg(&intr_reg, 0, 0, true); ena_com_unmask_intr(io_cq, &intr_reg); } } /* Configure the Rx forwarding */ static int ena_rss_configure(struct ena_adapter *adapter) { struct ena_com_dev *ena_dev = adapter->ena_dev; int rc; /* Set indirect table */ rc = ena_com_indirect_table_set(ena_dev); if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) return (rc); /* Configure hash function (if supported) */ rc = ena_com_set_hash_function(ena_dev); if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) return (rc); /* Configure hash inputs (if supported) */ rc = ena_com_set_hash_ctrl(ena_dev); if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) return (rc); return (0); } static int ena_up_complete(struct ena_adapter *adapter) { int rc; if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter))) { rc = ena_rss_configure(adapter); if (rc != 0) return (rc); } rc = ena_change_mtu(adapter->ifp, adapter->ifp->if_mtu); if (unlikely(rc != 0)) return (rc); ena_refill_all_rx_bufs(adapter); ena_reset_counters((counter_u64_t *)&adapter->hw_stats, sizeof(adapter->hw_stats)); return (0); } int ena_up(struct ena_adapter *adapter) { int rc = 0; if (unlikely(device_is_attached(adapter->pdev) == 0)) { device_printf(adapter->pdev, "device is not attached!\n"); return (ENXIO); } if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) { device_printf(adapter->pdev, "device is going UP\n"); /* setup interrupts for IO queues */ rc = ena_setup_io_intr(adapter); if (unlikely(rc != 0)) { ena_trace(ENA_ALERT, "error setting up IO interrupt\n"); goto error; } rc = ena_request_io_irq(adapter); if (unlikely(rc != 0)) { ena_trace(ENA_ALERT, "err_req_irq\n"); goto error; } /* allocate transmit descriptors */ rc = ena_setup_all_tx_resources(adapter); if (unlikely(rc != 0)) { ena_trace(ENA_ALERT, "err_setup_tx\n"); goto err_setup_tx; } /* allocate receive descriptors */ rc = ena_setup_all_rx_resources(adapter); if (unlikely(rc != 0)) { ena_trace(ENA_ALERT, "err_setup_rx\n"); goto err_setup_rx; } /* create IO queues for Rx & Tx */ rc = ena_create_io_queues(adapter); if (unlikely(rc != 0)) { ena_trace(ENA_ALERT, "create IO queues failed\n"); goto err_io_que; } if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) if_link_state_change(adapter->ifp, LINK_STATE_UP); rc = ena_up_complete(adapter); if (unlikely(rc != 0)) goto err_up_complete; counter_u64_add(adapter->dev_stats.interface_up, 1); ena_update_hwassist(adapter); if_setdrvflagbits(adapter->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); /* Activate timer service only if the device is running. * If this flag is not set, it means that the driver is being * reset and timer service will be activated afterwards. 
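 * (callout_reset_sbt() below re-arms ena_timer_service() every second:
 *  the second and third arguments are the SBT_1S period and an SBT_1S
 *  precision tolerance, which lets the callout subsystem coalesce the
 *  wakeup with other timers.)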
*/ if (ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter)) { callout_reset_sbt(&adapter->timer_service, SBT_1S, SBT_1S, ena_timer_service, (void *)adapter, 0); } ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP, adapter); ena_unmask_all_io_irqs(adapter); } return (0); err_up_complete: ena_destroy_all_io_queues(adapter); err_io_que: ena_free_all_rx_resources(adapter); err_setup_rx: ena_free_all_tx_resources(adapter); err_setup_tx: ena_free_io_irq(adapter); error: return (rc); } static uint64_t ena_get_counter(if_t ifp, ift_counter cnt) { struct ena_adapter *adapter; struct ena_hw_stats *stats; adapter = if_getsoftc(ifp); stats = &adapter->hw_stats; switch (cnt) { case IFCOUNTER_IPACKETS: return (counter_u64_fetch(stats->rx_packets)); case IFCOUNTER_OPACKETS: return (counter_u64_fetch(stats->tx_packets)); case IFCOUNTER_IBYTES: return (counter_u64_fetch(stats->rx_bytes)); case IFCOUNTER_OBYTES: return (counter_u64_fetch(stats->tx_bytes)); case IFCOUNTER_IQDROPS: return (counter_u64_fetch(stats->rx_drops)); default: return (if_get_counter_default(ifp, cnt)); } } static int ena_media_change(if_t ifp) { /* Media Change is not supported by firmware */ return (0); } static void ena_media_status(if_t ifp, struct ifmediareq *ifmr) { struct ena_adapter *adapter = if_getsoftc(ifp); ena_trace(ENA_DBG, "enter\n"); mtx_lock(&adapter->global_mtx); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (!ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) { mtx_unlock(&adapter->global_mtx); ena_trace(ENA_INFO, "Link is down\n"); return; } ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active |= IFM_UNKNOWN | IFM_FDX; mtx_unlock(&adapter->global_mtx); } static void ena_init(void *arg) { struct ena_adapter *adapter = (struct ena_adapter *)arg; if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) { sx_xlock(&adapter->ioctl_sx); ena_up(adapter); sx_unlock(&adapter->ioctl_sx); } } static int ena_ioctl(if_t ifp, u_long command, caddr_t data) { struct ena_adapter *adapter; struct ifreq *ifr; int rc; adapter = ifp->if_softc; ifr = (struct ifreq *)data; /* * Acquiring lock to prevent from running up and down routines parallel. 
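 * Every reconfiguration below therefore follows the same locked pattern,
 * e.g. for an MTU change:
 *
 *     sx_xlock(&adapter->ioctl_sx);
 *     ena_down(adapter);
 *     ena_change_mtu(ifp, ifr->ifr_mtu);
 *     rc = ena_up(adapter);
 *     sx_unlock(&adapter->ioctl_sx);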
*/ rc = 0; switch (command) { case SIOCSIFMTU: if (ifp->if_mtu == ifr->ifr_mtu) break; sx_xlock(&adapter->ioctl_sx); ena_down(adapter); ena_change_mtu(ifp, ifr->ifr_mtu); rc = ena_up(adapter); sx_unlock(&adapter->ioctl_sx); break; case SIOCSIFFLAGS: if ((ifp->if_flags & IFF_UP) != 0) { if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { device_printf(adapter->pdev, "ioctl promisc/allmulti\n"); } } else { sx_xlock(&adapter->ioctl_sx); rc = ena_up(adapter); sx_unlock(&adapter->ioctl_sx); } } else { if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { sx_xlock(&adapter->ioctl_sx); ena_down(adapter); sx_unlock(&adapter->ioctl_sx); } } break; case SIOCADDMULTI: case SIOCDELMULTI: break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: rc = ifmedia_ioctl(ifp, ifr, &adapter->media, command); break; case SIOCSIFCAP: { int reinit = 0; if (ifr->ifr_reqcap != ifp->if_capenable) { ifp->if_capenable = ifr->ifr_reqcap; reinit = 1; } if ((reinit != 0) && ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)) { sx_xlock(&adapter->ioctl_sx); ena_down(adapter); rc = ena_up(adapter); sx_unlock(&adapter->ioctl_sx); } } break; default: rc = ether_ioctl(ifp, command, data); break; } return (rc); } static int ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *feat) { int caps = 0; if ((feat->offload.tx & (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK | ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK | ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK)) != 0) caps |= IFCAP_TXCSUM; if ((feat->offload.tx & (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK | ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)) != 0) caps |= IFCAP_TXCSUM_IPV6; if ((feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) != 0) caps |= IFCAP_TSO4; if ((feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) != 0) caps |= IFCAP_TSO6; if ((feat->offload.rx_supported & (ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK | ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK)) != 0) caps |= IFCAP_RXCSUM; if ((feat->offload.rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) != 0) caps |= IFCAP_RXCSUM_IPV6; caps |= IFCAP_LRO | IFCAP_JUMBO_MTU; return (caps); } static void ena_update_host_info(struct ena_admin_host_info *host_info, if_t ifp) { host_info->supported_network_features[0] = (uint32_t)if_getcapabilities(ifp); } static void ena_update_hwassist(struct ena_adapter *adapter) { if_t ifp = adapter->ifp; uint32_t feat = adapter->tx_offload_cap; int cap = if_getcapenable(ifp); int flags = 0; if_clearhwassist(ifp); if ((cap & IFCAP_TXCSUM) != 0) { if ((feat & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK) != 0) flags |= CSUM_IP; if ((feat & (ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK | ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)) != 0) flags |= CSUM_IP_UDP | CSUM_IP_TCP; } if ((cap & IFCAP_TXCSUM_IPV6) != 0) flags |= CSUM_IP6_UDP | CSUM_IP6_TCP; if ((cap & IFCAP_TSO4) != 0) flags |= CSUM_IP_TSO; if ((cap & IFCAP_TSO6) != 0) flags |= CSUM_IP6_TSO; if_sethwassistbits(ifp, flags, 0); } static int ena_setup_ifnet(device_t pdev, struct ena_adapter *adapter, struct ena_com_dev_get_features_ctx *feat) { if_t ifp; int caps = 0; ifp = adapter->ifp = if_gethandle(IFT_ETHER); if (unlikely(ifp == NULL)) { ena_trace(ENA_ALERT, "can not allocate ifnet structure\n"); return (ENXIO); } if_initname(ifp, device_get_name(pdev), device_get_unit(pdev)); if_setdev(ifp, pdev); if_setsoftc(ifp, adapter); 
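	/*
	 * The rest of the bring-up follows the usual ifnet order: interface
	 * flags and driver callbacks, send queue sizing, capabilities derived
	 * from the device feature set, TSO limits, media (fixed to ETHER/AUTO
	 * since the device reports no media changes), and finally
	 * ether_ifattach() with the MAC address read from the device.
	 */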
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); if_setinitfn(ifp, ena_init); if_settransmitfn(ifp, ena_mq_start); if_setqflushfn(ifp, ena_qflush); if_setioctlfn(ifp, ena_ioctl); if_setgetcounterfn(ifp, ena_get_counter); if_setsendqlen(ifp, adapter->tx_ring_size); if_setsendqready(ifp); if_setmtu(ifp, ETHERMTU); if_setbaudrate(ifp, 0); /* Zeroize capabilities... */ if_setcapabilities(ifp, 0); if_setcapenable(ifp, 0); /* check hardware support */ caps = ena_get_dev_offloads(feat); /* ... and set them */ if_setcapabilitiesbit(ifp, caps, 0); /* TSO parameters */ ifp->if_hw_tsomax = ENA_TSO_MAXSIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); ifp->if_hw_tsomaxsegcount = adapter->max_tx_sgl_size - 1; ifp->if_hw_tsomaxsegsize = ENA_TSO_MAXSIZE; if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); if_setcapenable(ifp, if_getcapabilities(ifp)); /* * Specify the media types supported by this adapter and register * callbacks to update media and link information */ ifmedia_init(&adapter->media, IFM_IMASK, ena_media_change, ena_media_status); ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); ether_ifattach(ifp, adapter->mac_addr); return (0); } void ena_down(struct ena_adapter *adapter) { int rc; if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) { device_printf(adapter->pdev, "device is going DOWN\n"); callout_drain(&adapter->timer_service); ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP, adapter); if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); ena_free_io_irq(adapter); if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) { rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); if (unlikely(rc != 0)) device_printf(adapter->pdev, "Device reset failed\n"); } ena_destroy_all_io_queues(adapter); ena_free_all_tx_bufs(adapter); ena_free_all_rx_bufs(adapter); ena_free_all_tx_resources(adapter); ena_free_all_rx_resources(adapter); counter_u64_add(adapter->dev_stats.interface_down, 1); } } static int ena_calc_io_queue_num(struct ena_adapter *adapter, struct ena_com_dev_get_features_ctx *get_feat_ctx) { struct ena_com_dev *ena_dev = adapter->ena_dev; int io_tx_sq_num, io_tx_cq_num, io_rx_num, io_queue_num; /* Regular queues capabilities */ if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { struct ena_admin_queue_ext_feature_fields *max_queue_ext = &get_feat_ctx->max_queue_ext.max_queue_ext; io_rx_num = min_t(int, max_queue_ext->max_rx_sq_num, max_queue_ext->max_rx_cq_num); io_tx_sq_num = max_queue_ext->max_tx_sq_num; io_tx_cq_num = max_queue_ext->max_tx_cq_num; } else { struct ena_admin_queue_feature_desc *max_queues = &get_feat_ctx->max_queues; io_tx_sq_num = max_queues->max_sq_num; io_tx_cq_num = max_queues->max_cq_num; io_rx_num = min_t(int, io_tx_sq_num, io_tx_cq_num); } /* In case of LLQ use the llq fields for the tx SQ/CQ */ if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) io_tx_sq_num = get_feat_ctx->llq.max_llq_num; io_queue_num = min_t(int, mp_ncpus, ENA_MAX_NUM_IO_QUEUES); io_queue_num = min_t(int, io_queue_num, io_rx_num); io_queue_num = min_t(int, io_queue_num, io_tx_sq_num); io_queue_num = min_t(int, io_queue_num, io_tx_cq_num); /* 1 IRQ for for mgmnt and 1 IRQ for each TX/RX pair */ io_queue_num = min_t(int, io_queue_num, pci_msix_count(adapter->pdev) - 1); return (io_queue_num); } static int ena_enable_wc(struct resource *res) { #if defined(__i386) || defined(__amd64) || defined(__aarch64__) vm_offset_t va; vm_size_t len; int rc; va = (vm_offset_t)rman_get_virtual(res); 
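	/*
	 * pmap_change_attr() below remaps the LLQ memory BAR as
	 * write-combining so that the bursts of descriptor and header writes
	 * issued in LLQ mode can be merged by the CPU instead of being posted
	 * as individual uncacheable stores; on architectures where this block
	 * is not compiled in, the function returns EOPNOTSUPP instead.
	 */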
len = rman_get_size(res); /* Enable write combining */ rc = pmap_change_attr(va, len, VM_MEMATTR_WRITE_COMBINING); if (unlikely(rc != 0)) { ena_trace(ENA_ALERT, "pmap_change_attr failed, %d\n", rc); return (rc); } return (0); #endif return (EOPNOTSUPP); } static int ena_set_queues_placement_policy(device_t pdev, struct ena_com_dev *ena_dev, struct ena_admin_feature_llq_desc *llq, struct ena_llq_configurations *llq_default_configurations) { struct ena_adapter *adapter = device_get_softc(pdev); int rc, rid; uint32_t llq_feature_mask; llq_feature_mask = 1 << ENA_ADMIN_LLQ; if (!(ena_dev->supported_features & llq_feature_mask)) { device_printf(pdev, "LLQ is not supported. Fallback to host mode policy.\n"); ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; return (0); } rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations); if (unlikely(rc != 0)) { device_printf(pdev, "Failed to configure the device mode. " "Fallback to host mode policy.\n"); ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; return (0); } /* Nothing to config, exit */ if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) return (0); /* Try to allocate resources for LLQ bar */ rid = PCIR_BAR(ENA_MEM_BAR); adapter->memory = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (unlikely(adapter->memory == NULL)) { device_printf(pdev, "unable to allocate LLQ bar resource. " "Fallback to host mode policy.\n"); ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; return (0); } /* Enable write combining for better LLQ performance */ rc = ena_enable_wc(adapter->memory); if (unlikely(rc != 0)) { device_printf(pdev, "failed to enable write combining.\n"); return (rc); } /* * Save virtual address of the device's memory region * for the ena_com layer. 
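 * (ena_com dereferences mem_bar when it copies LLQ descriptors and packet
 *  headers directly into device memory, which is why the write-combining
 *  mapping set up above matters for performance.)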
*/ ena_dev->mem_bar = rman_get_virtual(adapter->memory); return (0); } static inline void set_default_llq_configurations(struct ena_llq_configurations *llq_config) { llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B; llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; llq_config->llq_ring_entry_size_value = 128; } static int ena_calc_queue_size(struct ena_adapter *adapter, struct ena_calc_queue_size_ctx *ctx) { struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq; struct ena_com_dev *ena_dev = ctx->ena_dev; uint32_t tx_queue_size = ENA_DEFAULT_RING_SIZE; uint32_t rx_queue_size = adapter->rx_ring_size; if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { struct ena_admin_queue_ext_feature_fields *max_queue_ext = &ctx->get_feat_ctx->max_queue_ext.max_queue_ext; rx_queue_size = min_t(uint32_t, rx_queue_size, max_queue_ext->max_rx_cq_depth); rx_queue_size = min_t(uint32_t, rx_queue_size, max_queue_ext->max_rx_sq_depth); tx_queue_size = min_t(uint32_t, tx_queue_size, max_queue_ext->max_tx_cq_depth); if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) tx_queue_size = min_t(uint32_t, tx_queue_size, llq->max_llq_depth); else tx_queue_size = min_t(uint32_t, tx_queue_size, max_queue_ext->max_tx_sq_depth); ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, max_queue_ext->max_per_packet_rx_descs); ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, max_queue_ext->max_per_packet_tx_descs); } else { struct ena_admin_queue_feature_desc *max_queues = &ctx->get_feat_ctx->max_queues; rx_queue_size = min_t(uint32_t, rx_queue_size, max_queues->max_cq_depth); rx_queue_size = min_t(uint32_t, rx_queue_size, max_queues->max_sq_depth); tx_queue_size = min_t(uint32_t, tx_queue_size, max_queues->max_cq_depth); if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) tx_queue_size = min_t(uint32_t, tx_queue_size, llq->max_llq_depth); else tx_queue_size = min_t(uint32_t, tx_queue_size, max_queues->max_sq_depth); ctx->max_rx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, max_queues->max_packet_tx_descs); ctx->max_tx_sgl_size = min_t(uint16_t, ENA_PKT_MAX_BUFS, max_queues->max_packet_rx_descs); } /* round down to the nearest power of 2 */ rx_queue_size = 1 << (fls(rx_queue_size) - 1); tx_queue_size = 1 << (fls(tx_queue_size) - 1); if (unlikely(rx_queue_size == 0 || tx_queue_size == 0)) { device_printf(ctx->pdev, "Invalid queue size\n"); return (EFAULT); } ctx->rx_queue_size = rx_queue_size; ctx->tx_queue_size = tx_queue_size; return (0); } static int ena_handle_updated_queues(struct ena_adapter *adapter, struct ena_com_dev_get_features_ctx *get_feat_ctx) { struct ena_com_dev *ena_dev = adapter->ena_dev; struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 }; device_t pdev = adapter->pdev; bool are_queues_changed = false; int io_queue_num, rc; calc_queue_ctx.ena_dev = ena_dev; calc_queue_ctx.get_feat_ctx = get_feat_ctx; calc_queue_ctx.pdev = pdev; io_queue_num = ena_calc_io_queue_num(adapter, get_feat_ctx); rc = ena_calc_queue_size(adapter, &calc_queue_ctx); if (unlikely(rc != 0 || io_queue_num <= 0)) return EFAULT; if (adapter->tx_ring->buf_ring_size != adapter->buf_ring_size) are_queues_changed = true; if (unlikely(adapter->tx_ring_size > calc_queue_ctx.tx_queue_size || adapter->rx_ring_size > calc_queue_ctx.rx_queue_size)) { device_printf(pdev, "Not enough resources to allocate requested queue 
sizes " "(TX,RX)=(%d,%d), falling back to queue sizes " "(TX,RX)=(%d,%d)\n", adapter->tx_ring_size, adapter->rx_ring_size, calc_queue_ctx.tx_queue_size, calc_queue_ctx.rx_queue_size); adapter->tx_ring_size = calc_queue_ctx.tx_queue_size; adapter->rx_ring_size = calc_queue_ctx.rx_queue_size; adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size; adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size; are_queues_changed = true; } if (unlikely(adapter->num_queues > io_queue_num)) { device_printf(pdev, "Not enough resources to allocate %d queues, " "falling back to %d queues\n", adapter->num_queues, io_queue_num); adapter->num_queues = io_queue_num; if (ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter)) { ena_com_rss_destroy(ena_dev); rc = ena_rss_init_default(adapter); if (unlikely(rc != 0) && (rc != EOPNOTSUPP)) { device_printf(pdev, "Cannot init RSS rc: %d\n", rc); return (rc); } } are_queues_changed = true; } if (unlikely(are_queues_changed)) { ena_free_all_io_rings_resources(adapter); ena_init_io_rings(adapter); } return (0); } static int ena_rss_init_default(struct ena_adapter *adapter) { struct ena_com_dev *ena_dev = adapter->ena_dev; device_t dev = adapter->pdev; int qid, rc, i; rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE); if (unlikely(rc != 0)) { device_printf(dev, "Cannot init indirect table\n"); return (rc); } for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) { qid = i % adapter->num_queues; rc = ena_com_indirect_table_fill_entry(ena_dev, i, ENA_IO_RXQ_IDX(qid)); if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) { device_printf(dev, "Cannot fill indirect table\n"); goto err_rss_destroy; } } rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL, ENA_HASH_KEY_SIZE, 0xFFFFFFFF); if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) { device_printf(dev, "Cannot fill hash function\n"); goto err_rss_destroy; } rc = ena_com_set_default_hash_ctrl(ena_dev); if (unlikely((rc != 0) && (rc != EOPNOTSUPP))) { device_printf(dev, "Cannot fill hash control\n"); goto err_rss_destroy; } return (0); err_rss_destroy: ena_com_rss_destroy(ena_dev); return (rc); } static void ena_rss_init_default_deferred(void *arg) { struct ena_adapter *adapter; devclass_t dc; int max; int rc; dc = devclass_find("ena"); if (unlikely(dc == NULL)) { ena_trace(ENA_ALERT, "No devclass ena\n"); return; } max = devclass_get_maxunit(dc); while (max-- >= 0) { adapter = devclass_get_softc(dc, max); if (adapter != NULL) { rc = ena_rss_init_default(adapter); ENA_FLAG_SET_ATOMIC(ENA_FLAG_RSS_ACTIVE, adapter); if (unlikely(rc != 0)) { device_printf(adapter->pdev, "WARNING: RSS was not properly initialized," " it will affect bandwidth\n"); ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_RSS_ACTIVE, adapter); } } } } SYSINIT(ena_rss_init, SI_SUB_KICK_SCHEDULER, SI_ORDER_SECOND, ena_rss_init_default_deferred, NULL); static void ena_config_host_info(struct ena_com_dev *ena_dev, device_t dev) { struct ena_admin_host_info *host_info; uintptr_t rid; int rc; /* Allocate only the host info */ rc = ena_com_allocate_host_info(ena_dev); if (unlikely(rc != 0)) { ena_trace(ENA_ALERT, "Cannot allocate host info\n"); return; } host_info = ena_dev->host_attr.host_info; if (pci_get_id(dev, PCI_ID_RID, &rid) == 0) host_info->bdf = rid; host_info->os_type = ENA_ADMIN_OS_FREEBSD; host_info->kernel_ver = osreldate; sprintf(host_info->kernel_ver_str, "%d", osreldate); host_info->os_dist = 0; strncpy(host_info->os_dist_str, osrelease, sizeof(host_info->os_dist_str) - 1); host_info->driver_version = (DRV_MODULE_VER_MAJOR) | (DRV_MODULE_VER_MINOR << 
ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); host_info->num_cpus = mp_ncpus; rc = ena_com_set_host_attributes(ena_dev); if (unlikely(rc != 0)) { if (rc == EOPNOTSUPP) ena_trace(ENA_WARNING, "Cannot set host attributes\n"); else ena_trace(ENA_ALERT, "Cannot set host attributes\n"); goto err; } return; err: ena_com_delete_host_info(ena_dev); } static int ena_device_init(struct ena_adapter *adapter, device_t pdev, struct ena_com_dev_get_features_ctx *get_feat_ctx, int *wd_active) { struct ena_com_dev* ena_dev = adapter->ena_dev; bool readless_supported; uint32_t aenq_groups; int dma_width; int rc; rc = ena_com_mmio_reg_read_request_init(ena_dev); if (unlikely(rc != 0)) { device_printf(pdev, "failed to init mmio read less\n"); return (rc); } /* * The PCIe configuration space revision id indicate if mmio reg * read is disabled */ readless_supported = !(pci_get_revid(pdev) & ENA_MMIO_DISABLE_REG_READ); ena_com_set_mmio_read_mode(ena_dev, readless_supported); rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL); if (unlikely(rc != 0)) { device_printf(pdev, "Can not reset device\n"); goto err_mmio_read_less; } rc = ena_com_validate_version(ena_dev); if (unlikely(rc != 0)) { device_printf(pdev, "device version is too low\n"); goto err_mmio_read_less; } dma_width = ena_com_get_dma_width(ena_dev); if (unlikely(dma_width < 0)) { device_printf(pdev, "Invalid dma width value %d", dma_width); rc = dma_width; goto err_mmio_read_less; } adapter->dma_width = dma_width; /* ENA admin level init */ rc = ena_com_admin_init(ena_dev, &aenq_handlers); if (unlikely(rc != 0)) { device_printf(pdev, "Can not initialize ena admin queue with device\n"); goto err_mmio_read_less; } /* * To enable the msix interrupts the driver needs to know the number * of queues. 
So the driver uses polling mode to retrieve this * information */ ena_com_set_admin_polling_mode(ena_dev, true); ena_config_host_info(ena_dev, pdev); /* Get Device Attributes */ rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx); if (unlikely(rc != 0)) { device_printf(pdev, "Cannot get attribute for ena device rc: %d\n", rc); goto err_admin_init; } aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) | BIT(ENA_ADMIN_FATAL_ERROR) | BIT(ENA_ADMIN_WARNING) | BIT(ENA_ADMIN_NOTIFICATION) | BIT(ENA_ADMIN_KEEP_ALIVE); aenq_groups &= get_feat_ctx->aenq.supported_groups; rc = ena_com_set_aenq_config(ena_dev, aenq_groups); if (unlikely(rc != 0)) { device_printf(pdev, "Cannot configure aenq groups rc: %d\n", rc); goto err_admin_init; } *wd_active = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); return (0); err_admin_init: ena_com_delete_host_info(ena_dev); ena_com_admin_destroy(ena_dev); err_mmio_read_less: ena_com_mmio_reg_read_request_destroy(ena_dev); return (rc); } static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter, int io_vectors) { struct ena_com_dev *ena_dev = adapter->ena_dev; int rc; rc = ena_enable_msix(adapter); if (unlikely(rc != 0)) { device_printf(adapter->pdev, "Error with MSI-X enablement\n"); return (rc); } ena_setup_mgmnt_intr(adapter); rc = ena_request_mgmnt_irq(adapter); if (unlikely(rc != 0)) { device_printf(adapter->pdev, "Cannot setup mgmnt queue intr\n"); goto err_disable_msix; } ena_com_set_admin_polling_mode(ena_dev, false); ena_com_admin_aenq_enable(ena_dev); return (0); err_disable_msix: ena_disable_msix(adapter); return (rc); } /* Function called on ENA_ADMIN_KEEP_ALIVE event */ static void ena_keep_alive_wd(void *adapter_data, struct ena_admin_aenq_entry *aenq_e) { struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; struct ena_admin_aenq_keep_alive_desc *desc; sbintime_t stime; uint64_t rx_drops; desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e; rx_drops = ((uint64_t)desc->rx_drops_high << 32) | desc->rx_drops_low; counter_u64_zero(adapter->hw_stats.rx_drops); counter_u64_add(adapter->hw_stats.rx_drops, rx_drops); stime = getsbinuptime(); atomic_store_rel_64(&adapter->keep_alive_timestamp, stime); } /* Check for keep alive expiration */ static void check_for_missing_keep_alive(struct ena_adapter *adapter) { sbintime_t timestamp, time; if (adapter->wd_active == 0) return; if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) return; timestamp = atomic_load_acq_64(&adapter->keep_alive_timestamp); time = getsbinuptime() - timestamp; if (unlikely(time > adapter->keep_alive_timeout)) { device_printf(adapter->pdev, "Keep alive watchdog timeout.\n"); counter_u64_add(adapter->dev_stats.wd_expired, 1); if (likely(!ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) { adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO; ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); } } } /* Check if admin queue is enabled */ static void check_for_admin_com_state(struct ena_adapter *adapter) { if (unlikely(ena_com_get_admin_running_state(adapter->ena_dev) == false)) { device_printf(adapter->pdev, "ENA admin queue is not in running state!\n"); counter_u64_add(adapter->dev_stats.admin_q_pause, 1); if (likely(!ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) { adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO; ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); } } } static int check_for_rx_interrupt_queue(struct ena_adapter *adapter, struct ena_ring *rx_ring) { if (likely(rx_ring->first_interrupt)) return (0); if 
(ena_com_cq_empty(rx_ring->ena_com_io_cq)) return (0); rx_ring->no_interrupt_event_cnt++; if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) { device_printf(adapter->pdev, "Potential MSIX issue on Rx side " "Queue = %d. Reset the device\n", rx_ring->qid); if (likely(!ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) { adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT; ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); } return (EIO); } return (0); } static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter, struct ena_ring *tx_ring) { struct bintime curtime, time; struct ena_tx_buffer *tx_buf; sbintime_t time_offset; uint32_t missed_tx = 0; int i, rc = 0; getbinuptime(&curtime); for (i = 0; i < tx_ring->ring_size; i++) { tx_buf = &tx_ring->tx_buffer_info[i]; if (bintime_isset(&tx_buf->timestamp) == 0) continue; time = curtime; bintime_sub(&time, &tx_buf->timestamp); time_offset = bttosbt(time); if (unlikely(!tx_ring->first_interrupt && time_offset > 2 * adapter->missing_tx_timeout)) { /* * If after graceful period interrupt is still not * received, we schedule a reset. */ device_printf(adapter->pdev, "Potential MSIX issue on Tx side Queue = %d. " "Reset the device\n", tx_ring->qid); if (likely(!ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) { adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT; ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); } return (EIO); } /* Check again if packet is still waiting */ if (unlikely(time_offset > adapter->missing_tx_timeout)) { if (!tx_buf->print_once) ena_trace(ENA_WARNING, "Found a Tx that wasn't " "completed on time, qid %d, index %d.\n", tx_ring->qid, i); tx_buf->print_once = true; missed_tx++; } } if (unlikely(missed_tx > adapter->missing_tx_threshold)) { device_printf(adapter->pdev, "The number of lost tx completion is above the threshold " "(%d > %d). Reset the device\n", missed_tx, adapter->missing_tx_threshold); if (likely(!ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) { adapter->reset_reason = ENA_REGS_RESET_MISS_TX_CMPL; ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); } rc = EIO; } counter_u64_add(tx_ring->tx_stats.missing_tx_comp, missed_tx); return (rc); } /* * Check for TX which were not completed on time. * Timeout is defined by "missing_tx_timeout". * Reset will be performed if number of incompleted * transactions exceeds "missing_tx_threshold". */ static void check_for_missing_completions(struct ena_adapter *adapter) { struct ena_ring *tx_ring; struct ena_ring *rx_ring; int i, budget, rc; /* Make sure the driver doesn't turn the device in other process */ rmb(); if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) return; if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) return; if (adapter->missing_tx_timeout == ENA_HW_HINTS_NO_TIMEOUT) return; budget = adapter->missing_tx_max_queues; for (i = adapter->next_monitored_tx_qid; i < adapter->num_queues; i++) { tx_ring = &adapter->tx_ring[i]; rx_ring = &adapter->rx_ring[i]; rc = check_missing_comp_in_tx_queue(adapter, tx_ring); if (unlikely(rc != 0)) return; rc = check_for_rx_interrupt_queue(adapter, rx_ring); if (unlikely(rc != 0)) return; budget--; if (budget == 0) { i++; break; } } adapter->next_monitored_tx_qid = i % adapter->num_queues; } /* trigger rx cleanup after 2 consecutive detections */ #define EMPTY_RX_REFILL 2 /* For the rare case where the device runs out of Rx descriptors and the * msix handler failed to refill new Rx descriptors (due to a lack of memory * for example). 
* This case will lead to a deadlock: * The device won't send interrupts since all the new Rx packets will be dropped * The msix handler won't allocate new Rx descriptors so the device won't be * able to send new packets. * * When such a situation is detected - execute rx cleanup task in another thread */ static void check_for_empty_rx_ring(struct ena_adapter *adapter) { struct ena_ring *rx_ring; int i, refill_required; if (!ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) return; if (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter)) return; for (i = 0; i < adapter->num_queues; i++) { rx_ring = &adapter->rx_ring[i]; refill_required = ena_com_free_desc(rx_ring->ena_com_io_sq); if (unlikely(refill_required == (rx_ring->ring_size - 1))) { rx_ring->empty_rx_queue++; if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) { counter_u64_add(rx_ring->rx_stats.empty_rx_ring, 1); device_printf(adapter->pdev, "trigger refill for ring %d\n", i); taskqueue_enqueue(rx_ring->que->cleanup_tq, &rx_ring->que->cleanup_task); rx_ring->empty_rx_queue = 0; } } else { rx_ring->empty_rx_queue = 0; } } } static void ena_update_hints(struct ena_adapter *adapter, struct ena_admin_ena_hw_hints *hints) { struct ena_com_dev *ena_dev = adapter->ena_dev; if (hints->admin_completion_tx_timeout) ena_dev->admin_queue.completion_timeout = hints->admin_completion_tx_timeout * 1000; if (hints->mmio_read_timeout) /* convert to usec */ ena_dev->mmio_read.reg_read_to = hints->mmio_read_timeout * 1000; if (hints->missed_tx_completion_count_threshold_to_reset) adapter->missing_tx_threshold = hints->missed_tx_completion_count_threshold_to_reset; if (hints->missing_tx_completion_timeout) { if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT) adapter->missing_tx_timeout = ENA_HW_HINTS_NO_TIMEOUT; else adapter->missing_tx_timeout = SBT_1MS * hints->missing_tx_completion_timeout; } if (hints->driver_watchdog_timeout) { if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT) adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT; else adapter->keep_alive_timeout = SBT_1MS * hints->driver_watchdog_timeout; } } static void ena_timer_service(void *data) { struct ena_adapter *adapter = (struct ena_adapter *)data; struct ena_admin_host_info *host_info = adapter->ena_dev->host_attr.host_info; check_for_missing_keep_alive(adapter); check_for_admin_com_state(adapter); check_for_missing_completions(adapter); check_for_empty_rx_ring(adapter); if (host_info != NULL) ena_update_host_info(host_info, adapter->ifp); if (unlikely(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) { device_printf(adapter->pdev, "Trigger reset is on\n"); taskqueue_enqueue(adapter->reset_tq, &adapter->reset_task); return; } /* * Schedule another timeout one second from now. 
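 *
 * The timer service re-arms itself from its own handler; the reset and
 * detach paths stop it with callout_drain().  A minimal self-rearming
 * callout, shown purely as an illustration (my_softc, my_tick, tick and
 * do_periodic_work are made-up names, not part of this driver):
 *
 *	static void
 *	my_tick(void *arg)
 *	{
 *		struct my_softc *sc = arg;
 *
 *		do_periodic_work(sc);
 *		callout_schedule_sbt(&sc->tick, SBT_1S, SBT_1S, 0);
 *	}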
*/ callout_schedule_sbt(&adapter->timer_service, SBT_1S, SBT_1S, 0); } void ena_destroy_device(struct ena_adapter *adapter, bool graceful) { if_t ifp = adapter->ifp; struct ena_com_dev *ena_dev = adapter->ena_dev; bool dev_up; if (!ENA_FLAG_ISSET(ENA_FLAG_DEVICE_RUNNING, adapter)) return; if_link_state_change(ifp, LINK_STATE_DOWN); callout_drain(&adapter->timer_service); dev_up = ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter); if (dev_up) ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter); else ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter); if (!graceful) ena_com_set_admin_running_state(ena_dev, false); if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP, adapter)) ena_down(adapter); /* * Stop the device from sending AENQ events (if the device was up, and * the trigger reset was on, ena_down already performs device reset) */ if (!(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter) && dev_up)) ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); ena_free_mgmnt_irq(adapter); ena_disable_msix(adapter); ena_com_abort_admin_commands(ena_dev); ena_com_wait_for_abort_completion(ena_dev); ena_com_admin_destroy(ena_dev); ena_com_mmio_reg_read_request_destroy(ena_dev); adapter->reset_reason = ENA_REGS_RESET_NORMAL; ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter); ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter); } static int ena_device_validate_params(struct ena_adapter *adapter, struct ena_com_dev_get_features_ctx *get_feat_ctx) { if (memcmp(get_feat_ctx->dev_attr.mac_addr, adapter->mac_addr, ETHER_ADDR_LEN) != 0) { device_printf(adapter->pdev, "Error, mac address are different\n"); return (EINVAL); } if (get_feat_ctx->dev_attr.max_mtu < if_getmtu(adapter->ifp)) { device_printf(adapter->pdev, "Error, device max mtu is smaller than ifp MTU\n"); return (EINVAL); } return 0; } int ena_restore_device(struct ena_adapter *adapter) { struct ena_com_dev_get_features_ctx get_feat_ctx; struct ena_com_dev *ena_dev = adapter->ena_dev; if_t ifp = adapter->ifp; device_t dev = adapter->pdev; int wd_active; int rc; ENA_FLAG_SET_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter); rc = ena_device_init(adapter, dev, &get_feat_ctx, &wd_active); if (rc != 0) { device_printf(dev, "Cannot initialize device\n"); goto err; } /* * Only enable WD if it was enabled before reset, so it won't override * value set by the user by the sysctl. 
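 * In other words: if the user had disabled the watchdog through the sysctl
 * (wd_active == 0), the keep-alive capability reported by the device after
 * the reset is ignored; only a previously enabled watchdog is refreshed
 * from the new AENQ group mask.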
*/ if (adapter->wd_active != 0) adapter->wd_active = wd_active; rc = ena_device_validate_params(adapter, &get_feat_ctx); if (rc != 0) { device_printf(dev, "Validation of device parameters failed\n"); goto err_device_destroy; } rc = ena_handle_updated_queues(adapter, &get_feat_ctx); if (rc != 0) goto err_device_destroy; ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter); /* Make sure we don't have a race with AENQ Links state handler */ if (ENA_FLAG_ISSET(ENA_FLAG_LINK_UP, adapter)) if_link_state_change(ifp, LINK_STATE_UP); rc = ena_enable_msix_and_set_admin_interrupts(adapter, adapter->num_queues); if (rc != 0) { device_printf(dev, "Enable MSI-X failed\n"); goto err_device_destroy; } /* If the interface was up before the reset bring it up */ if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter)) { rc = ena_up(adapter); if (rc != 0) { device_printf(dev, "Failed to create I/O queues\n"); goto err_disable_msix; } } /* Indicate that device is running again and ready to work */ ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter); if (ENA_FLAG_ISSET(ENA_FLAG_DEV_UP_BEFORE_RESET, adapter)) { /* * As the AENQ handlers weren't executed during reset because * the flag ENA_FLAG_DEVICE_RUNNING was turned off, the * timestamp must be updated again That will prevent next reset * caused by missing keep alive. */ adapter->keep_alive_timestamp = getsbinuptime(); callout_reset_sbt(&adapter->timer_service, SBT_1S, SBT_1S, ena_timer_service, (void *)adapter, 0); } device_printf(dev, "Device reset completed successfully, Driver info: %s\n", ena_version); return (rc); err_disable_msix: ena_free_mgmnt_irq(adapter); ena_disable_msix(adapter); err_device_destroy: ena_com_abort_admin_commands(ena_dev); ena_com_wait_for_abort_completion(ena_dev); ena_com_admin_destroy(ena_dev); ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE); ena_com_mmio_reg_read_request_destroy(ena_dev); err: ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter); ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_ONGOING_RESET, adapter); device_printf(dev, "Reset attempt failed. Can not reset the device\n"); return (rc); } static void ena_reset_task(void *arg, int pending) { struct ena_adapter *adapter = (struct ena_adapter *)arg; if (unlikely(!ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) { device_printf(adapter->pdev, "device reset scheduled but trigger_reset is off\n"); return; } sx_xlock(&adapter->ioctl_sx); ena_destroy_device(adapter, false); ena_restore_device(adapter); sx_unlock(&adapter->ioctl_sx); } /** * ena_attach - Device Initialization Routine * @pdev: device information struct * * Returns 0 on success, otherwise on failure. * * ena_attach initializes an adapter identified by a device structure. * The OS initialization, configuring of the adapter private structure, * and a hardware reset occur. 
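 *
 * Roughly, attach proceeds as follows: map the register BAR, run
 * ena_device_init() (device reset and admin queue bring-up), choose the TX
 * placement policy (LLQ vs. host memory), size the IO queues, create the
 * TX/RX DMA tags and IO rings, enable MSI-X and the admin interrupts, set
 * up the ifnet, the reset taskqueue, statistics and sysctl nodes, and
 * finally attach netmap when DEV_NETMAP is compiled in.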
**/ static int ena_attach(device_t pdev) { struct ena_com_dev_get_features_ctx get_feat_ctx; struct ena_llq_configurations llq_config; struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 }; static int version_printed; struct ena_adapter *adapter; struct ena_com_dev *ena_dev = NULL; const char *queue_type_str; int io_queue_num; int rid, rc; adapter = device_get_softc(pdev); adapter->pdev = pdev; mtx_init(&adapter->global_mtx, "ENA global mtx", NULL, MTX_DEF); sx_init(&adapter->ioctl_sx, "ENA ioctl sx"); /* Set up the timer service */ callout_init_mtx(&adapter->timer_service, &adapter->global_mtx, 0); adapter->keep_alive_timeout = DEFAULT_KEEP_ALIVE_TO; adapter->missing_tx_timeout = DEFAULT_TX_CMP_TO; adapter->missing_tx_max_queues = DEFAULT_TX_MONITORED_QUEUES; adapter->missing_tx_threshold = DEFAULT_TX_CMP_THRESHOLD; if (version_printed++ == 0) device_printf(pdev, "%s\n", ena_version); /* Allocate memory for ena_dev structure */ ena_dev = malloc(sizeof(struct ena_com_dev), M_DEVBUF, M_WAITOK | M_ZERO); adapter->ena_dev = ena_dev; ena_dev->dmadev = pdev; rid = PCIR_BAR(ENA_REG_BAR); adapter->memory = NULL; adapter->registers = bus_alloc_resource_any(pdev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (unlikely(adapter->registers == NULL)) { device_printf(pdev, "unable to allocate bus resource: registers!\n"); rc = ENOMEM; goto err_dev_free; } ena_dev->bus = malloc(sizeof(struct ena_bus), M_DEVBUF, M_WAITOK | M_ZERO); /* Store register resources */ ((struct ena_bus*)(ena_dev->bus))->reg_bar_t = rman_get_bustag(adapter->registers); ((struct ena_bus*)(ena_dev->bus))->reg_bar_h = rman_get_bushandle(adapter->registers); if (unlikely(((struct ena_bus*)(ena_dev->bus))->reg_bar_h == 0)) { device_printf(pdev, "failed to pmap registers bar\n"); rc = ENXIO; goto err_bus_free; } ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; /* Initially clear all the flags */ ENA_FLAG_ZERO(adapter); /* Device initialization */ rc = ena_device_init(adapter, pdev, &get_feat_ctx, &adapter->wd_active); if (unlikely(rc != 0)) { device_printf(pdev, "ENA device init failed! 
(err: %d)\n", rc); rc = ENXIO; goto err_bus_free; } set_default_llq_configurations(&llq_config); rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq, &llq_config); if (unlikely(rc != 0)) { device_printf(pdev, "failed to set placement policy\n"); goto err_com_free; } if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) queue_type_str = "Regular"; else queue_type_str = "Low Latency"; device_printf(pdev, "Placement policy: %s\n", queue_type_str); adapter->keep_alive_timestamp = getsbinuptime(); adapter->tx_offload_cap = get_feat_ctx.offload.tx; memcpy(adapter->mac_addr, get_feat_ctx.dev_attr.mac_addr, ETHER_ADDR_LEN); calc_queue_ctx.ena_dev = ena_dev; calc_queue_ctx.get_feat_ctx = &get_feat_ctx; calc_queue_ctx.pdev = pdev; /* calculate IO queue number to create */ io_queue_num = ena_calc_io_queue_num(adapter, &get_feat_ctx); ENA_ASSERT(io_queue_num > 0, "Invalid queue number: %d\n", io_queue_num); adapter->num_queues = io_queue_num; adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu; // Set the requested Rx ring size adapter->rx_ring_size = ENA_DEFAULT_RING_SIZE; /* calculatre ring sizes */ rc = ena_calc_queue_size(adapter, &calc_queue_ctx); if (unlikely((rc != 0) || (io_queue_num <= 0))) { rc = EFAULT; goto err_com_free; } adapter->reset_reason = ENA_REGS_RESET_NORMAL; adapter->tx_ring_size = calc_queue_ctx.tx_queue_size; adapter->rx_ring_size = calc_queue_ctx.rx_queue_size; adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size; adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size; adapter->buf_ring_size = ENA_DEFAULT_BUF_RING_SIZE; /* set up dma tags for rx and tx buffers */ rc = ena_setup_tx_dma_tag(adapter); if (unlikely(rc != 0)) { device_printf(pdev, "Failed to create TX DMA tag\n"); goto err_com_free; } rc = ena_setup_rx_dma_tag(adapter); if (unlikely(rc != 0)) { device_printf(pdev, "Failed to create RX DMA tag\n"); goto err_tx_tag_free; } /* initialize rings basic information */ device_printf(pdev, "Creating %d io queues. 
Rx queue size: %d, Tx queue size: %d\n", io_queue_num, calc_queue_ctx.rx_queue_size, calc_queue_ctx.tx_queue_size); ena_init_io_rings(adapter); rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num); if (unlikely(rc != 0)) { device_printf(pdev, "Failed to enable and set the admin interrupts\n"); goto err_io_free; } /* setup network interface */ rc = ena_setup_ifnet(pdev, adapter, &get_feat_ctx); if (unlikely(rc != 0)) { device_printf(pdev, "Error with network interface setup\n"); goto err_msix_free; } /* Initialize reset task queue */ TASK_INIT(&adapter->reset_task, 0, ena_reset_task, adapter); adapter->reset_tq = taskqueue_create("ena_reset_enqueue", M_WAITOK | M_ZERO, taskqueue_thread_enqueue, &adapter->reset_tq); taskqueue_start_threads(&adapter->reset_tq, 1, PI_NET, "%s rstq", device_get_nameunit(adapter->pdev)); /* Initialize statistics */ ena_alloc_counters((counter_u64_t *)&adapter->dev_stats, sizeof(struct ena_stats_dev)); ena_alloc_counters((counter_u64_t *)&adapter->hw_stats, sizeof(struct ena_hw_stats)); ena_sysctl_add_nodes(adapter); #ifdef DEV_NETMAP rc = ena_netmap_attach(adapter); if (rc != 0) { device_printf(pdev, "netmap attach failed: %d\n", rc); goto err_detach; } #endif /* DEV_NETMAP */ /* Tell the stack that the interface is not active */ if_setdrvflagbits(adapter->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEVICE_RUNNING, adapter); return (0); #ifdef DEV_NETMAP err_detach: ether_ifdetach(adapter->ifp); #endif /* DEV_NETMAP */ err_msix_free: ena_com_dev_reset(adapter->ena_dev, ENA_REGS_RESET_INIT_ERR); ena_free_mgmnt_irq(adapter); ena_disable_msix(adapter); err_io_free: ena_free_all_io_rings_resources(adapter); ena_free_rx_dma_tag(adapter); err_tx_tag_free: ena_free_tx_dma_tag(adapter); err_com_free: ena_com_admin_destroy(ena_dev); ena_com_delete_host_info(ena_dev); ena_com_mmio_reg_read_request_destroy(ena_dev); err_bus_free: free(ena_dev->bus, M_DEVBUF); ena_free_pci_resources(adapter); err_dev_free: free(ena_dev, M_DEVBUF); return (rc); } /** * ena_detach - Device Removal Routine * @pdev: device information struct * * ena_detach is called by the device subsystem to alert the driver * that it should release a PCI device. 
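 *
 * Detach refuses to run while a VLAN trunk still references the interface
 * (EBUSY) and otherwise tears things down in roughly the reverse order of
 * attach: ifnet, timer service and reset taskqueue, IO rings, counters,
 * DMA tags, interrupts, PCI resources, RSS state, host info and finally
 * the ena_com structures.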
**/ static int ena_detach(device_t pdev) { struct ena_adapter *adapter = device_get_softc(pdev); struct ena_com_dev *ena_dev = adapter->ena_dev; int rc; /* Make sure VLANS are not using driver */ if (adapter->ifp->if_vlantrunk != NULL) { device_printf(adapter->pdev ,"VLAN is in use, detach first\n"); return (EBUSY); } ether_ifdetach(adapter->ifp); /* Free reset task and callout */ callout_drain(&adapter->timer_service); while (taskqueue_cancel(adapter->reset_tq, &adapter->reset_task, NULL)) taskqueue_drain(adapter->reset_tq, &adapter->reset_task); taskqueue_free(adapter->reset_tq); sx_xlock(&adapter->ioctl_sx); ena_down(adapter); ena_destroy_device(adapter, true); sx_unlock(&adapter->ioctl_sx); #ifdef DEV_NETMAP netmap_detach(adapter->ifp); #endif /* DEV_NETMAP */ ena_free_all_io_rings_resources(adapter); ena_free_counters((counter_u64_t *)&adapter->hw_stats, sizeof(struct ena_hw_stats)); ena_free_counters((counter_u64_t *)&adapter->dev_stats, sizeof(struct ena_stats_dev)); rc = ena_free_rx_dma_tag(adapter); if (unlikely(rc != 0)) device_printf(adapter->pdev, "Unmapped RX DMA tag associations\n"); rc = ena_free_tx_dma_tag(adapter); if (unlikely(rc != 0)) device_printf(adapter->pdev, "Unmapped TX DMA tag associations\n"); ena_free_irqs(adapter); ena_free_pci_resources(adapter); if (likely(ENA_FLAG_ISSET(ENA_FLAG_RSS_ACTIVE, adapter))) ena_com_rss_destroy(ena_dev); ena_com_delete_host_info(ena_dev); mtx_destroy(&adapter->global_mtx); sx_destroy(&adapter->ioctl_sx); if_free(adapter->ifp); if (ena_dev->bus != NULL) free(ena_dev->bus, M_DEVBUF); if (ena_dev != NULL) free(ena_dev, M_DEVBUF); return (bus_generic_detach(pdev)); } /****************************************************************************** ******************************** AENQ Handlers ******************************* *****************************************************************************/ /** * ena_update_on_link_change: * Notify the network interface about the change in link status **/ static void ena_update_on_link_change(void *adapter_data, struct ena_admin_aenq_entry *aenq_e) { struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; struct ena_admin_aenq_link_change_desc *aenq_desc; int status; if_t ifp; aenq_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e; ifp = adapter->ifp; status = aenq_desc->flags & ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK; if (status != 0) { device_printf(adapter->pdev, "link is UP\n"); ENA_FLAG_SET_ATOMIC(ENA_FLAG_LINK_UP, adapter); if (!ENA_FLAG_ISSET(ENA_FLAG_ONGOING_RESET, adapter)) if_link_state_change(ifp, LINK_STATE_UP); } else { device_printf(adapter->pdev, "link is DOWN\n"); if_link_state_change(ifp, LINK_STATE_DOWN); ENA_FLAG_CLEAR_ATOMIC(ENA_FLAG_LINK_UP, adapter); } } static void ena_notification(void *adapter_data, struct ena_admin_aenq_entry *aenq_e) { struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; struct ena_admin_ena_hw_hints *hints; ENA_WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION, "Invalid group(%x) expected %x\n", aenq_e->aenq_common_desc.group, ENA_ADMIN_NOTIFICATION); switch (aenq_e->aenq_common_desc.syndrom) { case ENA_ADMIN_UPDATE_HINTS: hints = (struct ena_admin_ena_hw_hints *)(&aenq_e->inline_data_w4); ena_update_hints(adapter, hints); break; default: device_printf(adapter->pdev, "Invalid aenq notification link state %d\n", aenq_e->aenq_common_desc.syndrom); } } /** * This handler will called for unknown event group or unimplemented handlers **/ static void unimplemented_aenq_handler(void *adapter_data, struct 
ena_admin_aenq_entry *aenq_e) { struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; device_printf(adapter->pdev, "Unknown event was received or event with unimplemented handler\n"); } static struct ena_aenq_handlers aenq_handlers = { .handlers = { [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change, [ENA_ADMIN_NOTIFICATION] = ena_notification, [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd, }, .unimplemented_handler = unimplemented_aenq_handler }; /********************************************************************* * FreeBSD Device Interface Entry Points *********************************************************************/ static device_method_t ena_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ena_probe), DEVMETHOD(device_attach, ena_attach), DEVMETHOD(device_detach, ena_detach), DEVMETHOD_END }; static driver_t ena_driver = { "ena", ena_methods, sizeof(struct ena_adapter), }; devclass_t ena_devclass; DRIVER_MODULE(ena, pci, ena_driver, ena_devclass, 0, 0); MODULE_PNP_INFO("U16:vendor;U16:device", pci, ena, ena_vendor_info_array, nitems(ena_vendor_info_array) - 1); MODULE_DEPEND(ena, pci, 1, 1, 1); MODULE_DEPEND(ena, ether, 1, 1, 1); #ifdef DEV_NETMAP MODULE_DEPEND(ena, netmap, 1, 1, 1); #endif /* DEV_NETMAP */ /*********************************************************************/ diff --git a/sys/dev/malo/if_malo.c b/sys/dev/malo/if_malo.c index ae21af8b4536..a7823dd6003d 100644 --- a/sys/dev/malo/if_malo.c +++ b/sys/dev/malo/if_malo.c @@ -1,2164 +1,2164 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2008 Weongyo Jeong * Copyright (c) 2007 Marvell Semiconductor, Inc. * Copyright (c) 2007 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. 
*/ #include #ifdef __FreeBSD__ __FBSDID("$FreeBSD$"); #endif #include "opt_malo.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include SYSCTL_NODE(_hw, OID_AUTO, malo, CTLFLAG_RD, 0, "Marvell 88w8335 driver parameters"); static int malo_txcoalesce = 8; /* # tx pkts to q before poking f/w*/ SYSCTL_INT(_hw_malo, OID_AUTO, txcoalesce, CTLFLAG_RWTUN, &malo_txcoalesce, 0, "tx buffers to send at once"); static int malo_rxbuf = MALO_RXBUF; /* # rx buffers to allocate */ SYSCTL_INT(_hw_malo, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &malo_rxbuf, 0, "rx buffers allocated"); static int malo_rxquota = MALO_RXBUF; /* # max buffers to process */ SYSCTL_INT(_hw_malo, OID_AUTO, rxquota, CTLFLAG_RWTUN, &malo_rxquota, 0, "max rx buffers to process per interrupt"); static int malo_txbuf = MALO_TXBUF; /* # tx buffers to allocate */ SYSCTL_INT(_hw_malo, OID_AUTO, txbuf, CTLFLAG_RWTUN, &malo_txbuf, 0, "tx buffers allocated"); #ifdef MALO_DEBUG static int malo_debug = 0; SYSCTL_INT(_hw_malo, OID_AUTO, debug, CTLFLAG_RWTUN, &malo_debug, 0, "control debugging printfs"); enum { MALO_DEBUG_XMIT = 0x00000001, /* basic xmit operation */ MALO_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */ MALO_DEBUG_RECV = 0x00000004, /* basic recv operation */ MALO_DEBUG_RECV_DESC = 0x00000008, /* recv descriptors */ MALO_DEBUG_RESET = 0x00000010, /* reset processing */ MALO_DEBUG_INTR = 0x00000040, /* ISR */ MALO_DEBUG_TX_PROC = 0x00000080, /* tx ISR proc */ MALO_DEBUG_RX_PROC = 0x00000100, /* rx ISR proc */ MALO_DEBUG_STATE = 0x00000400, /* 802.11 state transitions */ MALO_DEBUG_NODE = 0x00000800, /* node management */ MALO_DEBUG_RECV_ALL = 0x00001000, /* trace all frames (beacons) */ MALO_DEBUG_FW = 0x00008000, /* firmware */ MALO_DEBUG_ANY = 0xffffffff }; #define IS_BEACON(wh) \ ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK | \ IEEE80211_FC0_SUBTYPE_MASK)) == \ (IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_BEACON)) #define IFF_DUMPPKTS_RECV(sc, wh) \ (((sc->malo_debug & MALO_DEBUG_RECV) && \ ((sc->malo_debug & MALO_DEBUG_RECV_ALL) || !IS_BEACON(wh)))) #define IFF_DUMPPKTS_XMIT(sc) \ (sc->malo_debug & MALO_DEBUG_XMIT) #define DPRINTF(sc, m, fmt, ...) do { \ if (sc->malo_debug & (m)) \ printf(fmt, __VA_ARGS__); \ } while (0) #else #define DPRINTF(sc, m, fmt, ...) 
do { \ (void) sc; \ } while (0) #endif static MALLOC_DEFINE(M_MALODEV, "malodev", "malo driver dma buffers"); static struct ieee80211vap *malo_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void malo_vap_delete(struct ieee80211vap *); static int malo_dma_setup(struct malo_softc *); static int malo_setup_hwdma(struct malo_softc *); static void malo_txq_init(struct malo_softc *, struct malo_txq *, int); static void malo_tx_cleanupq(struct malo_softc *, struct malo_txq *); static void malo_parent(struct ieee80211com *); static int malo_transmit(struct ieee80211com *, struct mbuf *); static void malo_start(struct malo_softc *); static void malo_watchdog(void *); static void malo_updateslot(struct ieee80211com *); static int malo_newstate(struct ieee80211vap *, enum ieee80211_state, int); static void malo_scan_start(struct ieee80211com *); static void malo_scan_end(struct ieee80211com *); static void malo_set_channel(struct ieee80211com *); static int malo_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void malo_sysctlattach(struct malo_softc *); static void malo_announce(struct malo_softc *); static void malo_dma_cleanup(struct malo_softc *); static void malo_stop(struct malo_softc *); static int malo_chan_set(struct malo_softc *, struct ieee80211_channel *); static int malo_mode_init(struct malo_softc *); static void malo_tx_proc(void *, int); static void malo_rx_proc(void *, int); static void malo_init(void *); /* * Read/Write shorthands for accesses to BAR 0. Note that all BAR 1 * operations are done in the "hal" except getting H/W MAC address at * malo_attach and there should be no reference to them here. */ static uint32_t malo_bar0_read4(struct malo_softc *sc, bus_size_t off) { return bus_space_read_4(sc->malo_io0t, sc->malo_io0h, off); } static void malo_bar0_write4(struct malo_softc *sc, bus_size_t off, uint32_t val) { DPRINTF(sc, MALO_DEBUG_FW, "%s: off 0x%jx val 0x%x\n", __func__, (uintmax_t)off, val); bus_space_write_4(sc->malo_io0t, sc->malo_io0h, off, val); } int malo_attach(uint16_t devid, struct malo_softc *sc) { struct ieee80211com *ic = &sc->malo_ic; struct malo_hal *mh; int error; uint8_t bands[IEEE80211_MODE_BYTES]; MALO_LOCK_INIT(sc); callout_init_mtx(&sc->malo_watchdog_timer, &sc->malo_mtx, 0); mbufq_init(&sc->malo_snd, ifqmaxlen); mh = malo_hal_attach(sc->malo_dev, devid, sc->malo_io1h, sc->malo_io1t, sc->malo_dmat); if (mh == NULL) { device_printf(sc->malo_dev, "unable to attach HAL\n"); error = EIO; goto bad; } sc->malo_mh = mh; /* * Load firmware so we can get setup. We arbitrarily pick station * firmware; we'll re-load firmware as needed so setting up * the wrong mode isn't a big deal. */ error = malo_hal_fwload(mh, "malo8335-h", "malo8335-m"); if (error != 0) { device_printf(sc->malo_dev, "unable to setup firmware\n"); goto bad1; } /* XXX gethwspecs() extracts correct informations? not maybe! 
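 * Whatever the answer, the hwspecs block fetched below provides the MAC
 * address copied into ic_macaddr further down, as well as the
 * rxdesc_read/rxdesc_write and wcbbase register offsets that
 * malo_hal_set_rxtxdma() later uses to publish the DMA ring addresses to
 * the hardware.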
*/ error = malo_hal_gethwspecs(mh, &sc->malo_hwspecs); if (error != 0) { device_printf(sc->malo_dev, "unable to fetch h/w specs\n"); goto bad1; } DPRINTF(sc, MALO_DEBUG_FW, "malo_hal_gethwspecs: hwversion 0x%x hostif 0x%x" "maxnum_wcb 0x%x maxnum_mcaddr 0x%x maxnum_tx_wcb 0x%x" "regioncode 0x%x num_antenna 0x%x fw_releasenum 0x%x" "wcbbase0 0x%x rxdesc_read 0x%x rxdesc_write 0x%x" "ul_fw_awakecookie 0x%x w[4] = %x %x %x %x", sc->malo_hwspecs.hwversion, sc->malo_hwspecs.hostinterface, sc->malo_hwspecs.maxnum_wcb, sc->malo_hwspecs.maxnum_mcaddr, sc->malo_hwspecs.maxnum_tx_wcb, sc->malo_hwspecs.regioncode, sc->malo_hwspecs.num_antenna, sc->malo_hwspecs.fw_releasenum, sc->malo_hwspecs.wcbbase0, sc->malo_hwspecs.rxdesc_read, sc->malo_hwspecs.rxdesc_write, sc->malo_hwspecs.ul_fw_awakecookie, sc->malo_hwspecs.wcbbase[0], sc->malo_hwspecs.wcbbase[1], sc->malo_hwspecs.wcbbase[2], sc->malo_hwspecs.wcbbase[3]); /* NB: firmware looks that it does not export regdomain info API. */ memset(bands, 0, sizeof(bands)); setbit(bands, IEEE80211_MODE_11B); setbit(bands, IEEE80211_MODE_11G); ieee80211_init_channels(ic, NULL, bands); sc->malo_txantenna = 0x2; /* h/w default */ sc->malo_rxantenna = 0xffff; /* h/w default */ /* * Allocate tx + rx descriptors and populate the lists. * We immediately push the information to the firmware * as otherwise it gets upset. */ error = malo_dma_setup(sc); if (error != 0) { device_printf(sc->malo_dev, "failed to setup descriptors: %d\n", error); goto bad1; } error = malo_setup_hwdma(sc); /* push to firmware */ if (error != 0) /* NB: malo_setupdma prints msg */ goto bad2; sc->malo_tq = taskqueue_create_fast("malo_taskq", M_NOWAIT, taskqueue_thread_enqueue, &sc->malo_tq); taskqueue_start_threads(&sc->malo_tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->malo_dev)); - TASK_INIT(&sc->malo_rxtask, 0, malo_rx_proc, sc); + NET_TASK_INIT(&sc->malo_rxtask, 0, malo_rx_proc, sc); TASK_INIT(&sc->malo_txtask, 0, malo_tx_proc, sc); ic->ic_softc = sc; ic->ic_name = device_get_nameunit(sc->malo_dev); /* XXX not right but it's not used anywhere important */ ic->ic_phytype = IEEE80211_T_OFDM; ic->ic_opmode = IEEE80211_M_STA; ic->ic_caps = IEEE80211_C_STA /* station mode supported */ | IEEE80211_C_BGSCAN /* capable of bg scanning */ | IEEE80211_C_MONITOR /* monitor mode */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_TXPMGT /* capable of txpow mgt */ | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ ; IEEE80211_ADDR_COPY(ic->ic_macaddr, sc->malo_hwspecs.macaddr); /* * Transmit requires space in the packet for a special format transmit * record and optional padding between this record and the payload. * Ask the net80211 layer to arrange this when encapsulating * packets so we can add it efficiently. */ ic->ic_headroom = sizeof(struct malo_txrec) - sizeof(struct ieee80211_frame); /* call MI attach routine. 
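 * ieee80211_ifattach() installs net80211's default methods, so the
 * driver-specific overrides (vap_create, raw_xmit, the scan and channel
 * hooks, transmit, and the rest) have to be assigned after this call, as
 * done just below.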
*/ ieee80211_ifattach(ic); /* override default methods */ ic->ic_vap_create = malo_vap_create; ic->ic_vap_delete = malo_vap_delete; ic->ic_raw_xmit = malo_raw_xmit; ic->ic_updateslot = malo_updateslot; ic->ic_scan_start = malo_scan_start; ic->ic_scan_end = malo_scan_end; ic->ic_set_channel = malo_set_channel; ic->ic_parent = malo_parent; ic->ic_transmit = malo_transmit; sc->malo_invalid = 0; /* ready to go, enable int handling */ ieee80211_radiotap_attach(ic, &sc->malo_tx_th.wt_ihdr, sizeof(sc->malo_tx_th), MALO_TX_RADIOTAP_PRESENT, &sc->malo_rx_th.wr_ihdr, sizeof(sc->malo_rx_th), MALO_RX_RADIOTAP_PRESENT); /* * Setup dynamic sysctl's. */ malo_sysctlattach(sc); if (bootverbose) ieee80211_announce(ic); malo_announce(sc); return 0; bad2: malo_dma_cleanup(sc); bad1: malo_hal_detach(mh); bad: sc->malo_invalid = 1; return error; } static struct ieee80211vap * malo_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct malo_softc *sc = ic->ic_softc; struct malo_vap *mvp; struct ieee80211vap *vap; if (!TAILQ_EMPTY(&ic->ic_vaps)) { device_printf(sc->malo_dev, "multiple vaps not supported\n"); return NULL; } switch (opmode) { case IEEE80211_M_STA: if (opmode == IEEE80211_M_STA) flags |= IEEE80211_CLONE_NOBEACONS; /* fall thru... */ case IEEE80211_M_MONITOR: break; default: device_printf(sc->malo_dev, "%s mode not supported\n", ieee80211_opmode_name[opmode]); return NULL; /* unsupported */ } mvp = malloc(sizeof(struct malo_vap), M_80211_VAP, M_WAITOK | M_ZERO); vap = &mvp->malo_vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); /* override state transition machine */ mvp->malo_newstate = vap->iv_newstate; vap->iv_newstate = malo_newstate; /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status, mac); ic->ic_opmode = opmode; return vap; } static void malo_vap_delete(struct ieee80211vap *vap) { struct malo_vap *mvp = MALO_VAP(vap); ieee80211_vap_detach(vap); free(mvp, M_80211_VAP); } int malo_intr(void *arg) { struct malo_softc *sc = arg; struct malo_hal *mh = sc->malo_mh; uint32_t status; if (sc->malo_invalid) { /* * The hardware is not ready/present, don't touch anything. * Note this can happen early on if the IRQ is shared. */ DPRINTF(sc, MALO_DEBUG_ANY, "%s: invalid; ignored\n", __func__); return (FILTER_STRAY); } /* * Figure out the reason(s) for the interrupt. 
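 * This runs as an interrupt filter: it only reads and acknowledges the ISR
 * and hands the actual RX/TX work to malo_rx_proc() and malo_tx_proc() on
 * sc->malo_tq, returning FILTER_STRAY when the (possibly shared) interrupt
 * was not ours.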
*/ malo_hal_getisr(mh, &status); /* NB: clears ISR too */ if (status == 0) /* must be a shared irq */ return (FILTER_STRAY); DPRINTF(sc, MALO_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n", __func__, status, sc->malo_imask); if (status & MALO_A2HRIC_BIT_RX_RDY) taskqueue_enqueue(sc->malo_tq, &sc->malo_rxtask); if (status & MALO_A2HRIC_BIT_TX_DONE) taskqueue_enqueue(sc->malo_tq, &sc->malo_txtask); if (status & MALO_A2HRIC_BIT_OPC_DONE) malo_hal_cmddone(mh); if (status & MALO_A2HRIC_BIT_MAC_EVENT) ; if (status & MALO_A2HRIC_BIT_RX_PROBLEM) ; if (status & MALO_A2HRIC_BIT_ICV_ERROR) { /* TKIP ICV error */ sc->malo_stats.mst_rx_badtkipicv++; } #ifdef MALO_DEBUG if (((status | sc->malo_imask) ^ sc->malo_imask) != 0) DPRINTF(sc, MALO_DEBUG_INTR, "%s: can't handle interrupt status 0x%x\n", __func__, status); #endif return (FILTER_HANDLED); } static void malo_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { bus_addr_t *paddr = (bus_addr_t*) arg; KASSERT(error == 0, ("error %u on bus_dma callback", error)); *paddr = segs->ds_addr; } static int malo_desc_setup(struct malo_softc *sc, const char *name, struct malo_descdma *dd, int nbuf, size_t bufsize, int ndesc, size_t descsize) { int error; uint8_t *ds; DPRINTF(sc, MALO_DEBUG_RESET, "%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n", __func__, name, nbuf, (uintmax_t) bufsize, ndesc, (uintmax_t) descsize); dd->dd_name = name; dd->dd_desc_len = nbuf * ndesc * descsize; /* * Setup DMA descriptor area. */ error = bus_dma_tag_create(bus_get_dma_tag(sc->malo_dev),/* parent */ PAGE_SIZE, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ dd->dd_desc_len, /* maxsize */ 1, /* nsegments */ dd->dd_desc_len, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &dd->dd_dmat); if (error != 0) { device_printf(sc->malo_dev, "cannot allocate %s DMA tag\n", dd->dd_name); return error; } /* allocate descriptors */ error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dd->dd_dmamap); if (error != 0) { device_printf(sc->malo_dev, "unable to alloc memory for %u %s descriptors, " "error %u\n", nbuf * ndesc, dd->dd_name, error); goto fail1; } error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap, dd->dd_desc, dd->dd_desc_len, malo_load_cb, &dd->dd_desc_paddr, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->malo_dev, "unable to map %s descriptors, error %u\n", dd->dd_name, error); goto fail2; } ds = dd->dd_desc; memset(ds, 0, dd->dd_desc_len); DPRINTF(sc, MALO_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> 0x%jx (%lu)\n", __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len, (uintmax_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len); return 0; fail2: bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); fail1: bus_dma_tag_destroy(dd->dd_dmat); memset(dd, 0, sizeof(*dd)); return error; } #define DS2PHYS(_dd, _ds) \ ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) static int malo_rxdma_setup(struct malo_softc *sc) { int error, bsize, i; struct malo_rxbuf *bf; struct malo_rxdesc *ds; error = malo_desc_setup(sc, "rx", &sc->malo_rxdma, malo_rxbuf, sizeof(struct malo_rxbuf), 1, sizeof(struct malo_rxdesc)); if (error != 0) return error; /* * Allocate rx buffers and set them up. 
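 * Each rx buffer gets one descriptor out of the single DMA block allocated
 * by malo_desc_setup(); the DS2PHYS() macro defined above converts a
 * descriptor's kernel virtual address into the bus address the hardware
 * needs by offsetting from the loaded map, so for the i-th descriptor it
 * evaluates to
 *
 *	dd_desc_paddr + i * sizeof(struct malo_rxdesc)
 *
 * which is just another way of writing the pointer arithmetic in the macro.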
*/ bsize = malo_rxbuf * sizeof(struct malo_rxbuf); bf = malloc(bsize, M_MALODEV, M_NOWAIT | M_ZERO); if (bf == NULL) { device_printf(sc->malo_dev, "malloc of %u rx buffers failed\n", bsize); return error; } sc->malo_rxdma.dd_bufptr = bf; STAILQ_INIT(&sc->malo_rxbuf); ds = sc->malo_rxdma.dd_desc; for (i = 0; i < malo_rxbuf; i++, bf++, ds++) { bf->bf_desc = ds; bf->bf_daddr = DS2PHYS(&sc->malo_rxdma, ds); error = bus_dmamap_create(sc->malo_dmat, BUS_DMA_NOWAIT, &bf->bf_dmamap); if (error != 0) { device_printf(sc->malo_dev, "%s: unable to dmamap for rx buffer, error %d\n", __func__, error); return error; } /* NB: tail is intentional to preserve descriptor order */ STAILQ_INSERT_TAIL(&sc->malo_rxbuf, bf, bf_list); } return 0; } static int malo_txdma_setup(struct malo_softc *sc, struct malo_txq *txq) { int error, bsize, i; struct malo_txbuf *bf; struct malo_txdesc *ds; error = malo_desc_setup(sc, "tx", &txq->dma, malo_txbuf, sizeof(struct malo_txbuf), MALO_TXDESC, sizeof(struct malo_txdesc)); if (error != 0) return error; /* allocate and setup tx buffers */ bsize = malo_txbuf * sizeof(struct malo_txbuf); bf = malloc(bsize, M_MALODEV, M_NOWAIT | M_ZERO); if (bf == NULL) { device_printf(sc->malo_dev, "malloc of %u tx buffers failed\n", malo_txbuf); return ENOMEM; } txq->dma.dd_bufptr = bf; STAILQ_INIT(&txq->free); txq->nfree = 0; ds = txq->dma.dd_desc; for (i = 0; i < malo_txbuf; i++, bf++, ds += MALO_TXDESC) { bf->bf_desc = ds; bf->bf_daddr = DS2PHYS(&txq->dma, ds); error = bus_dmamap_create(sc->malo_dmat, BUS_DMA_NOWAIT, &bf->bf_dmamap); if (error != 0) { device_printf(sc->malo_dev, "unable to create dmamap for tx " "buffer %u, error %u\n", i, error); return error; } STAILQ_INSERT_TAIL(&txq->free, bf, bf_list); txq->nfree++; } return 0; } static void malo_desc_cleanup(struct malo_softc *sc, struct malo_descdma *dd) { bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); bus_dma_tag_destroy(dd->dd_dmat); memset(dd, 0, sizeof(*dd)); } static void malo_rxdma_cleanup(struct malo_softc *sc) { struct malo_rxbuf *bf; STAILQ_FOREACH(bf, &sc->malo_rxbuf, bf_list) { if (bf->bf_m != NULL) { m_freem(bf->bf_m); bf->bf_m = NULL; } if (bf->bf_dmamap != NULL) { bus_dmamap_destroy(sc->malo_dmat, bf->bf_dmamap); bf->bf_dmamap = NULL; } } STAILQ_INIT(&sc->malo_rxbuf); if (sc->malo_rxdma.dd_bufptr != NULL) { free(sc->malo_rxdma.dd_bufptr, M_MALODEV); sc->malo_rxdma.dd_bufptr = NULL; } if (sc->malo_rxdma.dd_desc_len != 0) malo_desc_cleanup(sc, &sc->malo_rxdma); } static void malo_txdma_cleanup(struct malo_softc *sc, struct malo_txq *txq) { struct malo_txbuf *bf; struct ieee80211_node *ni; STAILQ_FOREACH(bf, &txq->free, bf_list) { if (bf->bf_m != NULL) { m_freem(bf->bf_m); bf->bf_m = NULL; } ni = bf->bf_node; bf->bf_node = NULL; if (ni != NULL) { /* * Reclaim node reference. */ ieee80211_free_node(ni); } if (bf->bf_dmamap != NULL) { bus_dmamap_destroy(sc->malo_dmat, bf->bf_dmamap); bf->bf_dmamap = NULL; } } STAILQ_INIT(&txq->free); txq->nfree = 0; if (txq->dma.dd_bufptr != NULL) { free(txq->dma.dd_bufptr, M_MALODEV); txq->dma.dd_bufptr = NULL; } if (txq->dma.dd_desc_len != 0) malo_desc_cleanup(sc, &txq->dma); } static void malo_dma_cleanup(struct malo_softc *sc) { int i; for (i = 0; i < MALO_NUM_TX_QUEUES; i++) malo_txdma_cleanup(sc, &sc->malo_txq[i]); malo_rxdma_cleanup(sc); } static int malo_dma_setup(struct malo_softc *sc) { int error, i; /* rxdma initializing. */ error = malo_rxdma_setup(sc); if (error != 0) return error; /* NB: we just have 1 tx queue now. 
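 * The loop below is still written over MALO_NUM_TX_QUEUES; the transmit
 * path itself currently always picks malo_txq[0] (see malo_start()).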
*/ for (i = 0; i < MALO_NUM_TX_QUEUES; i++) { error = malo_txdma_setup(sc, &sc->malo_txq[i]); if (error != 0) { malo_dma_cleanup(sc); return error; } malo_txq_init(sc, &sc->malo_txq[i], i); } return 0; } static void malo_hal_set_rxtxdma(struct malo_softc *sc) { int i; malo_bar0_write4(sc, sc->malo_hwspecs.rxdesc_read, sc->malo_hwdma.rxdesc_read); malo_bar0_write4(sc, sc->malo_hwspecs.rxdesc_write, sc->malo_hwdma.rxdesc_read); for (i = 0; i < MALO_NUM_TX_QUEUES; i++) { malo_bar0_write4(sc, sc->malo_hwspecs.wcbbase[i], sc->malo_hwdma.wcbbase[i]); } } /* * Inform firmware of our tx/rx dma setup. The BAR 0 writes below are * for compatibility with older firmware. For current firmware we send * this information with a cmd block via malo_hal_sethwdma. */ static int malo_setup_hwdma(struct malo_softc *sc) { int i; struct malo_txq *txq; sc->malo_hwdma.rxdesc_read = sc->malo_rxdma.dd_desc_paddr; for (i = 0; i < MALO_NUM_TX_QUEUES; i++) { txq = &sc->malo_txq[i]; sc->malo_hwdma.wcbbase[i] = txq->dma.dd_desc_paddr; } sc->malo_hwdma.maxnum_txwcb = malo_txbuf; sc->malo_hwdma.maxnum_wcb = MALO_NUM_TX_QUEUES; malo_hal_set_rxtxdma(sc); return 0; } static void malo_txq_init(struct malo_softc *sc, struct malo_txq *txq, int qnum) { struct malo_txbuf *bf, *bn; struct malo_txdesc *ds; MALO_TXQ_LOCK_INIT(sc, txq); txq->qnum = qnum; txq->txpri = 0; /* XXX */ STAILQ_FOREACH(bf, &txq->free, bf_list) { bf->bf_txq = txq; ds = bf->bf_desc; bn = STAILQ_NEXT(bf, bf_list); if (bn == NULL) bn = STAILQ_FIRST(&txq->free); ds->physnext = htole32(bn->bf_daddr); } STAILQ_INIT(&txq->active); } /* * Reclaim resources for a setup queue. */ static void malo_tx_cleanupq(struct malo_softc *sc, struct malo_txq *txq) { /* XXX hal work? */ MALO_TXQ_LOCK_DESTROY(txq); } /* * Allocate a tx buffer for sending a frame. */ static struct malo_txbuf * malo_getbuf(struct malo_softc *sc, struct malo_txq *txq) { struct malo_txbuf *bf; MALO_TXQ_LOCK(txq); bf = STAILQ_FIRST(&txq->free); if (bf != NULL) { STAILQ_REMOVE_HEAD(&txq->free, bf_list); txq->nfree--; } MALO_TXQ_UNLOCK(txq); if (bf == NULL) { DPRINTF(sc, MALO_DEBUG_XMIT, "%s: out of xmit buffers on q %d\n", __func__, txq->qnum); sc->malo_stats.mst_tx_qstop++; } return bf; } static int malo_tx_dmasetup(struct malo_softc *sc, struct malo_txbuf *bf, struct mbuf *m0) { struct mbuf *m; int error; /* * Load the DMA map so any coalescing is done. This also calculates * the number of descriptors we need. */ error = bus_dmamap_load_mbuf_sg(sc->malo_dmat, bf->bf_dmamap, m0, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT); if (error == EFBIG) { /* XXX packet requires too many descriptors */ bf->bf_nseg = MALO_TXDESC + 1; } else if (error != 0) { sc->malo_stats.mst_tx_busdma++; m_freem(m0); return error; } /* * Discard null packets and check for packets that require too many * TX descriptors. We try to convert the latter to a cluster. 
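 * The first bus_dmamap_load_mbuf_sg() above deliberately does not bail out
 * on EFBIG; it only records the oversized segment count and lets this
 * branch linearize the chain with m_defrag() and retry the load once.  If
 * the reload still fails, the frame is dropped and counted in
 * mst_tx_busdma.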
*/ if (error == EFBIG) { /* too many desc's, linearize */ sc->malo_stats.mst_tx_linear++; m = m_defrag(m0, M_NOWAIT); if (m == NULL) { m_freem(m0); sc->malo_stats.mst_tx_nombuf++; return ENOMEM; } m0 = m; error = bus_dmamap_load_mbuf_sg(sc->malo_dmat, bf->bf_dmamap, m0, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT); if (error != 0) { sc->malo_stats.mst_tx_busdma++; m_freem(m0); return error; } KASSERT(bf->bf_nseg <= MALO_TXDESC, ("too many segments after defrag; nseg %u", bf->bf_nseg)); } else if (bf->bf_nseg == 0) { /* null packet, discard */ sc->malo_stats.mst_tx_nodata++; m_freem(m0); return EIO; } DPRINTF(sc, MALO_DEBUG_XMIT, "%s: m %p len %u\n", __func__, m0, m0->m_pkthdr.len); bus_dmamap_sync(sc->malo_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); bf->bf_m = m0; return 0; } #ifdef MALO_DEBUG static void malo_printrxbuf(const struct malo_rxbuf *bf, u_int ix) { const struct malo_rxdesc *ds = bf->bf_desc; uint32_t status = le32toh(ds->status); printf("R[%2u] (DS.V:%p DS.P:0x%jx) NEXT:%08x DATA:%08x RC:%02x%s\n" " STAT:%02x LEN:%04x SNR:%02x NF:%02x CHAN:%02x" " RATE:%02x QOS:%04x\n", ix, ds, (uintmax_t)bf->bf_daddr, le32toh(ds->physnext), le32toh(ds->physbuffdata), ds->rxcontrol, ds->rxcontrol != MALO_RXD_CTRL_DRIVER_OWN ? "" : (status & MALO_RXD_STATUS_OK) ? " *" : " !", ds->status, le16toh(ds->pktlen), ds->snr, ds->nf, ds->channel, ds->rate, le16toh(ds->qosctrl)); } static void malo_printtxbuf(const struct malo_txbuf *bf, u_int qnum, u_int ix) { const struct malo_txdesc *ds = bf->bf_desc; uint32_t status = le32toh(ds->status); printf("Q%u[%3u]", qnum, ix); printf(" (DS.V:%p DS.P:0x%jx)\n", ds, (uintmax_t)bf->bf_daddr); printf(" NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n", le32toh(ds->physnext), le32toh(ds->pktptr), le16toh(ds->pktlen), status, status & MALO_TXD_STATUS_USED ? "" : (status & 3) != 0 ? " *" : " !"); printf(" RATE:%02x PRI:%x QOS:%04x SAP:%08x FORMAT:%04x\n", ds->datarate, ds->txpriority, le16toh(ds->qosctrl), le32toh(ds->sap_pktinfo), le16toh(ds->format)); #if 0 { const uint8_t *cp = (const uint8_t *) ds; int i; for (i = 0; i < sizeof(struct malo_txdesc); i++) { printf("%02x ", cp[i]); if (((i+1) % 16) == 0) printf("\n"); } printf("\n"); } #endif } #endif /* MALO_DEBUG */ static __inline void malo_updatetxrate(struct ieee80211_node *ni, int rix) { static const int ieeerates[] = { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 96, 108 }; if (rix < nitems(ieeerates)) ni->ni_txrate = ieeerates[rix]; } static int malo_fix2rate(int fix_rate) { static const int rates[] = { 2, 4, 11, 22, 12, 18, 24, 36, 48, 96, 108 }; return (fix_rate < nitems(rates) ? rates[fix_rate] : 0); } /* idiomatic shorthands: MS = mask+shift, SM = shift+mask */ #define MS(v,x) (((v) & x) >> x##_S) #define SM(v,x) (((v) << x##_S) & x) /* * Process completed xmit descriptors from the specified queue. 
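 * Buffers are reaped from the head of the active list until a descriptor
 * that is still firmware-owned (MALO_TXD_STATUS_FW_OWNED) is found; each
 * completed frame has its status and rate accounted, its mbuf returned via
 * ieee80211_tx_complete() (or freed when no node is attached), and the
 * buffer is put back on the queue's free list.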
*/ static int malo_tx_processq(struct malo_softc *sc, struct malo_txq *txq) { struct malo_txbuf *bf; struct malo_txdesc *ds; struct ieee80211_node *ni; int nreaped; uint32_t status; DPRINTF(sc, MALO_DEBUG_TX_PROC, "%s: tx queue %u\n", __func__, txq->qnum); for (nreaped = 0;; nreaped++) { MALO_TXQ_LOCK(txq); bf = STAILQ_FIRST(&txq->active); if (bf == NULL) { MALO_TXQ_UNLOCK(txq); break; } ds = bf->bf_desc; MALO_TXDESC_SYNC(txq, ds, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (ds->status & htole32(MALO_TXD_STATUS_FW_OWNED)) { MALO_TXQ_UNLOCK(txq); break; } STAILQ_REMOVE_HEAD(&txq->active, bf_list); MALO_TXQ_UNLOCK(txq); #ifdef MALO_DEBUG if (sc->malo_debug & MALO_DEBUG_XMIT_DESC) malo_printtxbuf(bf, txq->qnum, nreaped); #endif ni = bf->bf_node; if (ni != NULL) { status = le32toh(ds->status); if (status & MALO_TXD_STATUS_OK) { uint16_t format = le16toh(ds->format); uint8_t txant = MS(format, MALO_TXD_ANTENNA); sc->malo_stats.mst_ant_tx[txant]++; if (status & MALO_TXD_STATUS_OK_RETRY) sc->malo_stats.mst_tx_retries++; if (status & MALO_TXD_STATUS_OK_MORE_RETRY) sc->malo_stats.mst_tx_mretries++; malo_updatetxrate(ni, ds->datarate); sc->malo_stats.mst_tx_rate = ds->datarate; } else { if (status & MALO_TXD_STATUS_FAILED_LINK_ERROR) sc->malo_stats.mst_tx_linkerror++; if (status & MALO_TXD_STATUS_FAILED_XRETRY) sc->malo_stats.mst_tx_xretries++; if (status & MALO_TXD_STATUS_FAILED_AGING) sc->malo_stats.mst_tx_aging++; } /* XXX strip fw len in case header inspected */ m_adj(bf->bf_m, sizeof(uint16_t)); ieee80211_tx_complete(ni, bf->bf_m, (status & MALO_TXD_STATUS_OK) == 0); } else m_freem(bf->bf_m); ds->status = htole32(MALO_TXD_STATUS_IDLE); ds->pktlen = htole32(0); bus_dmamap_sync(sc->malo_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->malo_dmat, bf->bf_dmamap); bf->bf_m = NULL; bf->bf_node = NULL; MALO_TXQ_LOCK(txq); STAILQ_INSERT_TAIL(&txq->free, bf, bf_list); txq->nfree++; MALO_TXQ_UNLOCK(txq); } return nreaped; } /* * Deferred processing of transmit interrupt. */ static void malo_tx_proc(void *arg, int npending) { struct malo_softc *sc = arg; int i, nreaped; /* * Process each active queue. */ nreaped = 0; MALO_LOCK(sc); for (i = 0; i < MALO_NUM_TX_QUEUES; i++) { if (!STAILQ_EMPTY(&sc->malo_txq[i].active)) nreaped += malo_tx_processq(sc, &sc->malo_txq[i]); } if (nreaped != 0) { sc->malo_timer = 0; malo_start(sc); } MALO_UNLOCK(sc); } static int malo_tx_start(struct malo_softc *sc, struct ieee80211_node *ni, struct malo_txbuf *bf, struct mbuf *m0) { #define IS_DATA_FRAME(wh) \ ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK)) == IEEE80211_FC0_TYPE_DATA) int error, ismcast, iswep; int copyhdrlen, hdrlen, pktlen; struct ieee80211_frame *wh; struct ieee80211com *ic = &sc->malo_ic; struct ieee80211vap *vap = ni->ni_vap; struct malo_txdesc *ds; struct malo_txrec *tr; struct malo_txq *txq; uint16_t qos; wh = mtod(m0, struct ieee80211_frame *); iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED; ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); copyhdrlen = hdrlen = ieee80211_anyhdrsize(wh); pktlen = m0->m_pkthdr.len; if (IEEE80211_QOS_HAS_SEQ(wh)) { qos = *(uint16_t *)ieee80211_getqos(wh); if (IEEE80211_IS_DSTODS(wh)) copyhdrlen -= sizeof(qos); } else qos = 0; if (iswep) { struct ieee80211_key *k; /* * Construct the 802.11 header+trailer for an encrypted * frame. The only reason this can fail is because of an * unknown or unsupported cipher/key type. 
* * NB: we do this even though the firmware will ignore * what we've done for WEP and TKIP as we need the * ExtIV filled in for CCMP and this also adjusts * the headers which simplifies our work below. */ k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { /* * This can happen when the key is yanked after the * frame was queued. Just discard the frame; the * 802.11 layer counts failures and provides * debugging/diagnostics. */ m_freem(m0); return EIO; } /* * Adjust the packet length for the crypto additions * done during encap and any other bits that the f/w * will add later on. */ pktlen = m0->m_pkthdr.len; /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } if (ieee80211_radiotap_active_vap(vap)) { sc->malo_tx_th.wt_flags = 0; /* XXX */ if (iswep) sc->malo_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; sc->malo_tx_th.wt_txpower = ni->ni_txpower; sc->malo_tx_th.wt_antenna = sc->malo_txantenna; ieee80211_radiotap_tx(vap, m0); } /* * Copy up/down the 802.11 header; the firmware requires * we present a 2-byte payload length followed by a * 4-address header (w/o QoS), followed (optionally) by * any WEP/ExtIV header (but only filled in for CCMP). * We are assured the mbuf has sufficient headroom to * prepend in-place by the setup of ic_headroom in * malo_attach. */ if (hdrlen < sizeof(struct malo_txrec)) { const int space = sizeof(struct malo_txrec) - hdrlen; if (M_LEADINGSPACE(m0) < space) { /* NB: should never happen */ device_printf(sc->malo_dev, "not enough headroom, need %d found %zd, " "m_flags 0x%x m_len %d\n", space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len); ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len, 0, -1); m_freem(m0); /* XXX stat */ return EIO; } M_PREPEND(m0, space, M_NOWAIT); } tr = mtod(m0, struct malo_txrec *); if (wh != (struct ieee80211_frame *) &tr->wh) ovbcopy(wh, &tr->wh, hdrlen); /* * Note: the "firmware length" is actually the length of the fully * formed "802.11 payload". That is, it's everything except for * the 802.11 header. In particular this includes all crypto * material including the MIC! */ tr->fwlen = htole16(pktlen - hdrlen); /* * Load the DMA map so any coalescing is done. This * also calculates the number of descriptors we need. */ error = malo_tx_dmasetup(sc, bf, m0); if (error != 0) return error; bf->bf_node = ni; /* NB: held reference */ m0 = bf->bf_m; /* NB: may have changed */ tr = mtod(m0, struct malo_txrec *); wh = (struct ieee80211_frame *)&tr->wh; /* * Formulate tx descriptor. */ ds = bf->bf_desc; txq = bf->bf_txq; ds->qosctrl = qos; /* NB: already little-endian */ ds->pktptr = htole32(bf->bf_segs[0].ds_addr); ds->pktlen = htole16(bf->bf_segs[0].ds_len); /* NB: pPhysNext setup once, don't touch */ ds->datarate = IS_DATA_FRAME(wh) ? 1 : 0; ds->sap_pktinfo = 0; ds->format = 0; /* * Select transmit rate. */ switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { case IEEE80211_FC0_TYPE_MGT: sc->malo_stats.mst_tx_mgmt++; /* fall thru... 
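/*
 * Illustrative sketch (not from this driver): the "front matter" described
 * above -- a 2-byte payload length followed by a 4-address 802.11 header --
 * can be modelled with a packed struct.  Everything below (ex_* names, the
 * 30-byte header placeholder) is an assumption for illustration; only the
 * layout and the fwlen arithmetic mirror the code above.
 */
#include <stdint.h>
#include <string.h>

struct ex_txrec {
	uint16_t fwlen;		/* length of everything after the 802.11 header */
	uint8_t  wh[30];	/* 4-address header placeholder (no QoS field) */
} __attribute__((packed));

static void
ex_fill_txrec(struct ex_txrec *tr, const uint8_t *hdr, int hdrlen, int pktlen)
{
	/*
	 * fwlen counts the full 802.11 payload, crypto trailer/MIC included;
	 * the driver additionally byte-swaps it with htole16(), omitted here.
	 */
	tr->fwlen = (uint16_t)(pktlen - hdrlen);
	if (hdrlen > (int)sizeof(tr->wh))
		hdrlen = (int)sizeof(tr->wh);
	memcpy(tr->wh, hdr, (size_t)hdrlen);
}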
*/ case IEEE80211_FC0_TYPE_CTL: ds->txpriority = 1; break; case IEEE80211_FC0_TYPE_DATA: ds->txpriority = txq->qnum; break; default: device_printf(sc->malo_dev, "bogus frame type 0x%x (%s)\n", wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__); /* XXX statistic */ m_freem(m0); return EIO; } #ifdef MALO_DEBUG if (IFF_DUMPPKTS_XMIT(sc)) ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *)+sizeof(uint16_t), m0->m_len - sizeof(uint16_t), ds->datarate, -1); #endif MALO_TXQ_LOCK(txq); if (!IS_DATA_FRAME(wh)) ds->status |= htole32(1); ds->status |= htole32(MALO_TXD_STATUS_FW_OWNED); STAILQ_INSERT_TAIL(&txq->active, bf, bf_list); MALO_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); sc->malo_timer = 5; MALO_TXQ_UNLOCK(txq); return 0; } static int malo_transmit(struct ieee80211com *ic, struct mbuf *m) { struct malo_softc *sc = ic->ic_softc; int error; MALO_LOCK(sc); if (!sc->malo_running) { MALO_UNLOCK(sc); return (ENXIO); } error = mbufq_enqueue(&sc->malo_snd, m); if (error) { MALO_UNLOCK(sc); return (error); } malo_start(sc); MALO_UNLOCK(sc); return (0); } static void malo_start(struct malo_softc *sc) { struct ieee80211_node *ni; struct malo_txq *txq = &sc->malo_txq[0]; struct malo_txbuf *bf = NULL; struct mbuf *m; int nqueued = 0; MALO_LOCK_ASSERT(sc); if (!sc->malo_running || sc->malo_invalid) return; while ((m = mbufq_dequeue(&sc->malo_snd)) != NULL) { ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; bf = malo_getbuf(sc, txq); if (bf == NULL) { mbufq_prepend(&sc->malo_snd, m); sc->malo_stats.mst_tx_qstop++; break; } /* * Pass the frame to the h/w for transmission. */ if (malo_tx_start(sc, ni, bf, m)) { if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); if (bf != NULL) { bf->bf_m = NULL; bf->bf_node = NULL; MALO_TXQ_LOCK(txq); STAILQ_INSERT_HEAD(&txq->free, bf, bf_list); MALO_TXQ_UNLOCK(txq); } ieee80211_free_node(ni); continue; } nqueued++; if (nqueued >= malo_txcoalesce) { /* * Poke the firmware to process queued frames; * see below about (lack of) locking. */ nqueued = 0; malo_hal_txstart(sc->malo_mh, 0/*XXX*/); } } if (nqueued) { /* * NB: We don't need to lock against tx done because * this just prods the firmware to check the transmit * descriptors. The firmware will also start fetching * descriptors by itself if it notices new ones are * present when it goes to deliver a tx done interrupt * to the host. So if we race with tx done processing * it's ok. Delivering the kick here rather than in * malo_tx_start is an optimization to avoid poking the * firmware for each packet. * * NB: the queue id isn't used so 0 is ok. */ malo_hal_txstart(sc->malo_mh, 0/*XXX*/); } } static void malo_watchdog(void *arg) { struct malo_softc *sc = arg; callout_reset(&sc->malo_watchdog_timer, hz, malo_watchdog, sc); if (sc->malo_timer == 0 || --sc->malo_timer > 0) return; if (sc->malo_running && !sc->malo_invalid) { device_printf(sc->malo_dev, "watchdog timeout\n"); /* XXX no way to reset h/w. now */ counter_u64_add(sc->malo_ic.ic_oerrors, 1); sc->malo_stats.mst_watchdog++; } } static int malo_hal_reset(struct malo_softc *sc) { static int first = 0; struct ieee80211com *ic = &sc->malo_ic; struct malo_hal *mh = sc->malo_mh; if (first == 0) { /* * NB: when the device firstly is initialized, sometimes * firmware could override rx/tx dma registers so we re-set * these values once. 
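/*
 * Illustrative sketch (not from this driver): the coalescing in malo_start()
 * above amortizes the "poke the firmware" doorbell over several queued frames
 * and flushes any partial batch at the end.  The ex_* helpers below are
 * hypothetical stand-ins (ex_kick() plays the role of the hal txstart call);
 * only the batching pattern is the point.
 */
#define	EX_TXCOALESCE	8		/* frames to queue before each doorbell */

static int  ex_frames_left = 20;	/* pretend workload */
static void ex_kick(void) { }		/* doorbell write would go here */
static int  ex_have_work(void) { return (ex_frames_left > 0); }
static int  ex_queue_one(void) { ex_frames_left--; return (0); }  /* 0 = queued */

static void
ex_start(void)
{
	int nqueued = 0;

	while (ex_have_work()) {
		if (ex_queue_one() != 0)
			break;			/* out of tx buffers, retry later */
		if (++nqueued >= EX_TXCOALESCE) {
			ex_kick();		/* batch boundary reached */
			nqueued = 0;
		}
	}
	if (nqueued != 0)
		ex_kick();			/* flush the final partial batch */
}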
*/ malo_hal_set_rxtxdma(sc); first = 1; } malo_hal_setantenna(mh, MHA_ANTENNATYPE_RX, sc->malo_rxantenna); malo_hal_setantenna(mh, MHA_ANTENNATYPE_TX, sc->malo_txantenna); malo_hal_setradio(mh, 1, MHP_AUTO_PREAMBLE); malo_chan_set(sc, ic->ic_curchan); /* XXX needs other stuffs? */ return 1; } static __inline struct mbuf * malo_getrxmbuf(struct malo_softc *sc, struct malo_rxbuf *bf) { struct mbuf *m; bus_addr_t paddr; int error; /* XXX don't need mbuf, just dma buffer */ m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); if (m == NULL) { sc->malo_stats.mst_rx_nombuf++; /* XXX */ return NULL; } error = bus_dmamap_load(sc->malo_dmat, bf->bf_dmamap, mtod(m, caddr_t), MJUMPAGESIZE, malo_load_cb, &paddr, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->malo_dev, "%s: bus_dmamap_load failed, error %d\n", __func__, error); m_freem(m); return NULL; } bf->bf_data = paddr; bus_dmamap_sync(sc->malo_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); return m; } static int malo_rxbuf_init(struct malo_softc *sc, struct malo_rxbuf *bf) { struct malo_rxdesc *ds; ds = bf->bf_desc; if (bf->bf_m == NULL) { bf->bf_m = malo_getrxmbuf(sc, bf); if (bf->bf_m == NULL) { /* mark descriptor to be skipped */ ds->rxcontrol = MALO_RXD_CTRL_OS_OWN; /* NB: don't need PREREAD */ MALO_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE); return ENOMEM; } } /* * Setup descriptor. */ ds->qosctrl = 0; ds->snr = 0; ds->status = MALO_RXD_STATUS_IDLE; ds->channel = 0; ds->pktlen = htole16(MALO_RXSIZE); ds->nf = 0; ds->physbuffdata = htole32(bf->bf_data); /* NB: don't touch pPhysNext, set once */ ds->rxcontrol = MALO_RXD_CTRL_DRIVER_OWN; MALO_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return 0; } /* * Setup the rx data structures. This should only be done once or we may get * out of sync with the firmware. */ static int malo_startrecv(struct malo_softc *sc) { struct malo_rxbuf *bf, *prev; struct malo_rxdesc *ds; if (sc->malo_recvsetup == 1) { malo_mode_init(sc); /* set filters, etc. */ return 0; } prev = NULL; STAILQ_FOREACH(bf, &sc->malo_rxbuf, bf_list) { int error = malo_rxbuf_init(sc, bf); if (error != 0) { DPRINTF(sc, MALO_DEBUG_RECV, "%s: malo_rxbuf_init failed %d\n", __func__, error); return error; } if (prev != NULL) { ds = prev->bf_desc; ds->physnext = htole32(bf->bf_daddr); } prev = bf; } if (prev != NULL) { ds = prev->bf_desc; ds->physnext = htole32(STAILQ_FIRST(&sc->malo_rxbuf)->bf_daddr); } sc->malo_recvsetup = 1; malo_mode_init(sc); /* set filters, etc. */ return 0; } static void malo_init_locked(struct malo_softc *sc) { struct malo_hal *mh = sc->malo_mh; int error; MALO_LOCK_ASSERT(sc); /* * Stop anything previously setup. This is safe whether this is * the first time through or not. */ malo_stop(sc); /* * Push state to the firmware. */ if (!malo_hal_reset(sc)) { device_printf(sc->malo_dev, "%s: unable to reset hardware\n", __func__); return; } /* * Setup recv (once); transmit is already good to go. */ error = malo_startrecv(sc); if (error != 0) { device_printf(sc->malo_dev, "%s: unable to start recv logic, error %d\n", __func__, error); return; } /* * Enable interrupts. 
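/*
 * Illustrative sketch (not from this driver): malo_startrecv() above chains
 * the rx descriptors through their "physnext" fields and closes the loop by
 * pointing the last descriptor back at the first, giving the firmware a
 * circular ring.  A minimal model of that linking step (ex_* names are
 * hypothetical; the real code stores bus/DMA addresses it obtained earlier):
 */
#include <stdint.h>

struct ex_rxdesc {
	uint32_t physnext;	/* bus address of the next descriptor */
	uint32_t physbuf;	/* bus address of the receive buffer */
};

static void
ex_link_rx_ring(struct ex_rxdesc *ds, const uint32_t *busaddr, int n)
{
	int i;

	for (i = 0; i < n; i++)
		ds[i].physnext = busaddr[(i + 1) % n];	/* last wraps to first */
}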
*/ sc->malo_imask = MALO_A2HRIC_BIT_RX_RDY | MALO_A2HRIC_BIT_TX_DONE | MALO_A2HRIC_BIT_OPC_DONE | MALO_A2HRIC_BIT_MAC_EVENT | MALO_A2HRIC_BIT_RX_PROBLEM | MALO_A2HRIC_BIT_ICV_ERROR | MALO_A2HRIC_BIT_RADAR_DETECT | MALO_A2HRIC_BIT_CHAN_SWITCH; sc->malo_running = 1; malo_hal_intrset(mh, sc->malo_imask); callout_reset(&sc->malo_watchdog_timer, hz, malo_watchdog, sc); } static void malo_init(void *arg) { struct malo_softc *sc = (struct malo_softc *) arg; struct ieee80211com *ic = &sc->malo_ic; MALO_LOCK(sc); malo_init_locked(sc); MALO_UNLOCK(sc); if (sc->malo_running) ieee80211_start_all(ic); /* start all vap's */ } struct malo_copy_maddr_ctx { uint8_t macs[IEEE80211_ADDR_LEN * MALO_HAL_MCAST_MAX]; int nmc; }; static u_int malo_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int nmc) { struct malo_copy_maddr_ctx *ctx = arg; if (ctx->nmc == MALO_HAL_MCAST_MAX) return (0); IEEE80211_ADDR_COPY(ctx->macs + (ctx->nmc * IEEE80211_ADDR_LEN), LLADDR(sdl)); ctx->nmc++; return (1); } /* * Set the multicast filter contents into the hardware. */ static void malo_setmcastfilter(struct malo_softc *sc) { struct malo_copy_maddr_ctx ctx; struct ieee80211com *ic = &sc->malo_ic; struct ieee80211vap *vap; if (ic->ic_opmode == IEEE80211_M_MONITOR || ic->ic_allmulti > 0 || ic->ic_promisc > 0) goto all; ctx.nmc = 0; TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) if_foreach_llmaddr(vap->iv_ifp, malo_copy_maddr, &ctx); malo_hal_setmcast(sc->malo_mh, ctx.nmc, ctx.macs); all: /* * XXX we don't know how to set the f/w for supporting * IFF_ALLMULTI | IFF_PROMISC cases */ return; } static int malo_mode_init(struct malo_softc *sc) { struct ieee80211com *ic = &sc->malo_ic; struct malo_hal *mh = sc->malo_mh; malo_hal_setpromisc(mh, ic->ic_promisc > 0); malo_setmcastfilter(sc); return ENXIO; } static void malo_tx_draintxq(struct malo_softc *sc, struct malo_txq *txq) { struct ieee80211_node *ni; struct malo_txbuf *bf; u_int ix; /* * NB: this assumes output has been stopped and * we do not need to block malo_tx_tasklet */ for (ix = 0;; ix++) { MALO_TXQ_LOCK(txq); bf = STAILQ_FIRST(&txq->active); if (bf == NULL) { MALO_TXQ_UNLOCK(txq); break; } STAILQ_REMOVE_HEAD(&txq->active, bf_list); MALO_TXQ_UNLOCK(txq); #ifdef MALO_DEBUG if (sc->malo_debug & MALO_DEBUG_RESET) { struct ieee80211com *ic = &sc->malo_ic; const struct malo_txrec *tr = mtod(bf->bf_m, const struct malo_txrec *); malo_printtxbuf(bf, txq->qnum, ix); ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh, bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1); } #endif /* MALO_DEBUG */ bus_dmamap_unload(sc->malo_dmat, bf->bf_dmamap); ni = bf->bf_node; bf->bf_node = NULL; if (ni != NULL) { /* * Reclaim node reference. */ ieee80211_free_node(ni); } m_freem(bf->bf_m); bf->bf_m = NULL; MALO_TXQ_LOCK(txq); STAILQ_INSERT_TAIL(&txq->free, bf, bf_list); txq->nfree++; MALO_TXQ_UNLOCK(txq); } } static void malo_stop(struct malo_softc *sc) { struct malo_hal *mh = sc->malo_mh; int i; DPRINTF(sc, MALO_DEBUG_ANY, "%s: invalid %u running %u\n", __func__, sc->malo_invalid, sc->malo_running); MALO_LOCK_ASSERT(sc); if (!sc->malo_running) return; /* * Shutdown the hardware and driver: * disable interrupts * turn off the radio * drain and release tx queues * * Note that some of this work is not possible if the hardware * is gone (invalid). */ sc->malo_running = 0; callout_stop(&sc->malo_watchdog_timer); sc->malo_timer = 0; /* disable interrupt. */ malo_hal_intrset(mh, 0); /* turn off the radio. */ malo_hal_setradio(mh, 0, MHP_AUTO_PREAMBLE); /* drain and release tx queues. 
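/*
 * Illustrative sketch (not from this driver): malo_copy_maddr() above is a
 * bounded-copy visitor -- each link-level multicast address is appended to a
 * fixed array, and once the firmware limit is reached the callback returns 0
 * so further addresses simply are not counted.  A generic version of the
 * pattern (the ex_* names and the 6-byte address length are assumptions):
 */
#include <stdint.h>
#include <string.h>

#define	EX_ADDR_LEN	6
#define	EX_MCAST_MAX	32

struct ex_maddr_ctx {
	uint8_t	macs[EX_ADDR_LEN * EX_MCAST_MAX];
	int	nmc;
};

/* Visitor: returns 1 if the address was copied, 0 if the table is full. */
static unsigned int
ex_copy_maddr(struct ex_maddr_ctx *ctx, const uint8_t *lladdr)
{
	if (ctx->nmc == EX_MCAST_MAX)
		return (0);
	memcpy(ctx->macs + ctx->nmc * EX_ADDR_LEN, lladdr, EX_ADDR_LEN);
	ctx->nmc++;
	return (1);
}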
*/ for (i = 0; i < MALO_NUM_TX_QUEUES; i++) malo_tx_draintxq(sc, &sc->malo_txq[i]); } static void malo_parent(struct ieee80211com *ic) { struct malo_softc *sc = ic->ic_softc; int startall = 0; MALO_LOCK(sc); if (ic->ic_nrunning > 0) { /* * Beware of being called during attach/detach * to reset promiscuous mode. In that case we * will still be marked UP but not RUNNING. * However trying to re-init the interface * is the wrong thing to do as we've already * torn down much of our state. There's * probably a better way to deal with this. */ if (!sc->malo_running && !sc->malo_invalid) { malo_init(sc); startall = 1; } /* * To avoid rescanning another access point, * do not call malo_init() here. Instead, * only reflect promisc mode settings. */ malo_mode_init(sc); } else if (sc->malo_running) malo_stop(sc); MALO_UNLOCK(sc); if (startall) ieee80211_start_all(ic); } /* * Callback from the 802.11 layer to update the slot time * based on the current setting. We use it to notify the * firmware of ERP changes and the f/w takes care of things * like slot time and preamble. */ static void malo_updateslot(struct ieee80211com *ic) { struct malo_softc *sc = ic->ic_softc; struct malo_hal *mh = sc->malo_mh; int error; /* NB: can be called early; suppress needless cmds */ if (!sc->malo_running) return; DPRINTF(sc, MALO_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x %s slot, (ic_flags 0x%x)\n", __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags, ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", ic->ic_flags); if (ic->ic_flags & IEEE80211_F_SHSLOT) error = malo_hal_set_slot(mh, 1); else error = malo_hal_set_slot(mh, 0); if (error != 0) device_printf(sc->malo_dev, "setting %s slot failed\n", ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long"); } static int malo_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct ieee80211com *ic = vap->iv_ic; struct malo_softc *sc = ic->ic_softc; struct malo_hal *mh = sc->malo_mh; int error; DPRINTF(sc, MALO_DEBUG_STATE, "%s: %s -> %s\n", __func__, ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]); /* * Invoke the net80211 layer first so iv_bss is setup. */ error = MALO_VAP(vap)->malo_newstate(vap, nstate, arg); if (error != 0) return error; if (nstate == IEEE80211_S_RUN && vap->iv_state != IEEE80211_S_RUN) { struct ieee80211_node *ni = vap->iv_bss; enum ieee80211_phymode mode = ieee80211_chan2mode(ni->ni_chan); const struct ieee80211_txparam *tp = &vap->iv_txparms[mode]; DPRINTF(sc, MALO_DEBUG_STATE, "%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s " "capinfo 0x%04x chan %d associd 0x%x mode %d rate %d\n", vap->iv_ifp->if_xname, __func__, vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan), ni->ni_associd, mode, tp->ucastrate); malo_hal_setradio(mh, 1, (ic->ic_flags & IEEE80211_F_SHPREAMBLE) ? MHP_SHORT_PREAMBLE : MHP_LONG_PREAMBLE); malo_hal_setassocid(sc->malo_mh, ni->ni_bssid, ni->ni_associd); malo_hal_set_rate(mh, mode, tp->ucastrate == IEEE80211_FIXED_RATE_NONE ? 0 : malo_fix2rate(tp->ucastrate)); } return 0; } static int malo_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct malo_softc *sc = ic->ic_softc; struct malo_txbuf *bf; struct malo_txq *txq; if (!sc->malo_running || sc->malo_invalid) { m_freem(m); return ENETDOWN; } /* * Grab a TX buffer and associated resources. Note that we depend * on the classification by the 802.11 layer to get to the right h/w * queue. 
Management frames must ALWAYS go on queue 1 but we * cannot just force that here because we may receive non-mgt frames. */ txq = &sc->malo_txq[0]; bf = malo_getbuf(sc, txq); if (bf == NULL) { m_freem(m); return ENOBUFS; } /* * Pass the frame to the h/w for transmission. */ if (malo_tx_start(sc, ni, bf, m) != 0) { bf->bf_m = NULL; bf->bf_node = NULL; MALO_TXQ_LOCK(txq); STAILQ_INSERT_HEAD(&txq->free, bf, bf_list); txq->nfree++; MALO_TXQ_UNLOCK(txq); return EIO; /* XXX */ } /* * NB: We don't need to lock against tx done because this just * prods the firmware to check the transmit descriptors. The firmware * will also start fetching descriptors by itself if it notices * new ones are present when it goes to deliver a tx done interrupt * to the host. So if we race with tx done processing it's ok. * Delivering the kick here rather than in malo_tx_start is * an optimization to avoid poking the firmware for each packet. * * NB: the queue id isn't used so 0 is ok. */ malo_hal_txstart(sc->malo_mh, 0/*XXX*/); return 0; } static void malo_sysctlattach(struct malo_softc *sc) { #ifdef MALO_DEBUG struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->malo_dev); struct sysctl_oid *tree = device_get_sysctl_tree(sc->malo_dev); sc->malo_debug = malo_debug; SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug", CTLFLAG_RW, &sc->malo_debug, 0, "control debugging printfs"); #endif } static void malo_announce(struct malo_softc *sc) { device_printf(sc->malo_dev, "versions [hw %d fw %d.%d.%d.%d] (regioncode %d)\n", sc->malo_hwspecs.hwversion, (sc->malo_hwspecs.fw_releasenum >> 24) & 0xff, (sc->malo_hwspecs.fw_releasenum >> 16) & 0xff, (sc->malo_hwspecs.fw_releasenum >> 8) & 0xff, (sc->malo_hwspecs.fw_releasenum >> 0) & 0xff, sc->malo_hwspecs.regioncode); if (bootverbose || malo_rxbuf != MALO_RXBUF) device_printf(sc->malo_dev, "using %u rx buffers\n", malo_rxbuf); if (bootverbose || malo_txbuf != MALO_TXBUF) device_printf(sc->malo_dev, "using %u tx buffers\n", malo_txbuf); } /* * Convert net80211 channel to a HAL channel. */ static void malo_mapchan(struct malo_hal_channel *hc, const struct ieee80211_channel *chan) { hc->channel = chan->ic_ieee; *(uint32_t *)&hc->flags = 0; if (IEEE80211_IS_CHAN_2GHZ(chan)) hc->flags.freqband = MALO_FREQ_BAND_2DOT4GHZ; } /* * Set/change channels. If the channel is really being changed, * it's done by reseting the chip. To accomplish this we must * first cleanup any pending DMA, then restart stuff after a la * malo_init. */ static int malo_chan_set(struct malo_softc *sc, struct ieee80211_channel *chan) { struct malo_hal *mh = sc->malo_mh; struct malo_hal_channel hchan; DPRINTF(sc, MALO_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n", __func__, chan->ic_freq, chan->ic_flags); /* * Convert to a HAL channel description with the flags constrained * to reflect the current operating mode. */ malo_mapchan(&hchan, chan); malo_hal_intrset(mh, 0); /* disable interrupts */ malo_hal_setchannel(mh, &hchan); malo_hal_settxpower(mh, &hchan); /* * Update internal state. 
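/*
 * Illustrative sketch (not from this driver): malo_announce() above unpacks
 * the 32-bit firmware release number into four dotted bytes, most significant
 * first.  A standalone equivalent (hypothetical name):
 */
#include <stdint.h>
#include <stdio.h>

static void
ex_print_fw_release(uint32_t rel)
{
	printf("fw %u.%u.%u.%u\n",
	    (unsigned)(rel >> 24) & 0xffu, (unsigned)(rel >> 16) & 0xffu,
	    (unsigned)(rel >> 8) & 0xffu, (unsigned)rel & 0xffu);
}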
*/ sc->malo_tx_th.wt_chan_freq = htole16(chan->ic_freq); sc->malo_rx_th.wr_chan_freq = htole16(chan->ic_freq); if (IEEE80211_IS_CHAN_ANYG(chan)) { sc->malo_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G); sc->malo_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G); } else { sc->malo_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B); sc->malo_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B); } sc->malo_curchan = hchan; malo_hal_intrset(mh, sc->malo_imask); return 0; } static void malo_scan_start(struct ieee80211com *ic) { struct malo_softc *sc = ic->ic_softc; DPRINTF(sc, MALO_DEBUG_STATE, "%s\n", __func__); } static void malo_scan_end(struct ieee80211com *ic) { struct malo_softc *sc = ic->ic_softc; DPRINTF(sc, MALO_DEBUG_STATE, "%s\n", __func__); } static void malo_set_channel(struct ieee80211com *ic) { struct malo_softc *sc = ic->ic_softc; (void) malo_chan_set(sc, ic->ic_curchan); } static void malo_rx_proc(void *arg, int npending) { struct epoch_tracker et; struct malo_softc *sc = arg; struct ieee80211com *ic = &sc->malo_ic; struct malo_rxbuf *bf; struct malo_rxdesc *ds; struct mbuf *m, *mnew; struct ieee80211_qosframe *wh; struct ieee80211_node *ni; int off, len, hdrlen, pktlen, rssi, ntodo; uint8_t *data, status; uint32_t readptr, writeptr; DPRINTF(sc, MALO_DEBUG_RX_PROC, "%s: pending %u rdptr(0x%x) 0x%x wrptr(0x%x) 0x%x\n", __func__, npending, sc->malo_hwspecs.rxdesc_read, malo_bar0_read4(sc, sc->malo_hwspecs.rxdesc_read), sc->malo_hwspecs.rxdesc_write, malo_bar0_read4(sc, sc->malo_hwspecs.rxdesc_write)); readptr = malo_bar0_read4(sc, sc->malo_hwspecs.rxdesc_read); writeptr = malo_bar0_read4(sc, sc->malo_hwspecs.rxdesc_write); if (readptr == writeptr) return; bf = sc->malo_rxnext; for (ntodo = malo_rxquota; ntodo > 0 && readptr != writeptr; ntodo--) { if (bf == NULL) { bf = STAILQ_FIRST(&sc->malo_rxbuf); break; } ds = bf->bf_desc; if (bf->bf_m == NULL) { /* * If data allocation failed previously there * will be no buffer; try again to re-populate it. * Note the firmware will not advance to the next * descriptor with a dma buffer so we must mimic * this or we'll get out of sync. */ DPRINTF(sc, MALO_DEBUG_ANY, "%s: rx buf w/o dma memory\n", __func__); (void)malo_rxbuf_init(sc, bf); break; } MALO_RXDESC_SYNC(sc, ds, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (ds->rxcontrol != MALO_RXD_CTRL_DMA_OWN) break; readptr = le32toh(ds->physnext); #ifdef MALO_DEBUG if (sc->malo_debug & MALO_DEBUG_RECV_DESC) malo_printrxbuf(bf, 0); #endif status = ds->status; if (status & MALO_RXD_STATUS_DECRYPT_ERR_MASK) { counter_u64_add(ic->ic_ierrors, 1); goto rx_next; } /* * Sync the data buffer. */ len = le16toh(ds->pktlen); bus_dmamap_sync(sc->malo_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTREAD); /* * The 802.11 header is provided all or in part at the front; * use it to calculate the true size of the header that we'll * construct below. We use this to figure out where to copy * payload prior to constructing the header. */ m = bf->bf_m; data = mtod(m, uint8_t *); hdrlen = ieee80211_anyhdrsize(data + sizeof(uint16_t)); off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4); /* * Calculate RSSI. XXX wrong */ rssi = 2 * ((int) ds->snr - ds->nf); /* NB: .5 dBm */ if (rssi > 100) rssi = 100; pktlen = hdrlen + (len - off); /* * NB: we know our frame is at least as large as * IEEE80211_MIN_LEN because there is a 4-address frame at * the front. Hence there's no need to vet the packet length. * If the frame in fact is too small it should be discarded * at the net80211 layer. 
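/*
 * Illustrative sketch (not from this driver): the rx path above derives a
 * rough RSSI as 2 * (snr - nf), in roughly .5 dBm units, clamped to 100; the
 * driver itself flags the formula as approximate ("XXX wrong").  As a
 * standalone helper it would look like this (hypothetical name):
 */
static int
ex_snr_to_rssi(unsigned char snr, unsigned char nf)
{
	int rssi = 2 * ((int)snr - (int)nf);

	return (rssi > 100 ? 100 : rssi);
}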
*/ /* XXX don't need mbuf, just dma buffer */ mnew = malo_getrxmbuf(sc, bf); if (mnew == NULL) { counter_u64_add(ic->ic_ierrors, 1); goto rx_next; } /* * Attach the dma buffer to the mbuf; malo_rxbuf_init will * re-setup the rx descriptor using the replacement dma * buffer we just installed above. */ bf->bf_m = mnew; m->m_data += off - hdrlen; m->m_pkthdr.len = m->m_len = pktlen; /* * Piece 802.11 header together. */ wh = mtod(m, struct ieee80211_qosframe *); /* NB: don't need to do this sometimes but ... */ /* XXX special case so we can memcpy after m_devget? */ ovbcopy(data + sizeof(uint16_t), wh, hdrlen); if (IEEE80211_QOS_HAS_SEQ(wh)) *(uint16_t *)ieee80211_getqos(wh) = ds->qosctrl; if (ieee80211_radiotap_active(ic)) { sc->malo_rx_th.wr_flags = 0; sc->malo_rx_th.wr_rate = ds->rate; sc->malo_rx_th.wr_antsignal = rssi; sc->malo_rx_th.wr_antnoise = ds->nf; } #ifdef MALO_DEBUG if (IFF_DUMPPKTS_RECV(sc, wh)) { ieee80211_dump_pkt(ic, mtod(m, caddr_t), len, ds->rate, rssi); } #endif /* dispatch */ ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); NET_EPOCH_ENTER(et); if (ni != NULL) { (void) ieee80211_input(ni, m, rssi, ds->nf); ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, rssi, ds->nf); NET_EPOCH_EXIT(et); rx_next: /* NB: ignore ENOMEM so we process more descriptors */ (void) malo_rxbuf_init(sc, bf); bf = STAILQ_NEXT(bf, bf_list); } malo_bar0_write4(sc, sc->malo_hwspecs.rxdesc_read, readptr); sc->malo_rxnext = bf; if (mbufq_first(&sc->malo_snd) != NULL) malo_start(sc); } /* * Reclaim all tx queue resources. */ static void malo_tx_cleanup(struct malo_softc *sc) { int i; for (i = 0; i < MALO_NUM_TX_QUEUES; i++) malo_tx_cleanupq(sc, &sc->malo_txq[i]); } int malo_detach(struct malo_softc *sc) { struct ieee80211com *ic = &sc->malo_ic; malo_stop(sc); if (sc->malo_tq != NULL) { taskqueue_drain(sc->malo_tq, &sc->malo_rxtask); taskqueue_drain(sc->malo_tq, &sc->malo_txtask); taskqueue_free(sc->malo_tq); sc->malo_tq = NULL; } /* * NB: the order of these is important: * o call the 802.11 layer before detaching the hal to * insure callbacks into the driver to delete global * key cache entries can be handled * o reclaim the tx queue data structures after calling * the 802.11 layer as we'll get called back to reclaim * node state and potentially want to use them * o to cleanup the tx queues the hal is called, so detach * it last * Other than that, it's straightforward... */ ieee80211_ifdetach(ic); callout_drain(&sc->malo_watchdog_timer); malo_dma_cleanup(sc); malo_tx_cleanup(sc); malo_hal_detach(sc->malo_mh); mbufq_drain(&sc->malo_snd); MALO_LOCK_DESTROY(sc); return 0; } void malo_shutdown(struct malo_softc *sc) { malo_stop(sc); } void malo_suspend(struct malo_softc *sc) { malo_stop(sc); } void malo_resume(struct malo_softc *sc) { if (sc->malo_ic.ic_nrunning > 0) malo_init(sc); } diff --git a/sys/dev/mwl/if_mwl.c b/sys/dev/mwl/if_mwl.c index 6e8e560ba663..9918366f2c75 100644 --- a/sys/dev/mwl/if_mwl.c +++ b/sys/dev/mwl/if_mwl.c @@ -1,4832 +1,4832 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting * Copyright (c) 2007-2008 Marvell Semiconductor, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); /* * Driver for the Marvell 88W8363 Wireless LAN controller. */ #include "opt_inet.h" #include "opt_mwl.h" #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #endif /* INET */ #include #include /* idiomatic shorthands: MS = mask+shift, SM = shift+mask */ #define MS(v,x) (((v) & x) >> x##_S) #define SM(v,x) (((v) << x##_S) & x) static struct ieee80211vap *mwl_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void mwl_vap_delete(struct ieee80211vap *); static int mwl_setupdma(struct mwl_softc *); static int mwl_hal_reset(struct mwl_softc *sc); static int mwl_init(struct mwl_softc *); static void mwl_parent(struct ieee80211com *); static int mwl_reset(struct ieee80211vap *, u_long); static void mwl_stop(struct mwl_softc *); static void mwl_start(struct mwl_softc *); static int mwl_transmit(struct ieee80211com *, struct mbuf *); static int mwl_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static int mwl_media_change(struct ifnet *); static void mwl_watchdog(void *); static int mwl_ioctl(struct ieee80211com *, u_long, void *); static void mwl_radar_proc(void *, int); static void mwl_chanswitch_proc(void *, int); static void mwl_bawatchdog_proc(void *, int); static int mwl_key_alloc(struct ieee80211vap *, struct ieee80211_key *, ieee80211_keyix *, ieee80211_keyix *); static int mwl_key_delete(struct ieee80211vap *, const struct ieee80211_key *); static int mwl_key_set(struct ieee80211vap *, const struct ieee80211_key *); static int _mwl_key_set(struct ieee80211vap *, const struct ieee80211_key *, const uint8_t mac[IEEE80211_ADDR_LEN]); static int mwl_mode_init(struct mwl_softc *); static void mwl_update_mcast(struct ieee80211com *); static void mwl_update_promisc(struct ieee80211com *); static void mwl_updateslot(struct ieee80211com *); static int mwl_beacon_setup(struct ieee80211vap *); static void mwl_beacon_update(struct ieee80211vap *, int); #ifdef MWL_HOST_PS_SUPPORT static void mwl_update_ps(struct ieee80211vap *, int); static int mwl_set_tim(struct ieee80211_node *, int); #endif static int 
mwl_dma_setup(struct mwl_softc *); static void mwl_dma_cleanup(struct mwl_softc *); static struct ieee80211_node *mwl_node_alloc(struct ieee80211vap *, const uint8_t [IEEE80211_ADDR_LEN]); static void mwl_node_cleanup(struct ieee80211_node *); static void mwl_node_drain(struct ieee80211_node *); static void mwl_node_getsignal(const struct ieee80211_node *, int8_t *, int8_t *); static void mwl_node_getmimoinfo(const struct ieee80211_node *, struct ieee80211_mimo_info *); static int mwl_rxbuf_init(struct mwl_softc *, struct mwl_rxbuf *); static void mwl_rx_proc(void *, int); static void mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *, int); static int mwl_tx_setup(struct mwl_softc *, int, int); static int mwl_wme_update(struct ieee80211com *); static void mwl_tx_cleanupq(struct mwl_softc *, struct mwl_txq *); static void mwl_tx_cleanup(struct mwl_softc *); static uint16_t mwl_calcformat(uint8_t rate, const struct ieee80211_node *); static int mwl_tx_start(struct mwl_softc *, struct ieee80211_node *, struct mwl_txbuf *, struct mbuf *); static void mwl_tx_proc(void *, int); static int mwl_chan_set(struct mwl_softc *, struct ieee80211_channel *); static void mwl_draintxq(struct mwl_softc *); static void mwl_cleartxq(struct mwl_softc *, struct ieee80211vap *); static int mwl_recv_action(struct ieee80211_node *, const struct ieee80211_frame *, const uint8_t *, const uint8_t *); static int mwl_addba_request(struct ieee80211_node *, struct ieee80211_tx_ampdu *, int dialogtoken, int baparamset, int batimeout); static int mwl_addba_response(struct ieee80211_node *, struct ieee80211_tx_ampdu *, int status, int baparamset, int batimeout); static void mwl_addba_stop(struct ieee80211_node *, struct ieee80211_tx_ampdu *); static int mwl_startrecv(struct mwl_softc *); static MWL_HAL_APMODE mwl_getapmode(const struct ieee80211vap *, struct ieee80211_channel *); static int mwl_setapmode(struct ieee80211vap *, struct ieee80211_channel*); static void mwl_scan_start(struct ieee80211com *); static void mwl_scan_end(struct ieee80211com *); static void mwl_set_channel(struct ieee80211com *); static int mwl_peerstadb(struct ieee80211_node *, int aid, int staid, MWL_HAL_PEERINFO *pi); static int mwl_localstadb(struct ieee80211vap *); static int mwl_newstate(struct ieee80211vap *, enum ieee80211_state, int); static int allocstaid(struct mwl_softc *sc, int aid); static void delstaid(struct mwl_softc *sc, int staid); static void mwl_newassoc(struct ieee80211_node *, int); static void mwl_agestations(void *); static int mwl_setregdomain(struct ieee80211com *, struct ieee80211_regdomain *, int, struct ieee80211_channel []); static void mwl_getradiocaps(struct ieee80211com *, int, int *, struct ieee80211_channel []); static int mwl_getchannels(struct mwl_softc *); static void mwl_sysctlattach(struct mwl_softc *); static void mwl_announce(struct mwl_softc *); SYSCTL_NODE(_hw, OID_AUTO, mwl, CTLFLAG_RD, 0, "Marvell driver parameters"); static int mwl_rxdesc = MWL_RXDESC; /* # rx desc's to allocate */ SYSCTL_INT(_hw_mwl, OID_AUTO, rxdesc, CTLFLAG_RW, &mwl_rxdesc, 0, "rx descriptors allocated"); static int mwl_rxbuf = MWL_RXBUF; /* # rx buffers to allocate */ SYSCTL_INT(_hw_mwl, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &mwl_rxbuf, 0, "rx buffers allocated"); static int mwl_txbuf = MWL_TXBUF; /* # tx buffers to allocate */ SYSCTL_INT(_hw_mwl, OID_AUTO, txbuf, CTLFLAG_RWTUN, &mwl_txbuf, 0, "tx buffers allocated"); static int mwl_txcoalesce = 8; /* # tx packets to q before poking f/w*/ SYSCTL_INT(_hw_mwl, OID_AUTO, txcoalesce, 
CTLFLAG_RWTUN, &mwl_txcoalesce, 0, "tx buffers to send at once"); static int mwl_rxquota = MWL_RXBUF; /* # max buffers to process */ SYSCTL_INT(_hw_mwl, OID_AUTO, rxquota, CTLFLAG_RWTUN, &mwl_rxquota, 0, "max rx buffers to process per interrupt"); static int mwl_rxdmalow = 3; /* # min buffers for wakeup */ SYSCTL_INT(_hw_mwl, OID_AUTO, rxdmalow, CTLFLAG_RWTUN, &mwl_rxdmalow, 0, "min free rx buffers before restarting traffic"); #ifdef MWL_DEBUG static int mwl_debug = 0; SYSCTL_INT(_hw_mwl, OID_AUTO, debug, CTLFLAG_RWTUN, &mwl_debug, 0, "control debugging printfs"); enum { MWL_DEBUG_XMIT = 0x00000001, /* basic xmit operation */ MWL_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */ MWL_DEBUG_RECV = 0x00000004, /* basic recv operation */ MWL_DEBUG_RECV_DESC = 0x00000008, /* recv descriptors */ MWL_DEBUG_RESET = 0x00000010, /* reset processing */ MWL_DEBUG_BEACON = 0x00000020, /* beacon handling */ MWL_DEBUG_INTR = 0x00000040, /* ISR */ MWL_DEBUG_TX_PROC = 0x00000080, /* tx ISR proc */ MWL_DEBUG_RX_PROC = 0x00000100, /* rx ISR proc */ MWL_DEBUG_KEYCACHE = 0x00000200, /* key cache management */ MWL_DEBUG_STATE = 0x00000400, /* 802.11 state transitions */ MWL_DEBUG_NODE = 0x00000800, /* node management */ MWL_DEBUG_RECV_ALL = 0x00001000, /* trace all frames (beacons) */ MWL_DEBUG_TSO = 0x00002000, /* TSO processing */ MWL_DEBUG_AMPDU = 0x00004000, /* BA stream handling */ MWL_DEBUG_ANY = 0xffffffff }; #define IS_BEACON(wh) \ ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK|IEEE80211_FC0_SUBTYPE_MASK)) == \ (IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_BEACON)) #define IFF_DUMPPKTS_RECV(sc, wh) \ ((sc->sc_debug & MWL_DEBUG_RECV) && \ ((sc->sc_debug & MWL_DEBUG_RECV_ALL) || !IS_BEACON(wh))) #define IFF_DUMPPKTS_XMIT(sc) \ (sc->sc_debug & MWL_DEBUG_XMIT) #define DPRINTF(sc, m, fmt, ...) do { \ if (sc->sc_debug & (m)) \ printf(fmt, __VA_ARGS__); \ } while (0) #define KEYPRINTF(sc, hk, mac) do { \ if (sc->sc_debug & MWL_DEBUG_KEYCACHE) \ mwl_keyprint(sc, __func__, hk, mac); \ } while (0) static void mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix); static void mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix); #else #define IFF_DUMPPKTS_RECV(sc, wh) 0 #define IFF_DUMPPKTS_XMIT(sc) 0 #define DPRINTF(sc, m, fmt, ...) do { (void )sc; } while (0) #define KEYPRINTF(sc, k, mac) do { (void )sc; } while (0) #endif static MALLOC_DEFINE(M_MWLDEV, "mwldev", "mwl driver dma buffers"); /* * Each packet has fixed front matter: a 2-byte length * of the payload, followed by a 4-address 802.11 header * (regardless of the actual header and always w/o any * QoS header). The payload then follows. */ struct mwltxrec { uint16_t fwlen; struct ieee80211_frame_addr4 wh; } __packed; /* * Read/Write shorthands for accesses to BAR 0. Note * that all BAR 1 operations are done in the "hal" and * there should be no reference to them here. */ #ifdef MWL_DEBUG static __inline uint32_t RD4(struct mwl_softc *sc, bus_size_t off) { return bus_space_read_4(sc->sc_io0t, sc->sc_io0h, off); } #endif static __inline void WR4(struct mwl_softc *sc, bus_size_t off, uint32_t val) { bus_space_write_4(sc->sc_io0t, sc->sc_io0h, off, val); } int mwl_attach(uint16_t devid, struct mwl_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct mwl_hal *mh; int error = 0; DPRINTF(sc, MWL_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid); /* * Setup the RX free list lock early, so it can be consistently * removed. 
*/ MWL_RXFREE_INIT(sc); mh = mwl_hal_attach(sc->sc_dev, devid, sc->sc_io1h, sc->sc_io1t, sc->sc_dmat); if (mh == NULL) { device_printf(sc->sc_dev, "unable to attach HAL\n"); error = EIO; goto bad; } sc->sc_mh = mh; /* * Load firmware so we can get setup. We arbitrarily * pick station firmware; we'll re-load firmware as * needed so setting up the wrong mode isn't a big deal. */ if (mwl_hal_fwload(mh, NULL) != 0) { device_printf(sc->sc_dev, "unable to setup builtin firmware\n"); error = EIO; goto bad1; } if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) { device_printf(sc->sc_dev, "unable to fetch h/w specs\n"); error = EIO; goto bad1; } error = mwl_getchannels(sc); if (error != 0) goto bad1; sc->sc_txantenna = 0; /* h/w default */ sc->sc_rxantenna = 0; /* h/w default */ sc->sc_invalid = 0; /* ready to go, enable int handling */ sc->sc_ageinterval = MWL_AGEINTERVAL; /* * Allocate tx+rx descriptors and populate the lists. * We immediately push the information to the firmware * as otherwise it gets upset. */ error = mwl_dma_setup(sc); if (error != 0) { device_printf(sc->sc_dev, "failed to setup descriptors: %d\n", error); goto bad1; } error = mwl_setupdma(sc); /* push to firmware */ if (error != 0) /* NB: mwl_setupdma prints msg */ goto bad1; callout_init(&sc->sc_timer, 1); callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0); mbufq_init(&sc->sc_snd, ifqmaxlen); sc->sc_tq = taskqueue_create("mwl_taskq", M_NOWAIT, taskqueue_thread_enqueue, &sc->sc_tq); taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->sc_dev)); - TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc); + NET_TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc); TASK_INIT(&sc->sc_radartask, 0, mwl_radar_proc, sc); TASK_INIT(&sc->sc_chanswitchtask, 0, mwl_chanswitch_proc, sc); TASK_INIT(&sc->sc_bawatchdogtask, 0, mwl_bawatchdog_proc, sc); /* NB: insure BK queue is the lowest priority h/w queue */ if (!mwl_tx_setup(sc, WME_AC_BK, MWL_WME_AC_BK)) { device_printf(sc->sc_dev, "unable to setup xmit queue for %s traffic!\n", ieee80211_wme_acnames[WME_AC_BK]); error = EIO; goto bad2; } if (!mwl_tx_setup(sc, WME_AC_BE, MWL_WME_AC_BE) || !mwl_tx_setup(sc, WME_AC_VI, MWL_WME_AC_VI) || !mwl_tx_setup(sc, WME_AC_VO, MWL_WME_AC_VO)) { /* * Not enough hardware tx queues to properly do WME; * just punt and assign them all to the same h/w queue. * We could do a better job of this if, for example, * we allocate queues when we switch from station to * AP mode. 
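/*
 * Illustrative sketch (not from this driver): when there are not enough
 * hardware queues for per-AC WME service, the fallback just below collapses
 * every access category onto the single queue that was set up for BK.  A
 * self-contained model of that mapping table and its collapse (ex_* names are
 * hypothetical; the four WME access categories are assumed):
 */
enum { EX_AC_BE, EX_AC_BK, EX_AC_VI, EX_AC_VO, EX_AC_MAX };

struct ex_txq { int qnum; };

static void
ex_collapse_acs(struct ex_txq *ac2q[EX_AC_MAX])
{
	/* all ACs share BK's queue; 802.11 classification still happens above */
	ac2q[EX_AC_BE] = ac2q[EX_AC_BK];
	ac2q[EX_AC_VI] = ac2q[EX_AC_BK];
	ac2q[EX_AC_VO] = ac2q[EX_AC_BK];
}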
*/ if (sc->sc_ac2q[WME_AC_VI] != NULL) mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]); if (sc->sc_ac2q[WME_AC_BE] != NULL) mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]); sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK]; sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK]; sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK]; } TASK_INIT(&sc->sc_txtask, 0, mwl_tx_proc, sc); ic->ic_softc = sc; ic->ic_name = device_get_nameunit(sc->sc_dev); /* XXX not right but it's not used anywhere important */ ic->ic_phytype = IEEE80211_T_OFDM; ic->ic_opmode = IEEE80211_M_STA; ic->ic_caps = IEEE80211_C_STA /* station mode supported */ | IEEE80211_C_HOSTAP /* hostap mode */ | IEEE80211_C_MONITOR /* monitor mode */ #if 0 | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ | IEEE80211_C_AHDEMO /* adhoc demo mode */ #endif | IEEE80211_C_MBSS /* mesh point link mode */ | IEEE80211_C_WDS /* WDS supported */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_WME /* WME/WMM supported */ | IEEE80211_C_BURST /* xmit bursting supported */ | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ | IEEE80211_C_BGSCAN /* capable of bg scanning */ | IEEE80211_C_TXFRAG /* handle tx frags */ | IEEE80211_C_TXPMGT /* capable of txpow mgt */ | IEEE80211_C_DFS /* DFS supported */ ; ic->ic_htcaps = IEEE80211_HTCAP_SMPS_ENA /* SM PS mode enabled */ | IEEE80211_HTCAP_CHWIDTH40 /* 40MHz channel width */ | IEEE80211_HTCAP_SHORTGI20 /* short GI in 20MHz */ | IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */ | IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */ #if MWL_AGGR_SIZE == 7935 | IEEE80211_HTCAP_MAXAMSDU_7935 /* max A-MSDU length */ #else | IEEE80211_HTCAP_MAXAMSDU_3839 /* max A-MSDU length */ #endif #if 0 | IEEE80211_HTCAP_PSMP /* PSMP supported */ | IEEE80211_HTCAP_40INTOLERANT /* 40MHz intolerant */ #endif /* s/w capabilities */ | IEEE80211_HTC_HT /* HT operation */ | IEEE80211_HTC_AMPDU /* tx A-MPDU */ | IEEE80211_HTC_AMSDU /* tx A-MSDU */ | IEEE80211_HTC_SMPS /* SMPS available */ ; /* * Mark h/w crypto support. * XXX no way to query h/w support. */ ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP | IEEE80211_CRYPTO_AES_CCM | IEEE80211_CRYPTO_TKIP | IEEE80211_CRYPTO_TKIPMIC ; /* * Transmit requires space in the packet for a special * format transmit record and optional padding between * this record and the payload. Ask the net80211 layer * to arrange this when encapsulating packets so we can * add it efficiently. */ ic->ic_headroom = sizeof(struct mwltxrec) - sizeof(struct ieee80211_frame); IEEE80211_ADDR_COPY(ic->ic_macaddr, sc->sc_hwspecs.macAddr); /* call MI attach routine. 
*/ ieee80211_ifattach(ic); ic->ic_setregdomain = mwl_setregdomain; ic->ic_getradiocaps = mwl_getradiocaps; /* override default methods */ ic->ic_raw_xmit = mwl_raw_xmit; ic->ic_newassoc = mwl_newassoc; ic->ic_updateslot = mwl_updateslot; ic->ic_update_mcast = mwl_update_mcast; ic->ic_update_promisc = mwl_update_promisc; ic->ic_wme.wme_update = mwl_wme_update; ic->ic_transmit = mwl_transmit; ic->ic_ioctl = mwl_ioctl; ic->ic_parent = mwl_parent; ic->ic_node_alloc = mwl_node_alloc; sc->sc_node_cleanup = ic->ic_node_cleanup; ic->ic_node_cleanup = mwl_node_cleanup; sc->sc_node_drain = ic->ic_node_drain; ic->ic_node_drain = mwl_node_drain; ic->ic_node_getsignal = mwl_node_getsignal; ic->ic_node_getmimoinfo = mwl_node_getmimoinfo; ic->ic_scan_start = mwl_scan_start; ic->ic_scan_end = mwl_scan_end; ic->ic_set_channel = mwl_set_channel; sc->sc_recv_action = ic->ic_recv_action; ic->ic_recv_action = mwl_recv_action; sc->sc_addba_request = ic->ic_addba_request; ic->ic_addba_request = mwl_addba_request; sc->sc_addba_response = ic->ic_addba_response; ic->ic_addba_response = mwl_addba_response; sc->sc_addba_stop = ic->ic_addba_stop; ic->ic_addba_stop = mwl_addba_stop; ic->ic_vap_create = mwl_vap_create; ic->ic_vap_delete = mwl_vap_delete; ieee80211_radiotap_attach(ic, &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), MWL_TX_RADIOTAP_PRESENT, &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), MWL_RX_RADIOTAP_PRESENT); /* * Setup dynamic sysctl's now that country code and * regdomain are available from the hal. */ mwl_sysctlattach(sc); if (bootverbose) ieee80211_announce(ic); mwl_announce(sc); return 0; bad2: mwl_dma_cleanup(sc); bad1: mwl_hal_detach(mh); bad: MWL_RXFREE_DESTROY(sc); sc->sc_invalid = 1; return error; } int mwl_detach(struct mwl_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; MWL_LOCK(sc); mwl_stop(sc); MWL_UNLOCK(sc); /* * NB: the order of these is important: * o call the 802.11 layer before detaching the hal to * insure callbacks into the driver to delete global * key cache entries can be handled * o reclaim the tx queue data structures after calling * the 802.11 layer as we'll get called back to reclaim * node state and potentially want to use them * o to cleanup the tx queues the hal is called, so detach * it last * Other than that, it's straightforward... */ ieee80211_ifdetach(ic); callout_drain(&sc->sc_watchdog); mwl_dma_cleanup(sc); MWL_RXFREE_DESTROY(sc); mwl_tx_cleanup(sc); mwl_hal_detach(sc->sc_mh); mbufq_drain(&sc->sc_snd); return 0; } /* * MAC address handling for multiple BSS on the same radio. * The first vap uses the MAC address from the EEPROM. For * subsequent vap's we set the U/L bit (bit 1) in the MAC * address and use the next six bits as an index. 
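/*
 * Illustrative sketch (not from this driver): the addressing scheme described
 * above can be modelled as follows -- pick a free index from a 32-bit
 * allocation mask, set the locally-administered (U/L) bit in byte 0 of the
 * MAC and store the index in the bits above it, then recover the index from
 * byte 0 when the address is reclaimed.  The ex_* names are hypothetical and
 * the refcounting of index 0 is omitted.
 */
#include <stdint.h>

static uint32_t ex_bssidmask;		/* bit i set => index i in use */

static int
ex_assign_index(uint8_t *mac0)		/* mac0 points at byte 0 of the MAC */
{
	int i;

	for (i = 0; i < 32; i++)
		if ((ex_bssidmask & (1u << i)) == 0)
			break;
	if (i == 32)
		return (-1);			/* no free index */
	ex_bssidmask |= 1u << i;
	if (i != 0)				/* index 0 keeps the EEPROM address */
		*mac0 |= (uint8_t)((i << 2) | 0x02);	/* U/L bit + index */
	return (i);
}

static void
ex_reclaim_index(uint8_t mac0)
{
	ex_bssidmask &= ~(1u << (mac0 >> 2));	/* index lives in bits 2..7 */
}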
*/ static void assign_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone) { int i; if (clone && mwl_hal_ismbsscapable(sc->sc_mh)) { /* NB: we only do this if h/w supports multiple bssid */ for (i = 0; i < 32; i++) if ((sc->sc_bssidmask & (1<sc_bssidmask |= 1<sc_nbssid0++; } static void reclaim_address(struct mwl_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN]) { int i = mac[0] >> 2; if (i != 0 || --sc->sc_nbssid0 == 0) sc->sc_bssidmask &= ~(1<ic_softc; struct mwl_hal *mh = sc->sc_mh; struct ieee80211vap *vap, *apvap; struct mwl_hal_vap *hvap; struct mwl_vap *mvp; uint8_t mac[IEEE80211_ADDR_LEN]; IEEE80211_ADDR_COPY(mac, mac0); switch (opmode) { case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: if ((flags & IEEE80211_CLONE_MACADDR) == 0) assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID); hvap = mwl_hal_newvap(mh, MWL_HAL_AP, mac); if (hvap == NULL) { if ((flags & IEEE80211_CLONE_MACADDR) == 0) reclaim_address(sc, mac); return NULL; } break; case IEEE80211_M_STA: if ((flags & IEEE80211_CLONE_MACADDR) == 0) assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID); hvap = mwl_hal_newvap(mh, MWL_HAL_STA, mac); if (hvap == NULL) { if ((flags & IEEE80211_CLONE_MACADDR) == 0) reclaim_address(sc, mac); return NULL; } /* no h/w beacon miss support; always use s/w */ flags |= IEEE80211_CLONE_NOBEACONS; break; case IEEE80211_M_WDS: hvap = NULL; /* NB: we use associated AP vap */ if (sc->sc_napvaps == 0) return NULL; /* no existing AP vap */ break; case IEEE80211_M_MONITOR: hvap = NULL; break; case IEEE80211_M_IBSS: case IEEE80211_M_AHDEMO: default: return NULL; } mvp = malloc(sizeof(struct mwl_vap), M_80211_VAP, M_WAITOK | M_ZERO); mvp->mv_hvap = hvap; if (opmode == IEEE80211_M_WDS) { /* * WDS vaps must have an associated AP vap; find one. * XXX not right. */ TAILQ_FOREACH(apvap, &ic->ic_vaps, iv_next) if (apvap->iv_opmode == IEEE80211_M_HOSTAP) { mvp->mv_ap_hvap = MWL_VAP(apvap)->mv_hvap; break; } KASSERT(mvp->mv_ap_hvap != NULL, ("no ap vap")); } vap = &mvp->mv_vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); /* override with driver methods */ mvp->mv_newstate = vap->iv_newstate; vap->iv_newstate = mwl_newstate; vap->iv_max_keyix = 0; /* XXX */ vap->iv_key_alloc = mwl_key_alloc; vap->iv_key_delete = mwl_key_delete; vap->iv_key_set = mwl_key_set; #ifdef MWL_HOST_PS_SUPPORT if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) { vap->iv_update_ps = mwl_update_ps; mvp->mv_set_tim = vap->iv_set_tim; vap->iv_set_tim = mwl_set_tim; } #endif vap->iv_reset = mwl_reset; vap->iv_update_beacon = mwl_beacon_update; /* override max aid so sta's cannot assoc when we're out of sta id's */ vap->iv_max_aid = MWL_MAXSTAID; /* override default A-MPDU rx parameters */ vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K; vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4; /* complete setup */ ieee80211_vap_attach(vap, mwl_media_change, ieee80211_media_status, mac); switch (vap->iv_opmode) { case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: case IEEE80211_M_STA: /* * Setup sta db entry for local address. */ mwl_localstadb(vap); if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_MBSS) sc->sc_napvaps++; else sc->sc_nstavaps++; break; case IEEE80211_M_WDS: sc->sc_nwdsvaps++; break; default: break; } /* * Setup overall operating mode. 
*/ if (sc->sc_napvaps) ic->ic_opmode = IEEE80211_M_HOSTAP; else if (sc->sc_nstavaps) ic->ic_opmode = IEEE80211_M_STA; else ic->ic_opmode = opmode; return vap; } static void mwl_vap_delete(struct ieee80211vap *vap) { struct mwl_vap *mvp = MWL_VAP(vap); struct mwl_softc *sc = vap->iv_ic->ic_softc; struct mwl_hal *mh = sc->sc_mh; struct mwl_hal_vap *hvap = mvp->mv_hvap; enum ieee80211_opmode opmode = vap->iv_opmode; /* XXX disallow ap vap delete if WDS still present */ if (sc->sc_running) { /* quiesce h/w while we remove the vap */ mwl_hal_intrset(mh, 0); /* disable interrupts */ } ieee80211_vap_detach(vap); switch (opmode) { case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: case IEEE80211_M_STA: KASSERT(hvap != NULL, ("no hal vap handle")); (void) mwl_hal_delstation(hvap, vap->iv_myaddr); mwl_hal_delvap(hvap); if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) sc->sc_napvaps--; else sc->sc_nstavaps--; /* XXX don't do it for IEEE80211_CLONE_MACADDR */ reclaim_address(sc, vap->iv_myaddr); break; case IEEE80211_M_WDS: sc->sc_nwdsvaps--; break; default: break; } mwl_cleartxq(sc, vap); free(mvp, M_80211_VAP); if (sc->sc_running) mwl_hal_intrset(mh, sc->sc_imask); } void mwl_suspend(struct mwl_softc *sc) { MWL_LOCK(sc); mwl_stop(sc); MWL_UNLOCK(sc); } void mwl_resume(struct mwl_softc *sc) { int error = EDOOFUS; MWL_LOCK(sc); if (sc->sc_ic.ic_nrunning > 0) error = mwl_init(sc); MWL_UNLOCK(sc); if (error == 0) ieee80211_start_all(&sc->sc_ic); /* start all vap's */ } void mwl_shutdown(void *arg) { struct mwl_softc *sc = arg; MWL_LOCK(sc); mwl_stop(sc); MWL_UNLOCK(sc); } /* * Interrupt handler. Most of the actual processing is deferred. */ void mwl_intr(void *arg) { struct mwl_softc *sc = arg; struct mwl_hal *mh = sc->sc_mh; uint32_t status; if (sc->sc_invalid) { /* * The hardware is not ready/present, don't touch anything. * Note this can happen early on if the IRQ is shared. */ DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid; ignored\n", __func__); return; } /* * Figure out the reason(s) for the interrupt. */ mwl_hal_getisr(mh, &status); /* NB: clears ISR too */ if (status == 0) /* must be a shared irq */ return; DPRINTF(sc, MWL_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n", __func__, status, sc->sc_imask); if (status & MACREG_A2HRIC_BIT_RX_RDY) taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask); if (status & MACREG_A2HRIC_BIT_TX_DONE) taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask); if (status & MACREG_A2HRIC_BIT_BA_WATCHDOG) taskqueue_enqueue(sc->sc_tq, &sc->sc_bawatchdogtask); if (status & MACREG_A2HRIC_BIT_OPC_DONE) mwl_hal_cmddone(mh); if (status & MACREG_A2HRIC_BIT_MAC_EVENT) { ; } if (status & MACREG_A2HRIC_BIT_ICV_ERROR) { /* TKIP ICV error */ sc->sc_stats.mst_rx_badtkipicv++; } if (status & MACREG_A2HRIC_BIT_QUEUE_EMPTY) { /* 11n aggregation queue is empty, re-fill */ ; } if (status & MACREG_A2HRIC_BIT_QUEUE_FULL) { ; } if (status & MACREG_A2HRIC_BIT_RADAR_DETECT) { /* radar detected, process event */ taskqueue_enqueue(sc->sc_tq, &sc->sc_radartask); } if (status & MACREG_A2HRIC_BIT_CHAN_SWITCH) { /* DFS channel switch */ taskqueue_enqueue(sc->sc_tq, &sc->sc_chanswitchtask); } } static void mwl_radar_proc(void *arg, int pending) { struct mwl_softc *sc = arg; struct ieee80211com *ic = &sc->sc_ic; DPRINTF(sc, MWL_DEBUG_ANY, "%s: radar detected, pending %u\n", __func__, pending); sc->sc_stats.mst_radardetect++; /* XXX stop h/w BA streams? 
*/ IEEE80211_LOCK(ic); ieee80211_dfs_notify_radar(ic, ic->ic_curchan); IEEE80211_UNLOCK(ic); } static void mwl_chanswitch_proc(void *arg, int pending) { struct mwl_softc *sc = arg; struct ieee80211com *ic = &sc->sc_ic; DPRINTF(sc, MWL_DEBUG_ANY, "%s: channel switch notice, pending %u\n", __func__, pending); IEEE80211_LOCK(ic); sc->sc_csapending = 0; ieee80211_csa_completeswitch(ic); IEEE80211_UNLOCK(ic); } static void mwl_bawatchdog(const MWL_HAL_BASTREAM *sp) { struct ieee80211_node *ni = sp->data[0]; /* send DELBA and drop the stream */ ieee80211_ampdu_stop(ni, sp->data[1], IEEE80211_REASON_UNSPECIFIED); } static void mwl_bawatchdog_proc(void *arg, int pending) { struct mwl_softc *sc = arg; struct mwl_hal *mh = sc->sc_mh; const MWL_HAL_BASTREAM *sp; uint8_t bitmap, n; sc->sc_stats.mst_bawatchdog++; if (mwl_hal_getwatchdogbitmap(mh, &bitmap) != 0) { DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: could not get bitmap\n", __func__); sc->sc_stats.mst_bawatchdog_failed++; return; } DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: bitmap 0x%x\n", __func__, bitmap); if (bitmap == 0xff) { n = 0; /* disable all ba streams */ for (bitmap = 0; bitmap < 8; bitmap++) { sp = mwl_hal_bastream_lookup(mh, bitmap); if (sp != NULL) { mwl_bawatchdog(sp); n++; } } if (n == 0) { DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: no BA streams found\n", __func__); sc->sc_stats.mst_bawatchdog_empty++; } } else if (bitmap != 0xaa) { /* disable a single ba stream */ sp = mwl_hal_bastream_lookup(mh, bitmap); if (sp != NULL) { mwl_bawatchdog(sp); } else { DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: no BA stream %d\n", __func__, bitmap); sc->sc_stats.mst_bawatchdog_notfound++; } } } /* * Convert net80211 channel to a HAL channel. */ static void mwl_mapchan(MWL_HAL_CHANNEL *hc, const struct ieee80211_channel *chan) { hc->channel = chan->ic_ieee; *(uint32_t *)&hc->channelFlags = 0; if (IEEE80211_IS_CHAN_2GHZ(chan)) hc->channelFlags.FreqBand = MWL_FREQ_BAND_2DOT4GHZ; else if (IEEE80211_IS_CHAN_5GHZ(chan)) hc->channelFlags.FreqBand = MWL_FREQ_BAND_5GHZ; if (IEEE80211_IS_CHAN_HT40(chan)) { hc->channelFlags.ChnlWidth = MWL_CH_40_MHz_WIDTH; if (IEEE80211_IS_CHAN_HT40U(chan)) hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_ABOVE_CTRL_CH; else hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_BELOW_CTRL_CH; } else hc->channelFlags.ChnlWidth = MWL_CH_20_MHz_WIDTH; /* XXX 10MHz channels */ } /* * Inform firmware of our tx/rx dma setup. The BAR 0 * writes below are for compatibility with older firmware. * For current firmware we send this information with a * cmd block via mwl_hal_sethwdma. */ static int mwl_setupdma(struct mwl_softc *sc) { int error, i; sc->sc_hwdma.rxDescRead = sc->sc_rxdma.dd_desc_paddr; WR4(sc, sc->sc_hwspecs.rxDescRead, sc->sc_hwdma.rxDescRead); WR4(sc, sc->sc_hwspecs.rxDescWrite, sc->sc_hwdma.rxDescRead); for (i = 0; i < MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES; i++) { struct mwl_txq *txq = &sc->sc_txq[i]; sc->sc_hwdma.wcbBase[i] = txq->dma.dd_desc_paddr; WR4(sc, sc->sc_hwspecs.wcbBase[i], sc->sc_hwdma.wcbBase[i]); } sc->sc_hwdma.maxNumTxWcb = mwl_txbuf; sc->sc_hwdma.maxNumWCB = MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES; error = mwl_hal_sethwdma(sc->sc_mh, &sc->sc_hwdma); if (error != 0) { device_printf(sc->sc_dev, "unable to setup tx/rx dma; hal status %u\n", error); /* XXX */ } return error; } /* * Inform firmware of tx rate parameters. * Called after a channel change. 
*/ static int mwl_setcurchanrates(struct mwl_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; const struct ieee80211_rateset *rs; MWL_HAL_TXRATE rates; memset(&rates, 0, sizeof(rates)); rs = ieee80211_get_suprates(ic, ic->ic_curchan); /* rate used to send management frames */ rates.MgtRate = rs->rs_rates[0] & IEEE80211_RATE_VAL; /* rate used to send multicast frames */ rates.McastRate = rates.MgtRate; return mwl_hal_settxrate_auto(sc->sc_mh, &rates); } /* * Inform firmware of tx rate parameters. Called whenever * user-settable params change and after a channel change. */ static int mwl_setrates(struct ieee80211vap *vap) { struct mwl_vap *mvp = MWL_VAP(vap); struct ieee80211_node *ni = vap->iv_bss; const struct ieee80211_txparam *tp = ni->ni_txparms; MWL_HAL_TXRATE rates; KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state)); /* * Update the h/w rate map. * NB: 0x80 for MCS is passed through unchanged */ memset(&rates, 0, sizeof(rates)); /* rate used to send management frames */ rates.MgtRate = tp->mgmtrate; /* rate used to send multicast frames */ rates.McastRate = tp->mcastrate; /* while here calculate EAPOL fixed rate cookie */ mvp->mv_eapolformat = htole16(mwl_calcformat(rates.MgtRate, ni)); return mwl_hal_settxrate(mvp->mv_hvap, tp->ucastrate != IEEE80211_FIXED_RATE_NONE ? RATE_FIXED : RATE_AUTO, &rates); } /* * Setup a fixed xmit rate cookie for EAPOL frames. */ static void mwl_seteapolformat(struct ieee80211vap *vap) { struct mwl_vap *mvp = MWL_VAP(vap); struct ieee80211_node *ni = vap->iv_bss; enum ieee80211_phymode mode; uint8_t rate; KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state)); mode = ieee80211_chan2mode(ni->ni_chan); /* * Use legacy rates when operating a mixed HT+non-HT bss. * NB: this may violate POLA for sta and wds vap's. */ if (mode == IEEE80211_MODE_11NA && (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0) rate = vap->iv_txparms[IEEE80211_MODE_11A].mgmtrate; else if (mode == IEEE80211_MODE_11NG && (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0) rate = vap->iv_txparms[IEEE80211_MODE_11G].mgmtrate; else rate = vap->iv_txparms[mode].mgmtrate; mvp->mv_eapolformat = htole16(mwl_calcformat(rate, ni)); } /* * Map SKU+country code to region code for radar bin'ing. */ static int mwl_map2regioncode(const struct ieee80211_regdomain *rd) { switch (rd->regdomain) { case SKU_FCC: case SKU_FCC3: return DOMAIN_CODE_FCC; case SKU_CA: return DOMAIN_CODE_IC; case SKU_ETSI: case SKU_ETSI2: case SKU_ETSI3: if (rd->country == CTRY_SPAIN) return DOMAIN_CODE_SPAIN; if (rd->country == CTRY_FRANCE || rd->country == CTRY_FRANCE2) return DOMAIN_CODE_FRANCE; /* XXX force 1.3.1 radar type */ return DOMAIN_CODE_ETSI_131; case SKU_JAPAN: return DOMAIN_CODE_MKK; case SKU_ROW: return DOMAIN_CODE_DGT; /* Taiwan */ case SKU_APAC: case SKU_APAC2: case SKU_APAC3: return DOMAIN_CODE_AUS; /* Australia */ } /* XXX KOREA? */ return DOMAIN_CODE_FCC; /* XXX? 
*/ } static int mwl_hal_reset(struct mwl_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct mwl_hal *mh = sc->sc_mh; mwl_hal_setantenna(mh, WL_ANTENNATYPE_RX, sc->sc_rxantenna); mwl_hal_setantenna(mh, WL_ANTENNATYPE_TX, sc->sc_txantenna); mwl_hal_setradio(mh, 1, WL_AUTO_PREAMBLE); mwl_hal_setwmm(sc->sc_mh, (ic->ic_flags & IEEE80211_F_WME) != 0); mwl_chan_set(sc, ic->ic_curchan); /* NB: RF/RA performance tuned for indoor mode */ mwl_hal_setrateadaptmode(mh, 0); mwl_hal_setoptimizationlevel(mh, (ic->ic_flags & IEEE80211_F_BURST) != 0); mwl_hal_setregioncode(mh, mwl_map2regioncode(&ic->ic_regdomain)); mwl_hal_setaggampduratemode(mh, 1, 80); /* XXX */ mwl_hal_setcfend(mh, 0); /* XXX */ return 1; } static int mwl_init(struct mwl_softc *sc) { struct mwl_hal *mh = sc->sc_mh; int error = 0; MWL_LOCK_ASSERT(sc); /* * Stop anything previously setup. This is safe * whether this is the first time through or not. */ mwl_stop(sc); /* * Push vap-independent state to the firmware. */ if (!mwl_hal_reset(sc)) { device_printf(sc->sc_dev, "unable to reset hardware\n"); return EIO; } /* * Setup recv (once); transmit is already good to go. */ error = mwl_startrecv(sc); if (error != 0) { device_printf(sc->sc_dev, "unable to start recv logic\n"); return error; } /* * Enable interrupts. */ sc->sc_imask = MACREG_A2HRIC_BIT_RX_RDY | MACREG_A2HRIC_BIT_TX_DONE | MACREG_A2HRIC_BIT_OPC_DONE #if 0 | MACREG_A2HRIC_BIT_MAC_EVENT #endif | MACREG_A2HRIC_BIT_ICV_ERROR | MACREG_A2HRIC_BIT_RADAR_DETECT | MACREG_A2HRIC_BIT_CHAN_SWITCH #if 0 | MACREG_A2HRIC_BIT_QUEUE_EMPTY #endif | MACREG_A2HRIC_BIT_BA_WATCHDOG | MACREQ_A2HRIC_BIT_TX_ACK ; sc->sc_running = 1; mwl_hal_intrset(mh, sc->sc_imask); callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc); return 0; } static void mwl_stop(struct mwl_softc *sc) { MWL_LOCK_ASSERT(sc); if (sc->sc_running) { /* * Shutdown the hardware and driver. */ sc->sc_running = 0; callout_stop(&sc->sc_watchdog); sc->sc_tx_timer = 0; mwl_draintxq(sc); } } static int mwl_reset_vap(struct ieee80211vap *vap, int state) { struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap; struct ieee80211com *ic = vap->iv_ic; if (state == IEEE80211_S_RUN) mwl_setrates(vap); /* XXX off by 1? */ mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold); /* XXX auto? 20/40 split? */ mwl_hal_sethtgi(hvap, (vap->iv_flags_ht & (IEEE80211_FHT_SHORTGI20|IEEE80211_FHT_SHORTGI40)) ? 1 : 0); mwl_hal_setnprot(hvap, ic->ic_htprotmode == IEEE80211_PROT_NONE ? HTPROTECT_NONE : HTPROTECT_AUTO); /* XXX txpower cap */ /* re-setup beacons */ if (state == IEEE80211_S_RUN && (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_MBSS || vap->iv_opmode == IEEE80211_M_IBSS)) { mwl_setapmode(vap, vap->iv_bss->ni_chan); mwl_hal_setnprotmode(hvap, MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE)); return mwl_beacon_setup(vap); } return 0; } /* * Reset the hardware w/o losing operational state. * Used to reset or reload hardware state for a vap. */ static int mwl_reset(struct ieee80211vap *vap, u_long cmd) { struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap; int error = 0; if (hvap != NULL) { /* WDS, MONITOR, etc. */ struct ieee80211com *ic = vap->iv_ic; struct mwl_softc *sc = ic->ic_softc; struct mwl_hal *mh = sc->sc_mh; /* XXX handle DWDS sta vap change */ /* XXX do we need to disable interrupts? */ mwl_hal_intrset(mh, 0); /* disable interrupts */ error = mwl_reset_vap(vap, vap->iv_state); mwl_hal_intrset(mh, sc->sc_imask); } return error; } /* * Allocate a tx buffer for sending a frame. 
The * packet is assumed to have the WME AC stored so * we can use it to select the appropriate h/w queue. */ static struct mwl_txbuf * mwl_gettxbuf(struct mwl_softc *sc, struct mwl_txq *txq) { struct mwl_txbuf *bf; /* * Grab a TX buffer and associated resources. */ MWL_TXQ_LOCK(txq); bf = STAILQ_FIRST(&txq->free); if (bf != NULL) { STAILQ_REMOVE_HEAD(&txq->free, bf_list); txq->nfree--; } MWL_TXQ_UNLOCK(txq); if (bf == NULL) DPRINTF(sc, MWL_DEBUG_XMIT, "%s: out of xmit buffers on q %d\n", __func__, txq->qnum); return bf; } /* * Return a tx buffer to the queue it came from. Note there * are two cases because we must preserve the order of buffers * as it reflects the fixed order of descriptors in memory * (the firmware pre-fetches descriptors so we cannot reorder). */ static void mwl_puttxbuf_head(struct mwl_txq *txq, struct mwl_txbuf *bf) { bf->bf_m = NULL; bf->bf_node = NULL; MWL_TXQ_LOCK(txq); STAILQ_INSERT_HEAD(&txq->free, bf, bf_list); txq->nfree++; MWL_TXQ_UNLOCK(txq); } static void mwl_puttxbuf_tail(struct mwl_txq *txq, struct mwl_txbuf *bf) { bf->bf_m = NULL; bf->bf_node = NULL; MWL_TXQ_LOCK(txq); STAILQ_INSERT_TAIL(&txq->free, bf, bf_list); txq->nfree++; MWL_TXQ_UNLOCK(txq); } static int mwl_transmit(struct ieee80211com *ic, struct mbuf *m) { struct mwl_softc *sc = ic->ic_softc; int error; MWL_LOCK(sc); if (!sc->sc_running) { MWL_UNLOCK(sc); return (ENXIO); } error = mbufq_enqueue(&sc->sc_snd, m); if (error) { MWL_UNLOCK(sc); return (error); } mwl_start(sc); MWL_UNLOCK(sc); return (0); } static void mwl_start(struct mwl_softc *sc) { struct ieee80211_node *ni; struct mwl_txbuf *bf; struct mbuf *m; struct mwl_txq *txq = NULL; /* XXX silence gcc */ int nqueued; MWL_LOCK_ASSERT(sc); if (!sc->sc_running || sc->sc_invalid) return; nqueued = 0; while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) { /* * Grab the node for the destination. */ ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; KASSERT(ni != NULL, ("no node")); m->m_pkthdr.rcvif = NULL; /* committed, clear ref */ /* * Grab a TX buffer and associated resources. * We honor the classification by the 802.11 layer. */ txq = sc->sc_ac2q[M_WME_GETAC(m)]; bf = mwl_gettxbuf(sc, txq); if (bf == NULL) { m_freem(m); ieee80211_free_node(ni); #ifdef MWL_TX_NODROP sc->sc_stats.mst_tx_qstop++; break; #else DPRINTF(sc, MWL_DEBUG_XMIT, "%s: tail drop on q %d\n", __func__, txq->qnum); sc->sc_stats.mst_tx_qdrop++; continue; #endif /* MWL_TX_NODROP */ } /* * Pass the frame to the h/w for transmission. */ if (mwl_tx_start(sc, ni, bf, m)) { if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); mwl_puttxbuf_head(txq, bf); ieee80211_free_node(ni); continue; } nqueued++; if (nqueued >= mwl_txcoalesce) { /* * Poke the firmware to process queued frames; * see below about (lack of) locking. */ nqueued = 0; mwl_hal_txstart(sc->sc_mh, 0/*XXX*/); } } if (nqueued) { /* * NB: We don't need to lock against tx done because * this just prods the firmware to check the transmit * descriptors. The firmware will also start fetching * descriptors by itself if it notices new ones are * present when it goes to deliver a tx done interrupt * to the host. So if we race with tx done processing * it's ok. Delivering the kick here rather than in * mwl_tx_start is an optimization to avoid poking the * firmware for each packet. * * NB: the queue id isn't used so 0 is ok. 
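 * For illustration only (the threshold is whatever the mwl_txcoalesce
 * tunable is set to): with a threshold of 8 and 20 frames dequeued by
 * the loop above, the firmware is poked after frames 8 and 16 inside
 * the loop and once more here for the remaining 4, i.e. three doorbell
 * writes instead of one per frame.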
*/ mwl_hal_txstart(sc->sc_mh, 0/*XXX*/); } } static int mwl_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct mwl_softc *sc = ic->ic_softc; struct mwl_txbuf *bf; struct mwl_txq *txq; if (!sc->sc_running || sc->sc_invalid) { m_freem(m); return ENETDOWN; } /* * Grab a TX buffer and associated resources. * Note that we depend on the classification * by the 802.11 layer to get to the right h/w * queue. Management frames must ALWAYS go on * queue 1 but we cannot just force that here * because we may receive non-mgt frames. */ txq = sc->sc_ac2q[M_WME_GETAC(m)]; bf = mwl_gettxbuf(sc, txq); if (bf == NULL) { sc->sc_stats.mst_tx_qstop++; m_freem(m); return ENOBUFS; } /* * Pass the frame to the h/w for transmission. */ if (mwl_tx_start(sc, ni, bf, m)) { mwl_puttxbuf_head(txq, bf); return EIO; /* XXX */ } /* * NB: We don't need to lock against tx done because * this just prods the firmware to check the transmit * descriptors. The firmware will also start fetching * descriptors by itself if it notices new ones are * present when it goes to deliver a tx done interrupt * to the host. So if we race with tx done processing * it's ok. Delivering the kick here rather than in * mwl_tx_start is an optimization to avoid poking the * firmware for each packet. * * NB: the queue id isn't used so 0 is ok. */ mwl_hal_txstart(sc->sc_mh, 0/*XXX*/); return 0; } static int mwl_media_change(struct ifnet *ifp) { struct ieee80211vap *vap = ifp->if_softc; int error; error = ieee80211_media_change(ifp); /* NB: only the fixed rate can change and that doesn't need a reset */ if (error == ENETRESET) { mwl_setrates(vap); error = 0; } return error; } #ifdef MWL_DEBUG static void mwl_keyprint(struct mwl_softc *sc, const char *tag, const MWL_HAL_KEYVAL *hk, const uint8_t mac[IEEE80211_ADDR_LEN]) { static const char *ciphers[] = { "WEP", "TKIP", "AES-CCM", }; int i, n; printf("%s: [%u] %-7s", tag, hk->keyIndex, ciphers[hk->keyTypeId]); for (i = 0, n = hk->keyLen; i < n; i++) printf(" %02x", hk->key.aes[i]); printf(" mac %s", ether_sprintf(mac)); if (hk->keyTypeId == KEY_TYPE_ID_TKIP) { printf(" %s", "rxmic"); for (i = 0; i < sizeof(hk->key.tkip.rxMic); i++) printf(" %02x", hk->key.tkip.rxMic[i]); printf(" txmic"); for (i = 0; i < sizeof(hk->key.tkip.txMic); i++) printf(" %02x", hk->key.tkip.txMic[i]); } printf(" flags 0x%x\n", hk->keyFlags); } #endif /* * Allocate a key cache slot for a unicast key. The * firmware handles key allocation and every station is * guaranteed key space so we are always successful. */ static int mwl_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k, ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix) { struct mwl_softc *sc = vap->iv_ic->ic_softc; if (k->wk_keyix != IEEE80211_KEYIX_NONE || (k->wk_flags & IEEE80211_KEY_GROUP)) { if (!(&vap->iv_nw_keys[0] <= k && k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) { /* should not happen */ DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: bogus group key\n", __func__); return 0; } /* give the caller what they requested */ *keyix = *rxkeyix = ieee80211_crypto_get_key_wepidx(vap, k); } else { /* * Firmware handles key allocation. */ *keyix = *rxkeyix = 0; } return 1; } /* * Delete a key entry allocated by mwl_key_alloc. 
*/ static int mwl_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k) { struct mwl_softc *sc = vap->iv_ic->ic_softc; struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap; MWL_HAL_KEYVAL hk; const uint8_t bcastaddr[IEEE80211_ADDR_LEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; if (hvap == NULL) { if (vap->iv_opmode != IEEE80211_M_WDS) { /* XXX monitor mode? */ DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: no hvap for opmode %d\n", __func__, vap->iv_opmode); return 0; } hvap = MWL_VAP(vap)->mv_ap_hvap; } DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: delete key %u\n", __func__, k->wk_keyix); memset(&hk, 0, sizeof(hk)); hk.keyIndex = k->wk_keyix; switch (k->wk_cipher->ic_cipher) { case IEEE80211_CIPHER_WEP: hk.keyTypeId = KEY_TYPE_ID_WEP; break; case IEEE80211_CIPHER_TKIP: hk.keyTypeId = KEY_TYPE_ID_TKIP; break; case IEEE80211_CIPHER_AES_CCM: hk.keyTypeId = KEY_TYPE_ID_AES; break; default: /* XXX should not happen */ DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n", __func__, k->wk_cipher->ic_cipher); return 0; } return (mwl_hal_keyreset(hvap, &hk, bcastaddr) == 0); /*XXX*/ } static __inline int addgroupflags(MWL_HAL_KEYVAL *hk, const struct ieee80211_key *k) { if (k->wk_flags & IEEE80211_KEY_GROUP) { if (k->wk_flags & IEEE80211_KEY_XMIT) hk->keyFlags |= KEY_FLAG_TXGROUPKEY; if (k->wk_flags & IEEE80211_KEY_RECV) hk->keyFlags |= KEY_FLAG_RXGROUPKEY; return 1; } else return 0; } /* * Set the key cache contents for the specified key. Key cache * slot(s) must already have been allocated by mwl_key_alloc. */ static int mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k) { return (_mwl_key_set(vap, k, k->wk_macaddr)); } static int _mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k, const uint8_t mac[IEEE80211_ADDR_LEN]) { #define GRPXMIT (IEEE80211_KEY_XMIT | IEEE80211_KEY_GROUP) /* NB: static wep keys are marked GROUP+tx/rx; GTK will be tx or rx */ #define IEEE80211_IS_STATICKEY(k) \ (((k)->wk_flags & (GRPXMIT|IEEE80211_KEY_RECV)) == \ (GRPXMIT|IEEE80211_KEY_RECV)) struct mwl_softc *sc = vap->iv_ic->ic_softc; struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap; const struct ieee80211_cipher *cip = k->wk_cipher; const uint8_t *macaddr; MWL_HAL_KEYVAL hk; KASSERT((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0, ("s/w crypto set?")); if (hvap == NULL) { if (vap->iv_opmode != IEEE80211_M_WDS) { /* XXX monitor mode? 
*/ DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: no hvap for opmode %d\n", __func__, vap->iv_opmode); return 0; } hvap = MWL_VAP(vap)->mv_ap_hvap; } memset(&hk, 0, sizeof(hk)); hk.keyIndex = k->wk_keyix; switch (cip->ic_cipher) { case IEEE80211_CIPHER_WEP: hk.keyTypeId = KEY_TYPE_ID_WEP; hk.keyLen = k->wk_keylen; if (k->wk_keyix == vap->iv_def_txkey) hk.keyFlags = KEY_FLAG_WEP_TXKEY; if (!IEEE80211_IS_STATICKEY(k)) { /* NB: WEP is never used for the PTK */ (void) addgroupflags(&hk, k); } break; case IEEE80211_CIPHER_TKIP: hk.keyTypeId = KEY_TYPE_ID_TKIP; hk.key.tkip.tsc.high = (uint32_t)(k->wk_keytsc >> 16); hk.key.tkip.tsc.low = (uint16_t)k->wk_keytsc; hk.keyFlags = KEY_FLAG_TSC_VALID | KEY_FLAG_MICKEY_VALID; hk.keyLen = k->wk_keylen + IEEE80211_MICBUF_SIZE; if (!addgroupflags(&hk, k)) hk.keyFlags |= KEY_FLAG_PAIRWISE; break; case IEEE80211_CIPHER_AES_CCM: hk.keyTypeId = KEY_TYPE_ID_AES; hk.keyLen = k->wk_keylen; if (!addgroupflags(&hk, k)) hk.keyFlags |= KEY_FLAG_PAIRWISE; break; default: /* XXX should not happen */ DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n", __func__, k->wk_cipher->ic_cipher); return 0; } /* * NB: tkip mic keys get copied here too; the layout * just happens to match that in ieee80211_key. */ memcpy(hk.key.aes, k->wk_key, hk.keyLen); /* * Locate address of sta db entry for writing key; * the convention unfortunately is somewhat different * than how net80211, hostapd, and wpa_supplicant think. */ if (vap->iv_opmode == IEEE80211_M_STA) { /* * NB: keys plumbed before the sta reaches AUTH state * will be discarded or written to the wrong sta db * entry because iv_bss is meaningless. This is ok * (right now) because we handle deferred plumbing of * WEP keys when the sta reaches AUTH state. */ macaddr = vap->iv_bss->ni_bssid; if ((k->wk_flags & IEEE80211_KEY_GROUP) == 0) { /* XXX plumb to local sta db too for static key wep */ mwl_hal_keyset(hvap, &hk, vap->iv_myaddr); } } else if (vap->iv_opmode == IEEE80211_M_WDS && vap->iv_state != IEEE80211_S_RUN) { /* * Prior to RUN state a WDS vap will not it's BSS node * setup so we will plumb the key to the wrong mac * address (it'll be our local address). Workaround * this for the moment by grabbing the correct address. */ macaddr = vap->iv_des_bssid; } else if ((k->wk_flags & GRPXMIT) == GRPXMIT) macaddr = vap->iv_myaddr; else macaddr = mac; KEYPRINTF(sc, &hk, macaddr); return (mwl_hal_keyset(hvap, &hk, macaddr) == 0); #undef IEEE80211_IS_STATICKEY #undef GRPXMIT } /* * Set the multicast filter contents into the hardware. * XXX f/w has no support; just defer to the os. */ static void mwl_setmcastfilter(struct mwl_softc *sc) { #if 0 struct ether_multi *enm; struct ether_multistep estep; uint8_t macs[IEEE80211_ADDR_LEN*MWL_HAL_MCAST_MAX];/* XXX stack use */ uint8_t *mp; int nmc; mp = macs; nmc = 0; ETHER_FIRST_MULTI(estep, &sc->sc_ec, enm); while (enm != NULL) { /* XXX Punt on ranges. */ if (nmc == MWL_HAL_MCAST_MAX || !IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) { ifp->if_flags |= IFF_ALLMULTI; return; } IEEE80211_ADDR_COPY(mp, enm->enm_addrlo); mp += IEEE80211_ADDR_LEN, nmc++; ETHER_NEXT_MULTI(estep, enm); } ifp->if_flags &= ~IFF_ALLMULTI; mwl_hal_setmcast(sc->sc_mh, nmc, macs); #endif } static int mwl_mode_init(struct mwl_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct mwl_hal *mh = sc->sc_mh; mwl_hal_setpromisc(mh, ic->ic_promisc > 0); mwl_setmcastfilter(sc); return 0; } /* * Callback from the 802.11 layer after a multicast state change. 
*/ static void mwl_update_mcast(struct ieee80211com *ic) { struct mwl_softc *sc = ic->ic_softc; mwl_setmcastfilter(sc); } /* * Callback from the 802.11 layer after a promiscuous mode change. * Note this interface does not check the operating mode as this * is an internal callback and we are expected to honor the current * state (e.g. this is used for setting the interface in promiscuous * mode when operating in hostap mode to do ACS). */ static void mwl_update_promisc(struct ieee80211com *ic) { struct mwl_softc *sc = ic->ic_softc; mwl_hal_setpromisc(sc->sc_mh, ic->ic_promisc > 0); } /* * Callback from the 802.11 layer to update the slot time * based on the current setting. We use it to notify the * firmware of ERP changes and the f/w takes care of things * like slot time and preamble. */ static void mwl_updateslot(struct ieee80211com *ic) { struct mwl_softc *sc = ic->ic_softc; struct mwl_hal *mh = sc->sc_mh; int prot; /* NB: can be called early; suppress needless cmds */ if (!sc->sc_running) return; /* * Calculate the ERP flags. The firwmare will use * this to carry out the appropriate measures. */ prot = 0; if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) { if ((ic->ic_flags & IEEE80211_F_SHSLOT) == 0) prot |= IEEE80211_ERP_NON_ERP_PRESENT; if (ic->ic_flags & IEEE80211_F_USEPROT) prot |= IEEE80211_ERP_USE_PROTECTION; if (ic->ic_flags & IEEE80211_F_USEBARKER) prot |= IEEE80211_ERP_LONG_PREAMBLE; } DPRINTF(sc, MWL_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x %s slot, (prot 0x%x ic_flags 0x%x)\n", __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags, ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", prot, ic->ic_flags); mwl_hal_setgprot(mh, prot); } /* * Setup the beacon frame. */ static int mwl_beacon_setup(struct ieee80211vap *vap) { struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap; struct ieee80211_node *ni = vap->iv_bss; struct mbuf *m; m = ieee80211_beacon_alloc(ni); if (m == NULL) return ENOBUFS; mwl_hal_setbeacon(hvap, mtod(m, const void *), m->m_len); m_free(m); return 0; } /* * Update the beacon frame in response to a change. */ static void mwl_beacon_update(struct ieee80211vap *vap, int item) { struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap; struct ieee80211com *ic = vap->iv_ic; KASSERT(hvap != NULL, ("no beacon")); switch (item) { case IEEE80211_BEACON_ERP: mwl_updateslot(ic); break; case IEEE80211_BEACON_HTINFO: mwl_hal_setnprotmode(hvap, MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE)); break; case IEEE80211_BEACON_CAPS: case IEEE80211_BEACON_WME: case IEEE80211_BEACON_APPIE: case IEEE80211_BEACON_CSA: break; case IEEE80211_BEACON_TIM: /* NB: firmware always forms TIM */ return; } /* XXX retain beacon frame and update */ mwl_beacon_setup(vap); } static void mwl_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { bus_addr_t *paddr = (bus_addr_t*) arg; KASSERT(error == 0, ("error %u on bus_dma callback", error)); *paddr = segs->ds_addr; } #ifdef MWL_HOST_PS_SUPPORT /* * Handle power save station occupancy changes. */ static void mwl_update_ps(struct ieee80211vap *vap, int nsta) { struct mwl_vap *mvp = MWL_VAP(vap); if (nsta == 0 || mvp->mv_last_ps_sta == 0) mwl_hal_setpowersave_bss(mvp->mv_hvap, nsta); mvp->mv_last_ps_sta = nsta; } /* * Handle associated station power save state changes. 
*/ static int mwl_set_tim(struct ieee80211_node *ni, int set) { struct ieee80211vap *vap = ni->ni_vap; struct mwl_vap *mvp = MWL_VAP(vap); if (mvp->mv_set_tim(ni, set)) { /* NB: state change */ mwl_hal_setpowersave_sta(mvp->mv_hvap, IEEE80211_AID(ni->ni_associd), set); return 1; } else return 0; } #endif /* MWL_HOST_PS_SUPPORT */ static int mwl_desc_setup(struct mwl_softc *sc, const char *name, struct mwl_descdma *dd, int nbuf, size_t bufsize, int ndesc, size_t descsize) { uint8_t *ds; int error; DPRINTF(sc, MWL_DEBUG_RESET, "%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n", __func__, name, nbuf, (uintmax_t) bufsize, ndesc, (uintmax_t) descsize); dd->dd_name = name; dd->dd_desc_len = nbuf * ndesc * descsize; /* * Setup DMA descriptor area. */ error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ PAGE_SIZE, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ dd->dd_desc_len, /* maxsize */ 1, /* nsegments */ dd->dd_desc_len, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &dd->dd_dmat); if (error != 0) { device_printf(sc->sc_dev, "cannot allocate %s DMA tag\n", dd->dd_name); return error; } /* allocate descriptors */ error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dd->dd_dmamap); if (error != 0) { device_printf(sc->sc_dev, "unable to alloc memory for %u %s descriptors, " "error %u\n", nbuf * ndesc, dd->dd_name, error); goto fail1; } error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap, dd->dd_desc, dd->dd_desc_len, mwl_load_cb, &dd->dd_desc_paddr, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->sc_dev, "unable to map %s descriptors, error %u\n", dd->dd_name, error); goto fail2; } ds = dd->dd_desc; memset(ds, 0, dd->dd_desc_len); DPRINTF(sc, MWL_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> 0x%jx (%lu)\n", __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len, (uintmax_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len); return 0; fail2: bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); fail1: bus_dma_tag_destroy(dd->dd_dmat); memset(dd, 0, sizeof(*dd)); return error; #undef DS2PHYS } static void mwl_desc_cleanup(struct mwl_softc *sc, struct mwl_descdma *dd) { bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); bus_dma_tag_destroy(dd->dd_dmat); memset(dd, 0, sizeof(*dd)); } /* * Construct a tx q's free list. The order of entries on * the list must reflect the physical layout of tx descriptors * because the firmware pre-fetches descriptors. * * XXX might be better to use indices into the buffer array. 
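 * This ordering requirement is also why a buffer that was taken from
 * the free list but never handed to the firmware is returned with
 * mwl_puttxbuf_head() rather than appended at the tail: putting it
 * back at the front keeps the list in descriptor-ring order.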
*/ static void mwl_txq_reset(struct mwl_softc *sc, struct mwl_txq *txq) { struct mwl_txbuf *bf; int i; bf = txq->dma.dd_bufptr; STAILQ_INIT(&txq->free); for (i = 0; i < mwl_txbuf; i++, bf++) STAILQ_INSERT_TAIL(&txq->free, bf, bf_list); txq->nfree = i; } #define DS2PHYS(_dd, _ds) \ ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) static int mwl_txdma_setup(struct mwl_softc *sc, struct mwl_txq *txq) { int error, bsize, i; struct mwl_txbuf *bf; struct mwl_txdesc *ds; error = mwl_desc_setup(sc, "tx", &txq->dma, mwl_txbuf, sizeof(struct mwl_txbuf), MWL_TXDESC, sizeof(struct mwl_txdesc)); if (error != 0) return error; /* allocate and setup tx buffers */ bsize = mwl_txbuf * sizeof(struct mwl_txbuf); bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO); if (bf == NULL) { device_printf(sc->sc_dev, "malloc of %u tx buffers failed\n", mwl_txbuf); return ENOMEM; } txq->dma.dd_bufptr = bf; ds = txq->dma.dd_desc; for (i = 0; i < mwl_txbuf; i++, bf++, ds += MWL_TXDESC) { bf->bf_desc = ds; bf->bf_daddr = DS2PHYS(&txq->dma, ds); error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &bf->bf_dmamap); if (error != 0) { device_printf(sc->sc_dev, "unable to create dmamap for tx " "buffer %u, error %u\n", i, error); return error; } } mwl_txq_reset(sc, txq); return 0; } static void mwl_txdma_cleanup(struct mwl_softc *sc, struct mwl_txq *txq) { struct mwl_txbuf *bf; int i; bf = txq->dma.dd_bufptr; for (i = 0; i < mwl_txbuf; i++, bf++) { KASSERT(bf->bf_m == NULL, ("mbuf on free list")); KASSERT(bf->bf_node == NULL, ("node on free list")); if (bf->bf_dmamap != NULL) bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); } STAILQ_INIT(&txq->free); txq->nfree = 0; if (txq->dma.dd_bufptr != NULL) { free(txq->dma.dd_bufptr, M_MWLDEV); txq->dma.dd_bufptr = NULL; } if (txq->dma.dd_desc_len != 0) mwl_desc_cleanup(sc, &txq->dma); } static int mwl_rxdma_setup(struct mwl_softc *sc) { int error, jumbosize, bsize, i; struct mwl_rxbuf *bf; struct mwl_jumbo *rbuf; struct mwl_rxdesc *ds; caddr_t data; error = mwl_desc_setup(sc, "rx", &sc->sc_rxdma, mwl_rxdesc, sizeof(struct mwl_rxbuf), 1, sizeof(struct mwl_rxdesc)); if (error != 0) return error; /* * Receive is done to a private pool of jumbo buffers. * This allows us to attach to mbuf's and avoid re-mapping * memory on each rx we post. We allocate a large chunk * of memory and manage it in the driver. The mbuf free * callback method is used to reclaim frames after sending * them up the stack. By default we allocate 2x the number of * rx descriptors configured so we have some slop to hold * us while frames are processed. 
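 * Rough sizing illustration (numbers are hypothetical; the real ones
 * come from the mwl_rxdesc/mwl_rxbuf tunables and MWL_AGGR_SIZE):
 * with 256 rx descriptors mwl_rxbuf is bumped to 512 below, and if
 * the per-buffer size rounds up to a single 4KB page the pool is
 * 512 * 4096 bytes, i.e. 2MB of DMA memory in one contiguous chunk.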
*/ if (mwl_rxbuf < 2*mwl_rxdesc) { device_printf(sc->sc_dev, "too few rx dma buffers (%d); increasing to %d\n", mwl_rxbuf, 2*mwl_rxdesc); mwl_rxbuf = 2*mwl_rxdesc; } jumbosize = roundup(MWL_AGGR_SIZE, PAGE_SIZE); sc->sc_rxmemsize = mwl_rxbuf*jumbosize; error = bus_dma_tag_create(sc->sc_dmat, /* parent */ PAGE_SIZE, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sc->sc_rxmemsize, /* maxsize */ 1, /* nsegments */ sc->sc_rxmemsize, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &sc->sc_rxdmat); if (error != 0) { device_printf(sc->sc_dev, "could not create rx DMA tag\n"); return error; } error = bus_dmamem_alloc(sc->sc_rxdmat, (void**) &sc->sc_rxmem, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &sc->sc_rxmap); if (error != 0) { device_printf(sc->sc_dev, "could not alloc %ju bytes of rx DMA memory\n", (uintmax_t) sc->sc_rxmemsize); return error; } error = bus_dmamap_load(sc->sc_rxdmat, sc->sc_rxmap, sc->sc_rxmem, sc->sc_rxmemsize, mwl_load_cb, &sc->sc_rxmem_paddr, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->sc_dev, "could not load rx DMA map\n"); return error; } /* * Allocate rx buffers and set them up. */ bsize = mwl_rxdesc * sizeof(struct mwl_rxbuf); bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO); if (bf == NULL) { device_printf(sc->sc_dev, "malloc of %u rx buffers failed\n", bsize); return error; } sc->sc_rxdma.dd_bufptr = bf; STAILQ_INIT(&sc->sc_rxbuf); ds = sc->sc_rxdma.dd_desc; for (i = 0; i < mwl_rxdesc; i++, bf++, ds++) { bf->bf_desc = ds; bf->bf_daddr = DS2PHYS(&sc->sc_rxdma, ds); /* pre-assign dma buffer */ bf->bf_data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize); /* NB: tail is intentional to preserve descriptor order */ STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list); } /* * Place remainder of dma memory buffers on the free list. 
*/ SLIST_INIT(&sc->sc_rxfree); for (; i < mwl_rxbuf; i++) { data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize); rbuf = MWL_JUMBO_DATA2BUF(data); SLIST_INSERT_HEAD(&sc->sc_rxfree, rbuf, next); sc->sc_nrxfree++; } return 0; } #undef DS2PHYS static void mwl_rxdma_cleanup(struct mwl_softc *sc) { if (sc->sc_rxmem_paddr != 0) { bus_dmamap_unload(sc->sc_rxdmat, sc->sc_rxmap); sc->sc_rxmem_paddr = 0; } if (sc->sc_rxmem != NULL) { bus_dmamem_free(sc->sc_rxdmat, sc->sc_rxmem, sc->sc_rxmap); sc->sc_rxmem = NULL; } if (sc->sc_rxdma.dd_bufptr != NULL) { free(sc->sc_rxdma.dd_bufptr, M_MWLDEV); sc->sc_rxdma.dd_bufptr = NULL; } if (sc->sc_rxdma.dd_desc_len != 0) mwl_desc_cleanup(sc, &sc->sc_rxdma); } static int mwl_dma_setup(struct mwl_softc *sc) { int error, i; error = mwl_rxdma_setup(sc); if (error != 0) { mwl_rxdma_cleanup(sc); return error; } for (i = 0; i < MWL_NUM_TX_QUEUES; i++) { error = mwl_txdma_setup(sc, &sc->sc_txq[i]); if (error != 0) { mwl_dma_cleanup(sc); return error; } } return 0; } static void mwl_dma_cleanup(struct mwl_softc *sc) { int i; for (i = 0; i < MWL_NUM_TX_QUEUES; i++) mwl_txdma_cleanup(sc, &sc->sc_txq[i]); mwl_rxdma_cleanup(sc); } static struct ieee80211_node * mwl_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) { struct ieee80211com *ic = vap->iv_ic; struct mwl_softc *sc = ic->ic_softc; const size_t space = sizeof(struct mwl_node); struct mwl_node *mn; mn = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO); if (mn == NULL) { /* XXX stat+msg */ return NULL; } DPRINTF(sc, MWL_DEBUG_NODE, "%s: mn %p\n", __func__, mn); return &mn->mn_node; } static void mwl_node_cleanup(struct ieee80211_node *ni) { struct ieee80211com *ic = ni->ni_ic; struct mwl_softc *sc = ic->ic_softc; struct mwl_node *mn = MWL_NODE(ni); DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p ic %p staid %d\n", __func__, ni, ni->ni_ic, mn->mn_staid); if (mn->mn_staid != 0) { struct ieee80211vap *vap = ni->ni_vap; if (mn->mn_hvap != NULL) { if (vap->iv_opmode == IEEE80211_M_STA) mwl_hal_delstation(mn->mn_hvap, vap->iv_myaddr); else mwl_hal_delstation(mn->mn_hvap, ni->ni_macaddr); } /* * NB: legacy WDS peer sta db entry is installed using * the associate ap's hvap; use it again to delete it. * XXX can vap be NULL? */ else if (vap->iv_opmode == IEEE80211_M_WDS && MWL_VAP(vap)->mv_ap_hvap != NULL) mwl_hal_delstation(MWL_VAP(vap)->mv_ap_hvap, ni->ni_macaddr); delstaid(sc, mn->mn_staid); mn->mn_staid = 0; } sc->sc_node_cleanup(ni); } /* * Reclaim rx dma buffers from packets sitting on the ampdu * reorder queue for a station. We replace buffers with a * system cluster (if available). */ static void mwl_ampdu_rxdma_reclaim(struct ieee80211_rx_ampdu *rap) { #if 0 int i, n, off; struct mbuf *m; void *cl; n = rap->rxa_qframes; for (i = 0; i < rap->rxa_wnd && n > 0; i++) { m = rap->rxa_m[i]; if (m == NULL) continue; n--; /* our dma buffers have a well-known free routine */ if ((m->m_flags & M_EXT) == 0 || m->m_ext.ext_free != mwl_ext_free) continue; /* * Try to allocate a cluster and move the data. */ off = m->m_data - m->m_ext.ext_buf; if (off + m->m_pkthdr.len > MCLBYTES) { /* XXX no AMSDU for now */ continue; } cl = pool_cache_get_paddr(&mclpool_cache, 0, &m->m_ext.ext_paddr); if (cl != NULL) { /* * Copy the existing data to the cluster, remove * the rx dma buffer, and attach the cluster in * its place. Note we preserve the offset to the * data so frames being bridged can still prepend * their headers without adding another mbuf. 
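 * That is, the payload is copied to the same byte offset inside the
 * cluster that it had in the jumbo buffer, so whatever leading space
 * existed for prepending headers is still there afterwards.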
*/ memcpy((caddr_t) cl + off, m->m_data, m->m_pkthdr.len); MEXTREMOVE(m); MEXTADD(m, cl, MCLBYTES, 0, NULL, &mclpool_cache); /* setup mbuf like _MCLGET does */ m->m_flags |= M_CLUSTER | M_EXT_RW; _MOWNERREF(m, M_EXT | M_CLUSTER); /* NB: m_data is clobbered by MEXTADDR, adjust */ m->m_data += off; } } #endif } /* * Callback to reclaim resources. We first let the * net80211 layer do it's thing, then if we are still * blocked by a lack of rx dma buffers we walk the ampdu * reorder q's to reclaim buffers by copying to a system * cluster. */ static void mwl_node_drain(struct ieee80211_node *ni) { struct ieee80211com *ic = ni->ni_ic; struct mwl_softc *sc = ic->ic_softc; struct mwl_node *mn = MWL_NODE(ni); DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p vap %p staid %d\n", __func__, ni, ni->ni_vap, mn->mn_staid); /* NB: call up first to age out ampdu q's */ sc->sc_node_drain(ni); /* XXX better to not check low water mark? */ if (sc->sc_rxblocked && mn->mn_staid != 0 && (ni->ni_flags & IEEE80211_NODE_HT)) { uint8_t tid; /* * Walk the reorder q and reclaim rx dma buffers by copying * the packet contents into clusters. */ for (tid = 0; tid < WME_NUM_TID; tid++) { struct ieee80211_rx_ampdu *rap; rap = &ni->ni_rx_ampdu[tid]; if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0) continue; if (rap->rxa_qframes) mwl_ampdu_rxdma_reclaim(rap); } } } static void mwl_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise) { *rssi = ni->ni_ic->ic_node_getrssi(ni); #ifdef MWL_ANT_INFO_SUPPORT #if 0 /* XXX need to smooth data */ *noise = -MWL_NODE_CONST(ni)->mn_ai.nf; #else *noise = -95; /* XXX */ #endif #else *noise = -95; /* XXX */ #endif } /* * Convert Hardware per-antenna rssi info to common format: * Let a1, a2, a3 represent the amplitudes per chain * Let amax represent max[a1, a2, a3] * Rssi1_dBm = RSSI_dBm + 20*log10(a1/amax) * Rssi1_dBm = RSSI_dBm + 20*log10(a1) - 20*log10(amax) * We store a table that is 4*20*log10(idx) - the extra 4 is to store or * maintain some extra precision. * * Values are stored in .5 db format capped at 127. */ static void mwl_node_getmimoinfo(const struct ieee80211_node *ni, struct ieee80211_mimo_info *mi) { #define CVT(_dst, _src) do { \ (_dst) = rssi + ((logdbtbl[_src] - logdbtbl[rssi_max]) >> 2); \ (_dst) = (_dst) > 64 ? 127 : ((_dst) << 1); \ } while (0) static const int8_t logdbtbl[32] = { 0, 0, 24, 38, 48, 56, 62, 68, 72, 76, 80, 83, 86, 89, 92, 94, 96, 98, 100, 102, 104, 106, 107, 109, 110, 112, 113, 115, 116, 117, 118, 119 }; const struct mwl_node *mn = MWL_NODE_CONST(ni); uint8_t rssi = mn->mn_ai.rsvd1/2; /* XXX */ uint32_t rssi_max; rssi_max = mn->mn_ai.rssi_a; if (mn->mn_ai.rssi_b > rssi_max) rssi_max = mn->mn_ai.rssi_b; if (mn->mn_ai.rssi_c > rssi_max) rssi_max = mn->mn_ai.rssi_c; CVT(mi->ch[0].rssi[0], mn->mn_ai.rssi_a); CVT(mi->ch[1].rssi[0], mn->mn_ai.rssi_b); CVT(mi->ch[2].rssi[0], mn->mn_ai.rssi_c); mi->ch[0].noise[0] = mn->mn_ai.nf_a; mi->ch[1].noise[0] = mn->mn_ai.nf_b; mi->ch[2].noise[0] = mn->mn_ai.nf_c; #undef CVT } static __inline void * mwl_getrxdma(struct mwl_softc *sc) { struct mwl_jumbo *buf; void *data; /* * Allocate from jumbo pool. 
*/ MWL_RXFREE_LOCK(sc); buf = SLIST_FIRST(&sc->sc_rxfree); if (buf == NULL) { DPRINTF(sc, MWL_DEBUG_ANY, "%s: out of rx dma buffers\n", __func__); sc->sc_stats.mst_rx_nodmabuf++; data = NULL; } else { SLIST_REMOVE_HEAD(&sc->sc_rxfree, next); sc->sc_nrxfree--; data = MWL_JUMBO_BUF2DATA(buf); } MWL_RXFREE_UNLOCK(sc); return data; } static __inline void mwl_putrxdma(struct mwl_softc *sc, void *data) { struct mwl_jumbo *buf; /* XXX bounds check data */ MWL_RXFREE_LOCK(sc); buf = MWL_JUMBO_DATA2BUF(data); SLIST_INSERT_HEAD(&sc->sc_rxfree, buf, next); sc->sc_nrxfree++; MWL_RXFREE_UNLOCK(sc); } static int mwl_rxbuf_init(struct mwl_softc *sc, struct mwl_rxbuf *bf) { struct mwl_rxdesc *ds; ds = bf->bf_desc; if (bf->bf_data == NULL) { bf->bf_data = mwl_getrxdma(sc); if (bf->bf_data == NULL) { /* mark descriptor to be skipped */ ds->RxControl = EAGLE_RXD_CTRL_OS_OWN; /* NB: don't need PREREAD */ MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE); sc->sc_stats.mst_rxbuf_failed++; return ENOMEM; } } /* * NB: DMA buffer contents is known to be unmodified * so there's no need to flush the data cache. */ /* * Setup descriptor. */ ds->QosCtrl = 0; ds->RSSI = 0; ds->Status = EAGLE_RXD_STATUS_IDLE; ds->Channel = 0; ds->PktLen = htole16(MWL_AGGR_SIZE); ds->SQ2 = 0; ds->pPhysBuffData = htole32(MWL_JUMBO_DMA_ADDR(sc, bf->bf_data)); /* NB: don't touch pPhysNext, set once */ ds->RxControl = EAGLE_RXD_CTRL_DRIVER_OWN; MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return 0; } static void mwl_ext_free(struct mbuf *m) { struct mwl_softc *sc = m->m_ext.ext_arg1; /* XXX bounds check data */ mwl_putrxdma(sc, m->m_ext.ext_buf); /* * If we were previously blocked by a lack of rx dma buffers * check if we now have enough to restart rx interrupt handling. * NB: we know we are called at splvm which is above splnet. */ if (sc->sc_rxblocked && sc->sc_nrxfree > mwl_rxdmalow) { sc->sc_rxblocked = 0; mwl_hal_intrset(sc->sc_mh, sc->sc_imask); } } struct mwl_frame_bar { u_int8_t i_fc[2]; u_int8_t i_dur[2]; u_int8_t i_ra[IEEE80211_ADDR_LEN]; u_int8_t i_ta[IEEE80211_ADDR_LEN]; /* ctl, seq, FCS */ } __packed; /* * Like ieee80211_anyhdrsize, but handles BAR frames * specially so the logic below to piece the 802.11 * header together works. */ static __inline int mwl_anyhdrsize(const void *data) { const struct ieee80211_frame *wh = data; if ((wh->i_fc[0]&IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) { switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) { case IEEE80211_FC0_SUBTYPE_CTS: case IEEE80211_FC0_SUBTYPE_ACK: return sizeof(struct ieee80211_frame_ack); case IEEE80211_FC0_SUBTYPE_BAR: return sizeof(struct mwl_frame_bar); } return sizeof(struct ieee80211_frame_min); } else return ieee80211_hdrsize(data); } static void mwl_handlemicerror(struct ieee80211com *ic, const uint8_t *data) { const struct ieee80211_frame *wh; struct ieee80211_node *ni; wh = (const struct ieee80211_frame *)(data + sizeof(uint16_t)); ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh); if (ni != NULL) { ieee80211_notify_michael_failure(ni->ni_vap, wh, 0); ieee80211_free_node(ni); } } /* * Convert hardware signal strength to rssi. The value * provided by the device has the noise floor added in; * we need to compensate for this but we don't have that * so we use a fixed value. * * The offset of 8 is good for both 2.4 and 5GHz. The LNA * offset is already set as part of the initial gain. This * will give at least +/- 3dB for 2.4GHz and +/- 5dB for 5GHz. 
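 * Worked example (the input value is arbitrary): a hardware reading
 * of 47 becomes 47 + 8 = 55, then 2 * (87 - 55) = 64, i.e. 64 steps
 * of .5dB (32dB) above the assumed floor; the result is clamped to
 * the 0..127 range expected by the callers.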
*/ static __inline int cvtrssi(uint8_t ssi) { int rssi = (int) ssi + 8; /* XXX hack guess until we have a real noise floor */ rssi = 2*(87 - rssi); /* NB: .5 dBm units */ return (rssi < 0 ? 0 : rssi > 127 ? 127 : rssi); } static void mwl_rx_proc(void *arg, int npending) { struct epoch_tracker et; struct mwl_softc *sc = arg; struct ieee80211com *ic = &sc->sc_ic; struct mwl_rxbuf *bf; struct mwl_rxdesc *ds; struct mbuf *m; struct ieee80211_qosframe *wh; struct ieee80211_node *ni; struct mwl_node *mn; int off, len, hdrlen, pktlen, rssi, ntodo; uint8_t *data, status; void *newdata; int16_t nf; DPRINTF(sc, MWL_DEBUG_RX_PROC, "%s: pending %u rdptr 0x%x wrptr 0x%x\n", __func__, npending, RD4(sc, sc->sc_hwspecs.rxDescRead), RD4(sc, sc->sc_hwspecs.rxDescWrite)); nf = -96; /* XXX */ bf = sc->sc_rxnext; for (ntodo = mwl_rxquota; ntodo > 0; ntodo--) { if (bf == NULL) bf = STAILQ_FIRST(&sc->sc_rxbuf); ds = bf->bf_desc; data = bf->bf_data; if (data == NULL) { /* * If data allocation failed previously there * will be no buffer; try again to re-populate it. * Note the firmware will not advance to the next * descriptor with a dma buffer so we must mimic * this or we'll get out of sync. */ DPRINTF(sc, MWL_DEBUG_ANY, "%s: rx buf w/o dma memory\n", __func__); (void) mwl_rxbuf_init(sc, bf); sc->sc_stats.mst_rx_dmabufmissing++; break; } MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (ds->RxControl != EAGLE_RXD_CTRL_DMA_OWN) break; #ifdef MWL_DEBUG if (sc->sc_debug & MWL_DEBUG_RECV_DESC) mwl_printrxbuf(bf, 0); #endif status = ds->Status; if (status & EAGLE_RXD_STATUS_DECRYPT_ERR_MASK) { counter_u64_add(ic->ic_ierrors, 1); sc->sc_stats.mst_rx_crypto++; /* * NB: Check EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR * for backwards compatibility. */ if (status != EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR && (status & EAGLE_RXD_STATUS_TKIP_MIC_DECRYPT_ERR)) { /* * MIC error, notify upper layers. */ bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap, BUS_DMASYNC_POSTREAD); mwl_handlemicerror(ic, data); sc->sc_stats.mst_rx_tkipmic++; } /* XXX too painful to tap packets */ goto rx_next; } /* * Sync the data buffer. */ len = le16toh(ds->PktLen); bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap, BUS_DMASYNC_POSTREAD); /* * The 802.11 header is provided all or in part at the front; * use it to calculate the true size of the header that we'll * construct below. We use this to figure out where to copy * payload prior to constructing the header. */ hdrlen = mwl_anyhdrsize(data + sizeof(uint16_t)); off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4); /* calculate rssi early so we can re-use for each aggregate */ rssi = cvtrssi(ds->RSSI); pktlen = hdrlen + (len - off); /* * NB: we know our frame is at least as large as * IEEE80211_MIN_LEN because there is a 4-address * frame at the front. Hence there's no need to * vet the packet length. If the frame in fact * is too small it should be discarded at the * net80211 layer. */ /* * Attach dma buffer to an mbuf. We tried * doing this based on the packet size (i.e. * copying small packets) but it turns out to * be a net loss. The tradeoff might be system * dependent (cache architecture is important). */ MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) { DPRINTF(sc, MWL_DEBUG_ANY, "%s: no rx mbuf\n", __func__); sc->sc_stats.mst_rx_nombuf++; goto rx_next; } /* * Acquire the replacement dma buffer before * processing the frame. If we're out of dma * buffers we disable rx interrupts and wait * for the free pool to reach mlw_rxdmalow buffers * before starting to do work again. 
If the firmware * runs out of descriptors then it will toss frames * which is better than our doing it as that can * starve our processing. It is also important that * we always process rx'd frames in case they are * A-MPDU as otherwise the host's view of the BA * window may get out of sync with the firmware. */ newdata = mwl_getrxdma(sc); if (newdata == NULL) { /* NB: stat+msg in mwl_getrxdma */ m_free(m); /* disable RX interrupt and mark state */ mwl_hal_intrset(sc->sc_mh, sc->sc_imask &~ MACREG_A2HRIC_BIT_RX_RDY); sc->sc_rxblocked = 1; ieee80211_drain(ic); /* XXX check rxblocked and immediately start again? */ goto rx_stop; } bf->bf_data = newdata; /* * Attach the dma buffer to the mbuf; * mwl_rxbuf_init will re-setup the rx * descriptor using the replacement dma * buffer we just installed above. */ m_extadd(m, data, MWL_AGGR_SIZE, mwl_ext_free, sc, NULL, 0, EXT_NET_DRV); m->m_data += off - hdrlen; m->m_pkthdr.len = m->m_len = pktlen; /* NB: dma buffer assumed read-only */ /* * Piece 802.11 header together. */ wh = mtod(m, struct ieee80211_qosframe *); /* NB: don't need to do this sometimes but ... */ /* XXX special case so we can memcpy after m_devget? */ ovbcopy(data + sizeof(uint16_t), wh, hdrlen); if (IEEE80211_QOS_HAS_SEQ(wh)) *(uint16_t *)ieee80211_getqos(wh) = ds->QosCtrl; /* * The f/w strips WEP header but doesn't clear * the WEP bit; mark the packet with M_WEP so * net80211 will treat the data as decrypted. * While here also clear the PWR_MGT bit since * power save is handled by the firmware and * passing this up will potentially cause the * upper layer to put a station in power save * (except when configured with MWL_HOST_PS_SUPPORT). */ if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) m->m_flags |= M_WEP; #ifdef MWL_HOST_PS_SUPPORT wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED; #else wh->i_fc[1] &= ~(IEEE80211_FC1_PROTECTED | IEEE80211_FC1_PWR_MGT); #endif if (ieee80211_radiotap_active(ic)) { struct mwl_rx_radiotap_header *tap = &sc->sc_rx_th; tap->wr_flags = 0; tap->wr_rate = ds->Rate; tap->wr_antsignal = rssi + nf; tap->wr_antnoise = nf; } if (IFF_DUMPPKTS_RECV(sc, wh)) { ieee80211_dump_pkt(ic, mtod(m, caddr_t), len, ds->Rate, rssi); } /* dispatch */ ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh); NET_EPOCH_ENTER(et); if (ni != NULL) { mn = MWL_NODE(ni); #ifdef MWL_ANT_INFO_SUPPORT mn->mn_ai.rssi_a = ds->ai.rssi_a; mn->mn_ai.rssi_b = ds->ai.rssi_b; mn->mn_ai.rssi_c = ds->ai.rssi_c; mn->mn_ai.rsvd1 = rssi; #endif /* tag AMPDU aggregates for reorder processing */ if (ni->ni_flags & IEEE80211_NODE_HT) m->m_flags |= M_AMPDU; (void) ieee80211_input(ni, m, rssi, nf); ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, rssi, nf); NET_EPOCH_EXIT(et); rx_next: /* NB: ignore ENOMEM so we process more descriptors */ (void) mwl_rxbuf_init(sc, bf); bf = STAILQ_NEXT(bf, bf_list); } rx_stop: sc->sc_rxnext = bf; if (mbufq_first(&sc->sc_snd) != NULL) { /* NB: kick fw; the tx thread may have been preempted */ mwl_hal_txstart(sc->sc_mh, 0); mwl_start(sc); } } static void mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *txq, int qnum) { struct mwl_txbuf *bf, *bn; struct mwl_txdesc *ds; MWL_TXQ_LOCK_INIT(sc, txq); txq->qnum = qnum; txq->txpri = 0; /* XXX */ #if 0 /* NB: q setup by mwl_txdma_setup XXX */ STAILQ_INIT(&txq->free); #endif STAILQ_FOREACH(bf, &txq->free, bf_list) { bf->bf_txq = txq; ds = bf->bf_desc; bn = STAILQ_NEXT(bf, bf_list); if (bn == NULL) bn = STAILQ_FIRST(&txq->free); ds->pPhysNext = htole32(bn->bf_daddr); } STAILQ_INIT(&txq->active); } /* * 
Setup a hardware data transmit queue for the specified * access category. We record the mapping from ac's * to h/w queues for use by mwl_tx_start. */ static int mwl_tx_setup(struct mwl_softc *sc, int ac, int mvtype) { struct mwl_txq *txq; if (ac >= nitems(sc->sc_ac2q)) { device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n", ac, nitems(sc->sc_ac2q)); return 0; } if (mvtype >= MWL_NUM_TX_QUEUES) { device_printf(sc->sc_dev, "mvtype %u out of range, max %u!\n", mvtype, MWL_NUM_TX_QUEUES); return 0; } txq = &sc->sc_txq[mvtype]; mwl_txq_init(sc, txq, mvtype); sc->sc_ac2q[ac] = txq; return 1; } /* * Update WME parameters for a transmit queue. */ static int mwl_txq_update(struct mwl_softc *sc, int ac) { #define MWL_EXPONENT_TO_VALUE(v) ((1<<v)-1) struct ieee80211com *ic = &sc->sc_ic; struct chanAccParams chp; struct mwl_txq *txq = sc->sc_ac2q[ac]; struct wmeParams *wmep; struct mwl_hal *mh = sc->sc_mh; int aifs, cwmin, cwmax, txoplim; ieee80211_wme_ic_getparams(ic, &chp); wmep = &chp.cap_wmeParams[ac]; aifs = wmep->wmep_aifsn; /* XXX in sta mode need to pass log values for cwmin/max */ cwmin = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); cwmax = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); txoplim = wmep->wmep_txopLimit; /* NB: units of 32us */ if (mwl_hal_setedcaparams(mh, txq->qnum, cwmin, cwmax, aifs, txoplim)) { device_printf(sc->sc_dev, "unable to update hardware queue " "parameters for %s traffic!\n", ieee80211_wme_acnames[ac]); return 0; } return 1; #undef MWL_EXPONENT_TO_VALUE } /* * Callback from the 802.11 layer to update WME parameters. */ static int mwl_wme_update(struct ieee80211com *ic) { struct mwl_softc *sc = ic->ic_softc; return !mwl_txq_update(sc, WME_AC_BE) || !mwl_txq_update(sc, WME_AC_BK) || !mwl_txq_update(sc, WME_AC_VI) || !mwl_txq_update(sc, WME_AC_VO) ? EIO : 0; } /* * Reclaim resources for a setup queue. */ static void mwl_tx_cleanupq(struct mwl_softc *sc, struct mwl_txq *txq) { /* XXX hal work? */ MWL_TXQ_LOCK_DESTROY(txq); } /* * Reclaim all tx queue resources. */ static void mwl_tx_cleanup(struct mwl_softc *sc) { int i; for (i = 0; i < MWL_NUM_TX_QUEUES; i++) mwl_tx_cleanupq(sc, &sc->sc_txq[i]); } static int mwl_tx_dmasetup(struct mwl_softc *sc, struct mwl_txbuf *bf, struct mbuf *m0) { struct mbuf *m; int error; /* * Load the DMA map so any coalescing is done. This * also calculates the number of descriptors we need. */ error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT); if (error == EFBIG) { /* XXX packet requires too many descriptors */ bf->bf_nseg = MWL_TXDESC+1; } else if (error != 0) { sc->sc_stats.mst_tx_busdma++; m_freem(m0); return error; } /* * Discard null packets and check for packets that * require too many TX descriptors. We try to convert * the latter to a cluster.
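 * Note the EFBIG case above intentionally falls through with a
 * sentinel segment count (MWL_TXDESC+1); the code below linearizes
 * the chain, with m_collapse() when MWL_TXDESC > 1 or m_defrag()
 * otherwise, and then loads the DMA map a second time.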
*/ if (error == EFBIG) { /* too many desc's, linearize */ sc->sc_stats.mst_tx_linear++; #if MWL_TXDESC > 1 m = m_collapse(m0, M_NOWAIT, MWL_TXDESC); #else m = m_defrag(m0, M_NOWAIT); #endif if (m == NULL) { m_freem(m0); sc->sc_stats.mst_tx_nombuf++; return ENOMEM; } m0 = m; error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT); if (error != 0) { sc->sc_stats.mst_tx_busdma++; m_freem(m0); return error; } KASSERT(bf->bf_nseg <= MWL_TXDESC, ("too many segments after defrag; nseg %u", bf->bf_nseg)); } else if (bf->bf_nseg == 0) { /* null packet, discard */ sc->sc_stats.mst_tx_nodata++; m_freem(m0); return EIO; } DPRINTF(sc, MWL_DEBUG_XMIT, "%s: m %p len %u\n", __func__, m0, m0->m_pkthdr.len); bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); bf->bf_m = m0; return 0; } static __inline int mwl_cvtlegacyrate(int rate) { switch (rate) { case 2: return 0; case 4: return 1; case 11: return 2; case 22: return 3; case 44: return 4; case 12: return 5; case 18: return 6; case 24: return 7; case 36: return 8; case 48: return 9; case 72: return 10; case 96: return 11; case 108:return 12; } return 0; } /* * Calculate fixed tx rate information per client state; * this value is suitable for writing to the Format field * of a tx descriptor. */ static uint16_t mwl_calcformat(uint8_t rate, const struct ieee80211_node *ni) { uint16_t fmt; fmt = SM(3, EAGLE_TXD_ANTENNA) | (IEEE80211_IS_CHAN_HT40D(ni->ni_chan) ? EAGLE_TXD_EXTCHAN_LO : EAGLE_TXD_EXTCHAN_HI); if (rate & IEEE80211_RATE_MCS) { /* HT MCS */ fmt |= EAGLE_TXD_FORMAT_HT /* NB: 0x80 implicitly stripped from ucastrate */ | SM(rate, EAGLE_TXD_RATE); /* XXX short/long GI may be wrong; re-check */ if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) { fmt |= EAGLE_TXD_CHW_40 | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40 ? EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG); } else { fmt |= EAGLE_TXD_CHW_20 | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20 ? EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG); } } else { /* legacy rate */ fmt |= EAGLE_TXD_FORMAT_LEGACY | SM(mwl_cvtlegacyrate(rate), EAGLE_TXD_RATE) | EAGLE_TXD_CHW_20 /* XXX iv_flags & IEEE80211_F_SHPREAMBLE? */ | (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE ? EAGLE_TXD_PREAMBLE_SHORT : EAGLE_TXD_PREAMBLE_LONG); } return fmt; } static int mwl_tx_start(struct mwl_softc *sc, struct ieee80211_node *ni, struct mwl_txbuf *bf, struct mbuf *m0) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = ni->ni_vap; int error, iswep, ismcast; int hdrlen, copyhdrlen, pktlen; struct mwl_txdesc *ds; struct mwl_txq *txq; struct ieee80211_frame *wh; struct mwltxrec *tr; struct mwl_node *mn; uint16_t qos; #if MWL_TXDESC > 1 int i; #endif wh = mtod(m0, struct ieee80211_frame *); iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED; ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); hdrlen = ieee80211_anyhdrsize(wh); copyhdrlen = hdrlen; pktlen = m0->m_pkthdr.len; if (IEEE80211_QOS_HAS_SEQ(wh)) { qos = *(uint16_t *)ieee80211_getqos(wh); if (IEEE80211_IS_DSTODS(wh)) copyhdrlen -= sizeof(qos); } else qos = 0; if (iswep) { const struct ieee80211_cipher *cip; struct ieee80211_key *k; /* * Construct the 802.11 header+trailer for an encrypted * frame. The only reason this can fail is because of an * unknown or unsupported cipher/key type. * * NB: we do this even though the firmware will ignore * what we've done for WEP and TKIP as we need the * ExtIV filled in for CCMP and this also adjusts * the headers which simplifies our work below. 
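 * The pktlen adjustment below adds cip->ic_header + ic_miclen +
 * ic_trailer so that fwlen (the "802.11 payload" length handed to
 * the firmware) covers the crypto material too; for CCMP, for
 * example, that is typically an 8-byte extended IV header plus an
 * 8-byte MIC and no trailer.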
*/ k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { /* * This can happen when the key is yanked after the * frame was queued. Just discard the frame; the * 802.11 layer counts failures and provides * debugging/diagnostics. */ m_freem(m0); return EIO; } /* * Adjust the packet length for the crypto additions * done during encap and any other bits that the f/w * will add later on. */ cip = k->wk_cipher; pktlen += cip->ic_header + cip->ic_miclen + cip->ic_trailer; /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } if (ieee80211_radiotap_active_vap(vap)) { sc->sc_tx_th.wt_flags = 0; /* XXX */ if (iswep) sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; #if 0 sc->sc_tx_th.wt_rate = ds->DataRate; #endif sc->sc_tx_th.wt_txpower = ni->ni_txpower; sc->sc_tx_th.wt_antenna = sc->sc_txantenna; ieee80211_radiotap_tx(vap, m0); } /* * Copy up/down the 802.11 header; the firmware requires * we present a 2-byte payload length followed by a * 4-address header (w/o QoS), followed (optionally) by * any WEP/ExtIV header (but only filled in for CCMP). * We are assured the mbuf has sufficient headroom to * prepend in-place by the setup of ic_headroom in * mwl_attach. */ if (hdrlen < sizeof(struct mwltxrec)) { const int space = sizeof(struct mwltxrec) - hdrlen; if (M_LEADINGSPACE(m0) < space) { /* NB: should never happen */ device_printf(sc->sc_dev, "not enough headroom, need %d found %zd, " "m_flags 0x%x m_len %d\n", space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len); ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len, 0, -1); m_freem(m0); sc->sc_stats.mst_tx_noheadroom++; return EIO; } M_PREPEND(m0, space, M_NOWAIT); } tr = mtod(m0, struct mwltxrec *); if (wh != (struct ieee80211_frame *) &tr->wh) ovbcopy(wh, &tr->wh, hdrlen); /* * Note: the "firmware length" is actually the length * of the fully formed "802.11 payload". That is, it's * everything except for the 802.11 header. In particular * this includes all crypto material including the MIC! */ tr->fwlen = htole16(pktlen - hdrlen); /* * Load the DMA map so any coalescing is done. This * also calculates the number of descriptors we need. */ error = mwl_tx_dmasetup(sc, bf, m0); if (error != 0) { /* NB: stat collected in mwl_tx_dmasetup */ DPRINTF(sc, MWL_DEBUG_XMIT, "%s: unable to setup dma\n", __func__); return error; } bf->bf_node = ni; /* NB: held reference */ m0 = bf->bf_m; /* NB: may have changed */ tr = mtod(m0, struct mwltxrec *); wh = (struct ieee80211_frame *)&tr->wh; /* * Formulate tx descriptor. */ ds = bf->bf_desc; txq = bf->bf_txq; ds->QosCtrl = qos; /* NB: already little-endian */ #if MWL_TXDESC == 1 /* * NB: multiframes should be zero because the descriptors * are initialized to zero. This should handle the case * where the driver is built with MWL_TXDESC=1 but we are * using firmware with multi-segment support. */ ds->PktPtr = htole32(bf->bf_segs[0].ds_addr); ds->PktLen = htole16(bf->bf_segs[0].ds_len); #else ds->multiframes = htole32(bf->bf_nseg); ds->PktLen = htole16(m0->m_pkthdr.len); for (i = 0; i < bf->bf_nseg; i++) { ds->PktPtrArray[i] = htole32(bf->bf_segs[i].ds_addr); ds->PktLenArray[i] = htole16(bf->bf_segs[i].ds_len); } #endif /* NB: pPhysNext, DataRate, and SapPktInfo setup once, don't touch */ ds->Format = 0; ds->pad = 0; ds->ack_wcb_addr = 0; mn = MWL_NODE(ni); /* * Select transmit rate. */ switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { case IEEE80211_FC0_TYPE_MGT: sc->sc_stats.mst_tx_mgmt++; /* fall thru... 
*/ case IEEE80211_FC0_TYPE_CTL: /* NB: assign to BE q to avoid bursting */ ds->TxPriority = MWL_WME_AC_BE; break; case IEEE80211_FC0_TYPE_DATA: if (!ismcast) { const struct ieee80211_txparam *tp = ni->ni_txparms; /* * EAPOL frames get forced to a fixed rate and w/o * aggregation; otherwise check for any fixed rate * for the client (may depend on association state). */ if (m0->m_flags & M_EAPOL) { const struct mwl_vap *mvp = MWL_VAP_CONST(vap); ds->Format = mvp->mv_eapolformat; ds->pad = htole16( EAGLE_TXD_FIXED_RATE | EAGLE_TXD_DONT_AGGR); } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) { /* XXX pre-calculate per node */ ds->Format = htole16( mwl_calcformat(tp->ucastrate, ni)); ds->pad = htole16(EAGLE_TXD_FIXED_RATE); } /* NB: EAPOL frames will never have qos set */ if (qos == 0) ds->TxPriority = txq->qnum; #if MWL_MAXBA > 3 else if (mwl_bastream_match(&mn->mn_ba[3], qos)) ds->TxPriority = mn->mn_ba[3].txq; #endif #if MWL_MAXBA > 2 else if (mwl_bastream_match(&mn->mn_ba[2], qos)) ds->TxPriority = mn->mn_ba[2].txq; #endif #if MWL_MAXBA > 1 else if (mwl_bastream_match(&mn->mn_ba[1], qos)) ds->TxPriority = mn->mn_ba[1].txq; #endif #if MWL_MAXBA > 0 else if (mwl_bastream_match(&mn->mn_ba[0], qos)) ds->TxPriority = mn->mn_ba[0].txq; #endif else ds->TxPriority = txq->qnum; } else ds->TxPriority = txq->qnum; break; default: device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n", wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__); sc->sc_stats.mst_tx_badframetype++; m_freem(m0); return EIO; } if (IFF_DUMPPKTS_XMIT(sc)) ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *)+sizeof(uint16_t), m0->m_len - sizeof(uint16_t), ds->DataRate, -1); MWL_TXQ_LOCK(txq); ds->Status = htole32(EAGLE_TXD_STATUS_FW_OWNED); STAILQ_INSERT_TAIL(&txq->active, bf, bf_list); MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); sc->sc_tx_timer = 5; MWL_TXQ_UNLOCK(txq); return 0; } static __inline int mwl_cvtlegacyrix(int rix) { static const int ieeerates[] = { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 }; return (rix < nitems(ieeerates) ? ieeerates[rix] : 0); } /* * Process completed xmit descriptors from the specified queue. 
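 * On successful completions the rate actually used is read back from
 * the descriptor's Format field: a legacy rate index is mapped to an
 * IEEE half-Mb/s code by mwl_cvtlegacyrix() (e.g. index 2 maps to 11,
 * i.e. 5.5Mb/s) while HT entries get IEEE80211_RATE_MCS or'd in, and
 * the result is stored in ni_txrate for rate reporting.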
*/ static int mwl_tx_processq(struct mwl_softc *sc, struct mwl_txq *txq) { #define EAGLE_TXD_STATUS_MCAST \ (EAGLE_TXD_STATUS_MULTICAST_TX | EAGLE_TXD_STATUS_BROADCAST_TX) struct ieee80211com *ic = &sc->sc_ic; struct mwl_txbuf *bf; struct mwl_txdesc *ds; struct ieee80211_node *ni; struct mwl_node *an; int nreaped; uint32_t status; DPRINTF(sc, MWL_DEBUG_TX_PROC, "%s: tx queue %u\n", __func__, txq->qnum); for (nreaped = 0;; nreaped++) { MWL_TXQ_LOCK(txq); bf = STAILQ_FIRST(&txq->active); if (bf == NULL) { MWL_TXQ_UNLOCK(txq); break; } ds = bf->bf_desc; MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (ds->Status & htole32(EAGLE_TXD_STATUS_FW_OWNED)) { MWL_TXQ_UNLOCK(txq); break; } STAILQ_REMOVE_HEAD(&txq->active, bf_list); MWL_TXQ_UNLOCK(txq); #ifdef MWL_DEBUG if (sc->sc_debug & MWL_DEBUG_XMIT_DESC) mwl_printtxbuf(bf, txq->qnum, nreaped); #endif ni = bf->bf_node; if (ni != NULL) { an = MWL_NODE(ni); status = le32toh(ds->Status); if (status & EAGLE_TXD_STATUS_OK) { uint16_t Format = le16toh(ds->Format); uint8_t txant = MS(Format, EAGLE_TXD_ANTENNA); sc->sc_stats.mst_ant_tx[txant]++; if (status & EAGLE_TXD_STATUS_OK_RETRY) sc->sc_stats.mst_tx_retries++; if (status & EAGLE_TXD_STATUS_OK_MORE_RETRY) sc->sc_stats.mst_tx_mretries++; if (txq->qnum >= MWL_WME_AC_VO) ic->ic_wme.wme_hipri_traffic++; ni->ni_txrate = MS(Format, EAGLE_TXD_RATE); if ((Format & EAGLE_TXD_FORMAT_HT) == 0) { ni->ni_txrate = mwl_cvtlegacyrix( ni->ni_txrate); } else ni->ni_txrate |= IEEE80211_RATE_MCS; sc->sc_stats.mst_tx_rate = ni->ni_txrate; } else { if (status & EAGLE_TXD_STATUS_FAILED_LINK_ERROR) sc->sc_stats.mst_tx_linkerror++; if (status & EAGLE_TXD_STATUS_FAILED_XRETRY) sc->sc_stats.mst_tx_xretries++; if (status & EAGLE_TXD_STATUS_FAILED_AGING) sc->sc_stats.mst_tx_aging++; if (bf->bf_m->m_flags & M_FF) sc->sc_stats.mst_ff_txerr++; } if (bf->bf_m->m_flags & M_TXCB) /* XXX strip fw len in case header inspected */ m_adj(bf->bf_m, sizeof(uint16_t)); ieee80211_tx_complete(ni, bf->bf_m, (status & EAGLE_TXD_STATUS_OK) == 0); } else m_freem(bf->bf_m); ds->Status = htole32(EAGLE_TXD_STATUS_IDLE); bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); mwl_puttxbuf_tail(txq, bf); } return nreaped; #undef EAGLE_TXD_STATUS_MCAST } /* * Deferred processing of transmit interrupt; special-cased * for four hardware queues, 0-3. */ static void mwl_tx_proc(void *arg, int npending) { struct mwl_softc *sc = arg; int nreaped; /* * Process each active queue. 
*/ nreaped = 0; if (!STAILQ_EMPTY(&sc->sc_txq[0].active)) nreaped += mwl_tx_processq(sc, &sc->sc_txq[0]); if (!STAILQ_EMPTY(&sc->sc_txq[1].active)) nreaped += mwl_tx_processq(sc, &sc->sc_txq[1]); if (!STAILQ_EMPTY(&sc->sc_txq[2].active)) nreaped += mwl_tx_processq(sc, &sc->sc_txq[2]); if (!STAILQ_EMPTY(&sc->sc_txq[3].active)) nreaped += mwl_tx_processq(sc, &sc->sc_txq[3]); if (nreaped != 0) { sc->sc_tx_timer = 0; if (mbufq_first(&sc->sc_snd) != NULL) { /* NB: kick fw; the tx thread may have been preempted */ mwl_hal_txstart(sc->sc_mh, 0); mwl_start(sc); } } } static void mwl_tx_draintxq(struct mwl_softc *sc, struct mwl_txq *txq) { struct ieee80211_node *ni; struct mwl_txbuf *bf; u_int ix; /* * NB: this assumes output has been stopped and * we do not need to block mwl_tx_tasklet */ for (ix = 0;; ix++) { MWL_TXQ_LOCK(txq); bf = STAILQ_FIRST(&txq->active); if (bf == NULL) { MWL_TXQ_UNLOCK(txq); break; } STAILQ_REMOVE_HEAD(&txq->active, bf_list); MWL_TXQ_UNLOCK(txq); #ifdef MWL_DEBUG if (sc->sc_debug & MWL_DEBUG_RESET) { struct ieee80211com *ic = &sc->sc_ic; const struct mwltxrec *tr = mtod(bf->bf_m, const struct mwltxrec *); mwl_printtxbuf(bf, txq->qnum, ix); ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh, bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1); } #endif /* MWL_DEBUG */ bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); ni = bf->bf_node; if (ni != NULL) { /* * Reclaim node reference. */ ieee80211_free_node(ni); } m_freem(bf->bf_m); mwl_puttxbuf_tail(txq, bf); } } /* * Drain the transmit queues and reclaim resources. */ static void mwl_draintxq(struct mwl_softc *sc) { int i; for (i = 0; i < MWL_NUM_TX_QUEUES; i++) mwl_tx_draintxq(sc, &sc->sc_txq[i]); sc->sc_tx_timer = 0; } #ifdef MWL_DIAGAPI /* * Reset the transmit queues to a pristine state after a fw download. */ static void mwl_resettxq(struct mwl_softc *sc) { int i; for (i = 0; i < MWL_NUM_TX_QUEUES; i++) mwl_txq_reset(sc, &sc->sc_txq[i]); } #endif /* MWL_DIAGAPI */ /* * Clear the transmit queues of any frames submitted for the * specified vap. This is done when the vap is deleted so we * don't potentially reference the vap after it is gone. * Note we cannot remove the frames; we only reclaim the node * reference. 
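 * The walk is a plain STAILQ traversal of each queue's active list; the
 * same list handling, reduced to a user-space sketch with hypothetical
 * types and only <sys/queue.h>, is shown below.
 */
#if 0	/* illustrative sketch only, not driver code */
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	int			owner;		/* stands in for bf_node's vap */
	STAILQ_ENTRY(entry)	link;
};
STAILQ_HEAD(entryhead, entry);

int
main(void)
{
	struct entryhead head = STAILQ_HEAD_INITIALIZER(head);
	struct entry *e;
	int i;

	for (i = 0; i < 4; i++) {
		e = calloc(1, sizeof(*e));
		e->owner = i & 1;
		STAILQ_INSERT_TAIL(&head, e, link);
	}
	/* Walk in place and drop references owned by "vap" 1 (cf. cleartxq). */
	STAILQ_FOREACH(e, &head, link)
		if (e->owner == 1)
			e->owner = -1;		/* analogous to bf_node = NULL */
	/* Drain from the head, as the draintxq paths above do. */
	while ((e = STAILQ_FIRST(&head)) != NULL) {
		STAILQ_REMOVE_HEAD(&head, link);
		printf("reclaim entry with owner %d\n", e->owner);
		free(e);
	}
	return (0);
}
#endif
/*
 * mwl_cleartxq() below does exactly this for every hardware queue, under
 * the queue lock, freeing only the node reference and not the frame.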
*/ static void mwl_cleartxq(struct mwl_softc *sc, struct ieee80211vap *vap) { struct mwl_txq *txq; struct mwl_txbuf *bf; int i; for (i = 0; i < MWL_NUM_TX_QUEUES; i++) { txq = &sc->sc_txq[i]; MWL_TXQ_LOCK(txq); STAILQ_FOREACH(bf, &txq->active, bf_list) { struct ieee80211_node *ni = bf->bf_node; if (ni != NULL && ni->ni_vap == vap) { bf->bf_node = NULL; ieee80211_free_node(ni); } } MWL_TXQ_UNLOCK(txq); } } static int mwl_recv_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh, const uint8_t *frm, const uint8_t *efrm) { struct mwl_softc *sc = ni->ni_ic->ic_softc; const struct ieee80211_action *ia; ia = (const struct ieee80211_action *) frm; if (ia->ia_category == IEEE80211_ACTION_CAT_HT && ia->ia_action == IEEE80211_ACTION_HT_MIMOPWRSAVE) { const struct ieee80211_action_ht_mimopowersave *mps = (const struct ieee80211_action_ht_mimopowersave *) ia; mwl_hal_setmimops(sc->sc_mh, ni->ni_macaddr, mps->am_control & IEEE80211_A_HT_MIMOPWRSAVE_ENA, MS(mps->am_control, IEEE80211_A_HT_MIMOPWRSAVE_MODE)); return 0; } else return sc->sc_recv_action(ni, wh, frm, efrm); } static int mwl_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, int dialogtoken, int baparamset, int batimeout) { struct mwl_softc *sc = ni->ni_ic->ic_softc; struct ieee80211vap *vap = ni->ni_vap; struct mwl_node *mn = MWL_NODE(ni); struct mwl_bastate *bas; bas = tap->txa_private; if (bas == NULL) { const MWL_HAL_BASTREAM *sp; /* * Check for a free BA stream slot. */ #if MWL_MAXBA > 3 if (mn->mn_ba[3].bastream == NULL) bas = &mn->mn_ba[3]; else #endif #if MWL_MAXBA > 2 if (mn->mn_ba[2].bastream == NULL) bas = &mn->mn_ba[2]; else #endif #if MWL_MAXBA > 1 if (mn->mn_ba[1].bastream == NULL) bas = &mn->mn_ba[1]; else #endif #if MWL_MAXBA > 0 if (mn->mn_ba[0].bastream == NULL) bas = &mn->mn_ba[0]; else #endif { /* sta already has max BA streams */ /* XXX assign BA stream to highest priority tid */ DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: already has max bastreams\n", __func__); sc->sc_stats.mst_ampdu_reject++; return 0; } /* NB: no held reference to ni */ sp = mwl_hal_bastream_alloc(MWL_VAP(vap)->mv_hvap, (baparamset & IEEE80211_BAPS_POLICY_IMMEDIATE) != 0, ni->ni_macaddr, tap->txa_tid, ni->ni_htparam, ni, tap); if (sp == NULL) { /* * No available stream, return 0 so no * a-mpdu aggregation will be done. */ DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: no bastream available\n", __func__); sc->sc_stats.mst_ampdu_nostream++; return 0; } DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: alloc bastream %p\n", __func__, sp); /* NB: qos is left zero so we won't match in mwl_tx_start */ bas->bastream = sp; tap->txa_private = bas; } /* fetch current seq# from the firmware; if available */ if (mwl_hal_bastream_get_seqno(sc->sc_mh, bas->bastream, vap->iv_opmode == IEEE80211_M_STA ? 
vap->iv_myaddr : ni->ni_macaddr, &tap->txa_start) != 0) tap->txa_start = 0; return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout); } static int mwl_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, int code, int baparamset, int batimeout) { struct mwl_softc *sc = ni->ni_ic->ic_softc; struct mwl_bastate *bas; bas = tap->txa_private; if (bas == NULL) { /* XXX should not happen */ DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: no BA stream allocated, TID %d\n", __func__, tap->txa_tid); sc->sc_stats.mst_addba_nostream++; return 0; } if (code == IEEE80211_STATUS_SUCCESS) { struct ieee80211vap *vap = ni->ni_vap; int bufsiz, error; /* * Tell the firmware to setup the BA stream; * we know resources are available because we * pre-allocated one before forming the request. */ bufsiz = MS(baparamset, IEEE80211_BAPS_BUFSIZ); if (bufsiz == 0) bufsiz = IEEE80211_AGGR_BAWMAX; error = mwl_hal_bastream_create(MWL_VAP(vap)->mv_hvap, bas->bastream, bufsiz, bufsiz, tap->txa_start); if (error != 0) { /* * Setup failed, return immediately so no a-mpdu * aggregation will be done. */ mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream); mwl_bastream_free(bas); tap->txa_private = NULL; DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: create failed, error %d, bufsiz %d TID %d " "htparam 0x%x\n", __func__, error, bufsiz, tap->txa_tid, ni->ni_htparam); sc->sc_stats.mst_bacreate_failed++; return 0; } /* NB: cache txq to avoid ptr indirect */ mwl_bastream_setup(bas, tap->txa_tid, bas->bastream->txq); DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: bastream %p assigned to txq %d TID %d bufsiz %d " "htparam 0x%x\n", __func__, bas->bastream, bas->txq, tap->txa_tid, bufsiz, ni->ni_htparam); } else { /* * Other side NAK'd us; return the resources. */ DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: request failed with code %d, destroy bastream %p\n", __func__, code, bas->bastream); mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream); mwl_bastream_free(bas); tap->txa_private = NULL; } /* NB: firmware sends BAR so we don't need to */ return sc->sc_addba_response(ni, tap, code, baparamset, batimeout); } static void mwl_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap) { struct mwl_softc *sc = ni->ni_ic->ic_softc; struct mwl_bastate *bas; bas = tap->txa_private; if (bas != NULL) { DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: destroy bastream %p\n", __func__, bas->bastream); mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream); mwl_bastream_free(bas); tap->txa_private = NULL; } sc->sc_addba_stop(ni, tap); } /* * Setup the rx data structures. This should only be * done once or we may get out of sync with the firmware. */ static int mwl_startrecv(struct mwl_softc *sc) { if (!sc->sc_recvsetup) { struct mwl_rxbuf *bf, *prev; struct mwl_rxdesc *ds; prev = NULL; STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { int error = mwl_rxbuf_init(sc, bf); if (error != 0) { DPRINTF(sc, MWL_DEBUG_RECV, "%s: mwl_rxbuf_init failed %d\n", __func__, error); return error; } if (prev != NULL) { ds = prev->bf_desc; ds->pPhysNext = htole32(bf->bf_daddr); } prev = bf; } if (prev != NULL) { ds = prev->bf_desc; ds->pPhysNext = htole32(STAILQ_FIRST(&sc->sc_rxbuf)->bf_daddr); } sc->sc_recvsetup = 1; } mwl_mode_init(sc); /* set filters, etc. 
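 * At this point the receive descriptors form a closed ring: each
 * pPhysNext holds the DMA address of the next buffer and the last entry
 * points back at the head of sc_rxbuf, which is why the setup above is
 * guarded by sc_recvsetup and runs only once.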
*/ return 0; } static MWL_HAL_APMODE mwl_getapmode(const struct ieee80211vap *vap, struct ieee80211_channel *chan) { MWL_HAL_APMODE mode; if (IEEE80211_IS_CHAN_HT(chan)) { if (vap->iv_flags_ht & IEEE80211_FHT_PUREN) mode = AP_MODE_N_ONLY; else if (IEEE80211_IS_CHAN_5GHZ(chan)) mode = AP_MODE_AandN; else if (vap->iv_flags & IEEE80211_F_PUREG) mode = AP_MODE_GandN; else mode = AP_MODE_BandGandN; } else if (IEEE80211_IS_CHAN_ANYG(chan)) { if (vap->iv_flags & IEEE80211_F_PUREG) mode = AP_MODE_G_ONLY; else mode = AP_MODE_MIXED; } else if (IEEE80211_IS_CHAN_B(chan)) mode = AP_MODE_B_ONLY; else if (IEEE80211_IS_CHAN_A(chan)) mode = AP_MODE_A_ONLY; else mode = AP_MODE_MIXED; /* XXX should not happen? */ return mode; } static int mwl_setapmode(struct ieee80211vap *vap, struct ieee80211_channel *chan) { struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap; return mwl_hal_setapmode(hvap, mwl_getapmode(vap, chan)); } /* * Set/change channels. */ static int mwl_chan_set(struct mwl_softc *sc, struct ieee80211_channel *chan) { struct mwl_hal *mh = sc->sc_mh; struct ieee80211com *ic = &sc->sc_ic; MWL_HAL_CHANNEL hchan; int maxtxpow; DPRINTF(sc, MWL_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n", __func__, chan->ic_freq, chan->ic_flags); /* * Convert to a HAL channel description with * the flags constrained to reflect the current * operating mode. */ mwl_mapchan(&hchan, chan); mwl_hal_intrset(mh, 0); /* disable interrupts */ #if 0 mwl_draintxq(sc); /* clear pending tx frames */ #endif mwl_hal_setchannel(mh, &hchan); /* * Tx power is cap'd by the regulatory setting and * possibly a user-set limit. We pass the min of * these to the hal to apply them to the cal data * for this channel. * XXX min bound? */ maxtxpow = 2*chan->ic_maxregpower; if (maxtxpow > ic->ic_txpowlimit) maxtxpow = ic->ic_txpowlimit; mwl_hal_settxpower(mh, &hchan, maxtxpow / 2); /* NB: potentially change mcast/mgt rates */ mwl_setcurchanrates(sc); /* * Update internal state. */ sc->sc_tx_th.wt_chan_freq = htole16(chan->ic_freq); sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq); if (IEEE80211_IS_CHAN_A(chan)) { sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_A); sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_A); } else if (IEEE80211_IS_CHAN_ANYG(chan)) { sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G); sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G); } else { sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B); sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B); } sc->sc_curchan = hchan; mwl_hal_intrset(mh, sc->sc_imask); return 0; } static void mwl_scan_start(struct ieee80211com *ic) { struct mwl_softc *sc = ic->ic_softc; DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__); } static void mwl_scan_end(struct ieee80211com *ic) { struct mwl_softc *sc = ic->ic_softc; DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__); } static void mwl_set_channel(struct ieee80211com *ic) { struct mwl_softc *sc = ic->ic_softc; (void) mwl_chan_set(sc, ic->ic_curchan); } /* * Handle a channel switch request. We inform the firmware * and mark the global state to suppress various actions. * NB: we issue only one request to the fw; we may be called * multiple times if there are multiple vap's. 
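 * The firmware is handed the mapped target channel, the quiet flag and
 * the CSA count; sc_csapending is set so the additional calls do not
 * issue duplicate requests.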
*/ static void mwl_startcsa(struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct mwl_softc *sc = ic->ic_softc; MWL_HAL_CHANNEL hchan; if (sc->sc_csapending) return; mwl_mapchan(&hchan, ic->ic_csa_newchan); /* 1 =>'s quiet channel */ mwl_hal_setchannelswitchie(sc->sc_mh, &hchan, 1, ic->ic_csa_count); sc->sc_csapending = 1; } /* * Plumb any static WEP key for the station. This is * necessary as we must propagate the key from the * global key table of the vap to each sta db entry. */ static void mwl_setanywepkey(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) { if ((vap->iv_flags & (IEEE80211_F_PRIVACY|IEEE80211_F_WPA)) == IEEE80211_F_PRIVACY && vap->iv_def_txkey != IEEE80211_KEYIX_NONE && vap->iv_nw_keys[vap->iv_def_txkey].wk_keyix != IEEE80211_KEYIX_NONE) (void) _mwl_key_set(vap, &vap->iv_nw_keys[vap->iv_def_txkey], mac); } static int mwl_peerstadb(struct ieee80211_node *ni, int aid, int staid, MWL_HAL_PEERINFO *pi) { #define WME(ie) ((const struct ieee80211_wme_info *) ie) struct ieee80211vap *vap = ni->ni_vap; struct mwl_hal_vap *hvap; int error; if (vap->iv_opmode == IEEE80211_M_WDS) { /* * WDS vap's do not have a f/w vap; instead they piggyback * on an AP vap and we must install the sta db entry and * crypto state using that AP's handle (the WDS vap has none). */ hvap = MWL_VAP(vap)->mv_ap_hvap; } else hvap = MWL_VAP(vap)->mv_hvap; error = mwl_hal_newstation(hvap, ni->ni_macaddr, aid, staid, pi, ni->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT), ni->ni_ies.wme_ie != NULL ? WME(ni->ni_ies.wme_ie)->wme_info : 0); if (error == 0) { /* * Setup security for this station. For sta mode this is * needed even though do the same thing on transition to * AUTH state because the call to mwl_hal_newstation * clobbers the crypto state we setup. */ mwl_setanywepkey(vap, ni->ni_macaddr); } return error; #undef WME } static void mwl_setglobalkeys(struct ieee80211vap *vap) { struct ieee80211_key *wk; wk = &vap->iv_nw_keys[0]; for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; wk++) if (wk->wk_keyix != IEEE80211_KEYIX_NONE) (void) _mwl_key_set(vap, wk, vap->iv_myaddr); } /* * Convert a legacy rate set to a firmware bitmask. */ static uint32_t get_rate_bitmap(const struct ieee80211_rateset *rs) { uint32_t rates; int i; rates = 0; for (i = 0; i < rs->rs_nrates; i++) switch (rs->rs_rates[i] & IEEE80211_RATE_VAL) { case 2: rates |= 0x001; break; case 4: rates |= 0x002; break; case 11: rates |= 0x004; break; case 22: rates |= 0x008; break; case 44: rates |= 0x010; break; case 12: rates |= 0x020; break; case 18: rates |= 0x040; break; case 24: rates |= 0x080; break; case 36: rates |= 0x100; break; case 48: rates |= 0x200; break; case 72: rates |= 0x400; break; case 96: rates |= 0x800; break; case 108: rates |= 0x1000; break; } return rates; } /* * Construct an HT firmware bitmask from an HT rate set. */ static uint32_t get_htrate_bitmap(const struct ieee80211_htrateset *rs) { uint32_t rates; int i; rates = 0; for (i = 0; i < rs->rs_nrates; i++) { if (rs->rs_rates[i] < 16) rates |= 1<rs_rates[i]; } return rates; } /* * Craft station database entry for station. * NB: use host byte order here, the hal handles byte swapping. 
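 *
 * The legacy and HT rate maps filled in here come from get_rate_bitmap()
 * and get_htrate_bitmap() above: every supported legacy rate sets one
 * fixed bit, and HT MCS indices below 16 map straight to bits 0..15.  A
 * stand-alone rendering of that conversion, with a sample rate set and
 * hypothetical helper code, is sketched below.
 */
#if 0	/* illustrative sketch only, not driver code */
#include <stdint.h>
#include <stdio.h>

/* Legacy rates in 500Kb/s units, in the bit order used by the firmware map. */
static const int legacy[] =
	{ 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };

int
main(void)
{
	/* Sample rate set: 1, 2, 5.5, 11, 6, 12 and 24 Mb/s. */
	const int rs[] = { 2, 4, 11, 22, 12, 24, 48 };
	uint32_t rates = 0;
	unsigned int i, j;

	for (i = 0; i < sizeof(rs) / sizeof(rs[0]); i++)
		for (j = 0; j < sizeof(legacy) / sizeof(legacy[0]); j++)
			if (legacy[j] == rs[i])
				rates |= 1 << j;
	printf("legacy bitmap 0x%04x\n", (unsigned int)rates);

	/* HT is simpler: MCS 0..7, for example, become bits 0..7. */
	rates = 0;
	for (i = 0; i < 8; i++)
		rates |= 1 << i;
	printf("ht bitmap 0x%04x\n", (unsigned int)rates);
	return (0);
}
#endif
/*
 * mkpeerinfo() below packs these maps together with the HT capabilities
 * and channel information into the peer descriptor given to the firmware.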
*/ static MWL_HAL_PEERINFO * mkpeerinfo(MWL_HAL_PEERINFO *pi, const struct ieee80211_node *ni) { const struct ieee80211vap *vap = ni->ni_vap; memset(pi, 0, sizeof(*pi)); pi->LegacyRateBitMap = get_rate_bitmap(&ni->ni_rates); pi->CapInfo = ni->ni_capinfo; if (ni->ni_flags & IEEE80211_NODE_HT) { /* HT capabilities, etc */ pi->HTCapabilitiesInfo = ni->ni_htcap; /* XXX pi.HTCapabilitiesInfo */ pi->MacHTParamInfo = ni->ni_htparam; pi->HTRateBitMap = get_htrate_bitmap(&ni->ni_htrates); pi->AddHtInfo.ControlChan = ni->ni_htctlchan; pi->AddHtInfo.AddChan = ni->ni_ht2ndchan; pi->AddHtInfo.OpMode = ni->ni_htopmode; pi->AddHtInfo.stbc = ni->ni_htstbc; /* constrain according to local configuration */ if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) == 0) pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40; if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0) pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20; if (ni->ni_chw != 40) pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40; } return pi; } /* * Re-create the local sta db entry for a vap to ensure * up to date WME state is pushed to the firmware. Because * this resets crypto state this must be followed by a * reload of any keys in the global key table. */ static int mwl_localstadb(struct ieee80211vap *vap) { #define WME(ie) ((const struct ieee80211_wme_info *) ie) struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap; struct ieee80211_node *bss; MWL_HAL_PEERINFO pi; int error; switch (vap->iv_opmode) { case IEEE80211_M_STA: bss = vap->iv_bss; error = mwl_hal_newstation(hvap, vap->iv_myaddr, 0, 0, vap->iv_state == IEEE80211_S_RUN ? mkpeerinfo(&pi, bss) : NULL, (bss->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)), bss->ni_ies.wme_ie != NULL ? WME(bss->ni_ies.wme_ie)->wme_info : 0); if (error == 0) mwl_setglobalkeys(vap); break; case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: error = mwl_hal_newstation(hvap, vap->iv_myaddr, 0, 0, NULL, vap->iv_flags & IEEE80211_F_WME, 0); if (error == 0) mwl_setglobalkeys(vap); break; default: error = 0; break; } return error; #undef WME } static int mwl_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct mwl_vap *mvp = MWL_VAP(vap); struct mwl_hal_vap *hvap = mvp->mv_hvap; struct ieee80211com *ic = vap->iv_ic; struct ieee80211_node *ni = NULL; struct mwl_softc *sc = ic->ic_softc; struct mwl_hal *mh = sc->sc_mh; enum ieee80211_state ostate = vap->iv_state; int error; DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: %s -> %s\n", vap->iv_ifp->if_xname, __func__, ieee80211_state_name[ostate], ieee80211_state_name[nstate]); callout_stop(&sc->sc_timer); /* * Clear current radar detection state. */ if (ostate == IEEE80211_S_CAC) { /* stop quiet mode radar detection */ mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_STOP); } else if (sc->sc_radarena) { /* stop in-service radar detection */ mwl_hal_setradardetection(mh, DR_DFS_DISABLE); sc->sc_radarena = 0; } /* * Carry out per-state actions before doing net80211 work. */ if (nstate == IEEE80211_S_INIT) { /* NB: only ap+sta vap's have a fw entity */ if (hvap != NULL) mwl_hal_stop(hvap); } else if (nstate == IEEE80211_S_SCAN) { mwl_hal_start(hvap); /* NB: this disables beacon frames */ mwl_hal_setinframode(hvap); } else if (nstate == IEEE80211_S_AUTH) { /* * Must create a sta db entry in case a WEP key needs to * be plumbed. This entry will be overwritten if we * associate; otherwise it will be reclaimed on node free. 
*/ ni = vap->iv_bss; MWL_NODE(ni)->mn_hvap = hvap; (void) mwl_peerstadb(ni, 0, 0, NULL); } else if (nstate == IEEE80211_S_CSA) { /* XXX move to below? */ if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_MBSS) mwl_startcsa(vap); } else if (nstate == IEEE80211_S_CAC) { /* XXX move to below? */ /* stop ap xmit and enable quiet mode radar detection */ mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_START); } /* * Invoke the parent method to do net80211 work. */ error = mvp->mv_newstate(vap, nstate, arg); /* * Carry out work that must be done after net80211 runs; * this work requires up to date state (e.g. iv_bss). */ if (error == 0 && nstate == IEEE80211_S_RUN) { /* NB: collect bss node again, it may have changed */ ni = vap->iv_bss; DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s " "capinfo 0x%04x chan %d\n", vap->iv_ifp->if_xname, __func__, vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan)); /* * Recreate local sta db entry to update WME/HT state. */ mwl_localstadb(vap); switch (vap->iv_opmode) { case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: if (ostate == IEEE80211_S_CAC) { /* enable in-service radar detection */ mwl_hal_setradardetection(mh, DR_IN_SERVICE_MONITOR_START); sc->sc_radarena = 1; } /* * Allocate and setup the beacon frame * (and related state). */ error = mwl_reset_vap(vap, IEEE80211_S_RUN); if (error != 0) { DPRINTF(sc, MWL_DEBUG_STATE, "%s: beacon setup failed, error %d\n", __func__, error); goto bad; } /* NB: must be after setting up beacon */ mwl_hal_start(hvap); break; case IEEE80211_M_STA: DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: aid 0x%x\n", vap->iv_ifp->if_xname, __func__, ni->ni_associd); /* * Set state now that we're associated. */ mwl_hal_setassocid(hvap, ni->ni_bssid, ni->ni_associd); mwl_setrates(vap); mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold); if ((vap->iv_flags & IEEE80211_F_DWDS) && sc->sc_ndwdsvaps++ == 0) mwl_hal_setdwds(mh, 1); break; case IEEE80211_M_WDS: DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: bssid %s\n", vap->iv_ifp->if_xname, __func__, ether_sprintf(ni->ni_bssid)); mwl_seteapolformat(vap); break; default: break; } /* * Set CS mode according to operating channel; * this mostly an optimization for 5GHz. * * NB: must follow mwl_hal_start which resets csmode */ if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan)) mwl_hal_setcsmode(mh, CSMODE_AGGRESSIVE); else mwl_hal_setcsmode(mh, CSMODE_AUTO_ENA); /* * Start timer to prod firmware. */ if (sc->sc_ageinterval != 0) callout_reset(&sc->sc_timer, sc->sc_ageinterval*hz, mwl_agestations, sc); } else if (nstate == IEEE80211_S_SLEEP) { /* XXX set chip in power save */ } else if ((vap->iv_flags & IEEE80211_F_DWDS) && --sc->sc_ndwdsvaps == 0) mwl_hal_setdwds(mh, 0); bad: return error; } /* * Manage station id's; these are separate from AID's * as AID's may have values out of the range of possible * station id's acceptable to the firmware. */ static int allocstaid(struct mwl_softc *sc, int aid) { int staid; if (!(0 < aid && aid < MWL_MAXSTAID) || isset(sc->sc_staid, aid)) { /* NB: don't use 0 */ for (staid = 1; staid < MWL_MAXSTAID; staid++) if (isclr(sc->sc_staid, staid)) break; } else staid = aid; setbit(sc->sc_staid, staid); return staid; } static void delstaid(struct mwl_softc *sc, int staid) { clrbit(sc->sc_staid, staid); } /* * Setup driver-specific state for a newly associated node. * Note that we're called also on a re-associate, the isnew * param tells us if this is the first time or not. 
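 * On a first association the node is also handed a firmware station id by
 * allocstaid() above, which prefers the AID when it is in range and free
 * and otherwise takes the lowest free id, never 0.  A user-space sketch
 * of that policy (hypothetical names, a single 64-bit map instead of the
 * driver's bit string) is shown below.
 */
#if 0	/* illustrative sketch only, not driver code */
#include <stdint.h>
#include <stdio.h>

#define MAXSTAID	64

static uint64_t staid_map = 1;		/* id 0 is never handed out */

static int
alloc_staid(int aid)
{
	int id;

	/* Prefer the AID itself when it is in range and unused. */
	if (aid > 0 && aid < MAXSTAID && (staid_map & (1ULL << aid)) == 0)
		id = aid;
	else
		for (id = 1; id < MAXSTAID; id++)	/* assumes a free slot */
			if ((staid_map & (1ULL << id)) == 0)
				break;
	staid_map |= 1ULL << id;
	return (id);
}

int
main(void)
{
	printf("%d\n", alloc_staid(5));		/* 5: aid usable */
	printf("%d\n", alloc_staid(5));		/* 1: aid taken, lowest free */
	printf("%d\n", alloc_staid(70));	/* 2: aid out of range */
	return (0);
}
#endif
/*
 * mwl_newassoc() below stores the id in the mwl_node and then pushes the
 * peer info built by mkpeerinfo() into the station database.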
*/ static void mwl_newassoc(struct ieee80211_node *ni, int isnew) { struct ieee80211vap *vap = ni->ni_vap; struct mwl_softc *sc = vap->iv_ic->ic_softc; struct mwl_node *mn = MWL_NODE(ni); MWL_HAL_PEERINFO pi; uint16_t aid; int error; aid = IEEE80211_AID(ni->ni_associd); if (isnew) { mn->mn_staid = allocstaid(sc, aid); mn->mn_hvap = MWL_VAP(vap)->mv_hvap; } else { mn = MWL_NODE(ni); /* XXX reset BA stream? */ } DPRINTF(sc, MWL_DEBUG_NODE, "%s: mac %s isnew %d aid %d staid %d\n", __func__, ether_sprintf(ni->ni_macaddr), isnew, aid, mn->mn_staid); error = mwl_peerstadb(ni, aid, mn->mn_staid, mkpeerinfo(&pi, ni)); if (error != 0) { DPRINTF(sc, MWL_DEBUG_NODE, "%s: error %d creating sta db entry\n", __func__, error); /* XXX how to deal with error? */ } } /* * Periodically poke the firmware to age out station state * (power save queues, pending tx aggregates). */ static void mwl_agestations(void *arg) { struct mwl_softc *sc = arg; mwl_hal_setkeepalive(sc->sc_mh); if (sc->sc_ageinterval != 0) /* NB: catch dynamic changes */ callout_schedule(&sc->sc_timer, sc->sc_ageinterval*hz); } static const struct mwl_hal_channel * findhalchannel(const MWL_HAL_CHANNELINFO *ci, int ieee) { int i; for (i = 0; i < ci->nchannels; i++) { const struct mwl_hal_channel *hc = &ci->channels[i]; if (hc->ieee == ieee) return hc; } return NULL; } static int mwl_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd, int nchan, struct ieee80211_channel chans[]) { struct mwl_softc *sc = ic->ic_softc; struct mwl_hal *mh = sc->sc_mh; const MWL_HAL_CHANNELINFO *ci; int i; for (i = 0; i < nchan; i++) { struct ieee80211_channel *c = &chans[i]; const struct mwl_hal_channel *hc; if (IEEE80211_IS_CHAN_2GHZ(c)) { mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_2DOT4GHZ, IEEE80211_IS_CHAN_HT40(c) ? MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci); } else if (IEEE80211_IS_CHAN_5GHZ(c)) { mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_5GHZ, IEEE80211_IS_CHAN_HT40(c) ? MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci); } else { device_printf(sc->sc_dev, "%s: channel %u freq %u/0x%x not 2.4/5GHz\n", __func__, c->ic_ieee, c->ic_freq, c->ic_flags); return EINVAL; } /* * Verify channel has cal data and cap tx power. */ hc = findhalchannel(ci, c->ic_ieee); if (hc != NULL) { if (c->ic_maxpower > 2*hc->maxTxPow) c->ic_maxpower = 2*hc->maxTxPow; goto next; } if (IEEE80211_IS_CHAN_HT40(c)) { /* * Look for the extension channel since the * hal table only has the primary channel. 
*/ hc = findhalchannel(ci, c->ic_extieee); if (hc != NULL) { if (c->ic_maxpower > 2*hc->maxTxPow) c->ic_maxpower = 2*hc->maxTxPow; goto next; } } device_printf(sc->sc_dev, "%s: no cal data for channel %u ext %u freq %u/0x%x\n", __func__, c->ic_ieee, c->ic_extieee, c->ic_freq, c->ic_flags); return EINVAL; next: ; } return 0; } #define IEEE80211_CHAN_HTG (IEEE80211_CHAN_HT|IEEE80211_CHAN_G) #define IEEE80211_CHAN_HTA (IEEE80211_CHAN_HT|IEEE80211_CHAN_A) static void addht40channels(struct ieee80211_channel chans[], int maxchans, int *nchans, const MWL_HAL_CHANNELINFO *ci, int flags) { int i, error; for (i = 0; i < ci->nchannels; i++) { const struct mwl_hal_channel *hc = &ci->channels[i]; error = ieee80211_add_channel_ht40(chans, maxchans, nchans, hc->ieee, hc->maxTxPow, flags); if (error != 0 && error != ENOENT) break; } } static void addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans, const MWL_HAL_CHANNELINFO *ci, const uint8_t bands[]) { int i, error; error = 0; for (i = 0; i < ci->nchannels && error == 0; i++) { const struct mwl_hal_channel *hc = &ci->channels[i]; error = ieee80211_add_channel(chans, maxchans, nchans, hc->ieee, hc->freq, hc->maxTxPow, 0, bands); } } static void getchannels(struct mwl_softc *sc, int maxchans, int *nchans, struct ieee80211_channel chans[]) { const MWL_HAL_CHANNELINFO *ci; uint8_t bands[IEEE80211_MODE_BYTES]; /* * Use the channel info from the hal to craft the * channel list. Note that we pass back an unsorted * list; the caller is required to sort it for us * (if desired). */ *nchans = 0; if (mwl_hal_getchannelinfo(sc->sc_mh, MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0) { memset(bands, 0, sizeof(bands)); setbit(bands, IEEE80211_MODE_11B); setbit(bands, IEEE80211_MODE_11G); setbit(bands, IEEE80211_MODE_11NG); addchannels(chans, maxchans, nchans, ci, bands); } if (mwl_hal_getchannelinfo(sc->sc_mh, MWL_FREQ_BAND_5GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0) { memset(bands, 0, sizeof(bands)); setbit(bands, IEEE80211_MODE_11A); setbit(bands, IEEE80211_MODE_11NA); addchannels(chans, maxchans, nchans, ci, bands); } if (mwl_hal_getchannelinfo(sc->sc_mh, MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0) addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG); if (mwl_hal_getchannelinfo(sc->sc_mh, MWL_FREQ_BAND_5GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0) addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA); } static void mwl_getradiocaps(struct ieee80211com *ic, int maxchans, int *nchans, struct ieee80211_channel chans[]) { struct mwl_softc *sc = ic->ic_softc; getchannels(sc, maxchans, nchans, chans); } static int mwl_getchannels(struct mwl_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; /* * Use the channel info from the hal to craft the * channel list for net80211. Note that we pass up * an unsorted list; net80211 will sort it for us. */ memset(ic->ic_channels, 0, sizeof(ic->ic_channels)); ic->ic_nchans = 0; getchannels(sc, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels); ic->ic_regdomain.regdomain = SKU_DEBUG; ic->ic_regdomain.country = CTRY_DEFAULT; ic->ic_regdomain.location = 'I'; ic->ic_regdomain.isocc[0] = ' '; /* XXX? */ ic->ic_regdomain.isocc[1] = ' '; return (ic->ic_nchans == 0 ? 
EIO : 0); } #undef IEEE80211_CHAN_HTA #undef IEEE80211_CHAN_HTG #ifdef MWL_DEBUG static void mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix) { const struct mwl_rxdesc *ds = bf->bf_desc; uint32_t status = le32toh(ds->Status); printf("R[%2u] (DS.V:%p DS.P:0x%jx) NEXT:%08x DATA:%08x RC:%02x%s\n" " STAT:%02x LEN:%04x RSSI:%02x CHAN:%02x RATE:%02x QOS:%04x HT:%04x\n", ix, ds, (uintmax_t)bf->bf_daddr, le32toh(ds->pPhysNext), le32toh(ds->pPhysBuffData), ds->RxControl, ds->RxControl != EAGLE_RXD_CTRL_DRIVER_OWN ? "" : (status & EAGLE_RXD_STATUS_OK) ? " *" : " !", ds->Status, le16toh(ds->PktLen), ds->RSSI, ds->Channel, ds->Rate, le16toh(ds->QosCtrl), le16toh(ds->HtSig2)); } static void mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix) { const struct mwl_txdesc *ds = bf->bf_desc; uint32_t status = le32toh(ds->Status); printf("Q%u[%3u]", qnum, ix); printf(" (DS.V:%p DS.P:0x%jx)\n", ds, (uintmax_t)bf->bf_daddr); printf(" NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n", le32toh(ds->pPhysNext), le32toh(ds->PktPtr), le16toh(ds->PktLen), status, status & EAGLE_TXD_STATUS_USED ? "" : (status & 3) != 0 ? " *" : " !"); printf(" RATE:%02x PRI:%x QOS:%04x SAP:%08x FORMAT:%04x\n", ds->DataRate, ds->TxPriority, le16toh(ds->QosCtrl), le32toh(ds->SapPktInfo), le16toh(ds->Format)); #if MWL_TXDESC > 1 printf(" MULTIFRAMES:%u LEN:%04x %04x %04x %04x %04x %04x\n" , le32toh(ds->multiframes) , le16toh(ds->PktLenArray[0]), le16toh(ds->PktLenArray[1]) , le16toh(ds->PktLenArray[2]), le16toh(ds->PktLenArray[3]) , le16toh(ds->PktLenArray[4]), le16toh(ds->PktLenArray[5]) ); printf(" DATA:%08x %08x %08x %08x %08x %08x\n" , le32toh(ds->PktPtrArray[0]), le32toh(ds->PktPtrArray[1]) , le32toh(ds->PktPtrArray[2]), le32toh(ds->PktPtrArray[3]) , le32toh(ds->PktPtrArray[4]), le32toh(ds->PktPtrArray[5]) ); #endif #if 0 { const uint8_t *cp = (const uint8_t *) ds; int i; for (i = 0; i < sizeof(struct mwl_txdesc); i++) { printf("%02x ", cp[i]); if (((i+1) % 16) == 0) printf("\n"); } printf("\n"); } #endif } #endif /* MWL_DEBUG */ #if 0 static void mwl_txq_dump(struct mwl_txq *txq) { struct mwl_txbuf *bf; int i = 0; MWL_TXQ_LOCK(txq); STAILQ_FOREACH(bf, &txq->active, bf_list) { struct mwl_txdesc *ds = bf->bf_desc; MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); #ifdef MWL_DEBUG mwl_printtxbuf(bf, txq->qnum, i); #endif i++; } MWL_TXQ_UNLOCK(txq); } #endif static void mwl_watchdog(void *arg) { struct mwl_softc *sc = arg; callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc); if (sc->sc_tx_timer == 0 || --sc->sc_tx_timer > 0) return; if (sc->sc_running && !sc->sc_invalid) { if (mwl_hal_setkeepalive(sc->sc_mh)) device_printf(sc->sc_dev, "transmit timeout (firmware hung?)\n"); else device_printf(sc->sc_dev, "transmit timeout\n"); #if 0 mwl_reset(sc); mwl_txq_dump(&sc->sc_txq[0]);/*XXX*/ #endif counter_u64_add(sc->sc_ic.ic_oerrors, 1); sc->sc_stats.mst_watchdog++; } } #ifdef MWL_DIAGAPI /* * Diagnostic interface to the HAL. This is used by various * tools to do things like retrieve register contents for * debugging. The mechanism is intentionally opaque so that * it can change frequently w/o concern for compatibility. */ static int mwl_ioctl_diag(struct mwl_softc *sc, struct mwl_diag *md) { struct mwl_hal *mh = sc->sc_mh; u_int id = md->md_id & MWL_DIAG_ID; void *indata = NULL; void *outdata = NULL; u_int32_t insize = md->md_in_size; u_int32_t outsize = md->md_out_size; int error = 0; if (md->md_id & MWL_DIAG_IN) { /* * Copy in data. 
*/ indata = malloc(insize, M_TEMP, M_NOWAIT); if (indata == NULL) { error = ENOMEM; goto bad; } error = copyin(md->md_in_data, indata, insize); if (error) goto bad; } if (md->md_id & MWL_DIAG_DYN) { /* * Allocate a buffer for the results (otherwise the HAL * returns a pointer to a buffer where we can read the * results). Note that we depend on the HAL leaving this * pointer for us to use below in reclaiming the buffer; * may want to be more defensive. */ outdata = malloc(outsize, M_TEMP, M_NOWAIT); if (outdata == NULL) { error = ENOMEM; goto bad; } } if (mwl_hal_getdiagstate(mh, id, indata, insize, &outdata, &outsize)) { if (outsize < md->md_out_size) md->md_out_size = outsize; if (outdata != NULL) error = copyout(outdata, md->md_out_data, md->md_out_size); } else { error = EINVAL; } bad: if ((md->md_id & MWL_DIAG_IN) && indata != NULL) free(indata, M_TEMP); if ((md->md_id & MWL_DIAG_DYN) && outdata != NULL) free(outdata, M_TEMP); return error; } static int mwl_ioctl_reset(struct mwl_softc *sc, struct mwl_diag *md) { struct mwl_hal *mh = sc->sc_mh; int error; MWL_LOCK_ASSERT(sc); if (md->md_id == 0 && mwl_hal_fwload(mh, NULL) != 0) { device_printf(sc->sc_dev, "unable to load firmware\n"); return EIO; } if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) { device_printf(sc->sc_dev, "unable to fetch h/w specs\n"); return EIO; } error = mwl_setupdma(sc); if (error != 0) { /* NB: mwl_setupdma prints a msg */ return error; } /* * Reset tx/rx data structures; after reload we must * re-start the driver's notion of the next xmit/recv. */ mwl_draintxq(sc); /* clear pending frames */ mwl_resettxq(sc); /* rebuild tx q lists */ sc->sc_rxnext = NULL; /* force rx to start at the list head */ return 0; } #endif /* MWL_DIAGAPI */ static void mwl_parent(struct ieee80211com *ic) { struct mwl_softc *sc = ic->ic_softc; int startall = 0; MWL_LOCK(sc); if (ic->ic_nrunning > 0) { if (sc->sc_running) { /* * To avoid rescanning another access point, * do not call mwl_init() here. Instead, * only reflect promisc mode settings. */ mwl_mode_init(sc); } else { /* * Beware of being called during attach/detach * to reset promiscuous mode. In that case we * will still be marked UP but not RUNNING. * However trying to re-init the interface * is the wrong thing to do as we've already * torn down much of our state. There's * probably a better way to deal with this. */ if (!sc->sc_invalid) { mwl_init(sc); /* XXX lose error */ startall = 1; } } } else mwl_stop(sc); MWL_UNLOCK(sc); if (startall) ieee80211_start_all(ic); } static int mwl_ioctl(struct ieee80211com *ic, u_long cmd, void *data) { struct mwl_softc *sc = ic->ic_softc; struct ifreq *ifr = data; int error = 0; switch (cmd) { case SIOCGMVSTATS: mwl_hal_gethwstats(sc->sc_mh, &sc->sc_stats.hw_stats); #if 0 /* NB: embed these numbers to get a consistent view */ sc->sc_stats.mst_tx_packets = ifp->if_get_counter(ifp, IFCOUNTER_OPACKETS); sc->sc_stats.mst_rx_packets = ifp->if_get_counter(ifp, IFCOUNTER_IPACKETS); #endif /* * NB: Drop the softc lock in case of a page fault; * we'll accept any potential inconsisentcy in the * statistics. The alternative is to copy the data * to a local structure. 
*/ return (copyout(&sc->sc_stats, ifr_data_get_ptr(ifr), sizeof (sc->sc_stats))); #ifdef MWL_DIAGAPI case SIOCGMVDIAG: /* XXX check privs */ return mwl_ioctl_diag(sc, (struct mwl_diag *) ifr); case SIOCGMVRESET: /* XXX check privs */ MWL_LOCK(sc); error = mwl_ioctl_reset(sc,(struct mwl_diag *) ifr); MWL_UNLOCK(sc); break; #endif /* MWL_DIAGAPI */ default: error = ENOTTY; break; } return (error); } #ifdef MWL_DEBUG static int mwl_sysctl_debug(SYSCTL_HANDLER_ARGS) { struct mwl_softc *sc = arg1; int debug, error; debug = sc->sc_debug | (mwl_hal_getdebug(sc->sc_mh) << 24); error = sysctl_handle_int(oidp, &debug, 0, req); if (error || !req->newptr) return error; mwl_hal_setdebug(sc->sc_mh, debug >> 24); sc->sc_debug = debug & 0x00ffffff; return 0; } #endif /* MWL_DEBUG */ static void mwl_sysctlattach(struct mwl_softc *sc) { #ifdef MWL_DEBUG struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); sc->sc_debug = mwl_debug; SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW, sc, 0, mwl_sysctl_debug, "I", "control debugging printfs"); #endif } /* * Announce various information on device/driver attach. */ static void mwl_announce(struct mwl_softc *sc) { device_printf(sc->sc_dev, "Rev A%d hardware, v%d.%d.%d.%d firmware (regioncode %d)\n", sc->sc_hwspecs.hwVersion, (sc->sc_hwspecs.fwReleaseNumber>>24) & 0xff, (sc->sc_hwspecs.fwReleaseNumber>>16) & 0xff, (sc->sc_hwspecs.fwReleaseNumber>>8) & 0xff, (sc->sc_hwspecs.fwReleaseNumber>>0) & 0xff, sc->sc_hwspecs.regionCode); sc->sc_fwrelease = sc->sc_hwspecs.fwReleaseNumber; if (bootverbose) { int i; for (i = 0; i <= WME_AC_VO; i++) { struct mwl_txq *txq = sc->sc_ac2q[i]; device_printf(sc->sc_dev, "Use hw queue %u for %s traffic\n", txq->qnum, ieee80211_wme_acnames[i]); } } if (bootverbose || mwl_rxdesc != MWL_RXDESC) device_printf(sc->sc_dev, "using %u rx descriptors\n", mwl_rxdesc); if (bootverbose || mwl_rxbuf != MWL_RXBUF) device_printf(sc->sc_dev, "using %u rx buffers\n", mwl_rxbuf); if (bootverbose || mwl_txbuf != MWL_TXBUF) device_printf(sc->sc_dev, "using %u tx buffers\n", mwl_txbuf); if (bootverbose && mwl_hal_ismbsscapable(sc->sc_mh)) device_printf(sc->sc_dev, "multi-bss support\n"); #ifdef MWL_TX_NODROP if (bootverbose) device_printf(sc->sc_dev, "no tx drop\n"); #endif } diff --git a/sys/dev/netmap/if_ptnet.c b/sys/dev/netmap/if_ptnet.c index 38149ac2650c..99d21c38f8ab 100644 --- a/sys/dev/netmap/if_ptnet.c +++ b/sys/dev/netmap/if_ptnet.c @@ -1,1998 +1,1999 @@ /*- * Copyright (c) 2016, Vincenzo Maffione * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* Driver for ptnet paravirtualized network device. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_inet.h" #include "opt_inet6.h" #include #include #include #include #include #include #ifdef WITH_PTNETMAP #ifndef INET #error "INET not defined, cannot support offloadings" #endif #if __FreeBSD_version >= 1100000 static uint64_t ptnet_get_counter(if_t, ift_counter); #else typedef struct ifnet *if_t; #define if_getsoftc(_ifp) (_ifp)->if_softc #endif //#define PTNETMAP_STATS //#define DEBUG #ifdef DEBUG #define DBG(x) x #else /* !DEBUG */ #define DBG(x) #endif /* !DEBUG */ extern int ptnet_vnet_hdr; /* Tunable parameter */ struct ptnet_softc; struct ptnet_queue_stats { uint64_t packets; /* if_[io]packets */ uint64_t bytes; /* if_[io]bytes */ uint64_t errors; /* if_[io]errors */ uint64_t iqdrops; /* if_iqdrops */ uint64_t mcasts; /* if_[io]mcasts */ #ifdef PTNETMAP_STATS uint64_t intrs; uint64_t kicks; #endif /* PTNETMAP_STATS */ }; struct ptnet_queue { struct ptnet_softc *sc; struct resource *irq; void *cookie; int kring_id; struct nm_csb_atok *atok; struct nm_csb_ktoa *ktoa; unsigned int kick; struct mtx lock; struct buf_ring *bufring; /* for TX queues */ struct ptnet_queue_stats stats; #ifdef PTNETMAP_STATS struct ptnet_queue_stats last_stats; #endif /* PTNETMAP_STATS */ struct taskqueue *taskq; struct task task; char lock_name[16]; }; #define PTNET_Q_LOCK(_pq) mtx_lock(&(_pq)->lock) #define PTNET_Q_TRYLOCK(_pq) mtx_trylock(&(_pq)->lock) #define PTNET_Q_UNLOCK(_pq) mtx_unlock(&(_pq)->lock) struct ptnet_softc { device_t dev; if_t ifp; struct ifmedia media; struct mtx lock; char lock_name[16]; char hwaddr[ETHER_ADDR_LEN]; /* Mirror of PTFEAT register. */ uint32_t ptfeatures; unsigned int vnet_hdr_len; /* PCI BARs support. 
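 * iomem maps the register/doorbell I/O BAR (PTNETMAP_IO_PCI_BAR) and
 * msix_mem the MSI-X BAR (PTNETMAP_MSIX_PCI_BAR); both are released
 * again on detach.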
*/ struct resource *iomem; struct resource *msix_mem; unsigned int num_rings; unsigned int num_tx_rings; struct ptnet_queue *queues; struct ptnet_queue *rxqueues; struct nm_csb_atok *csb_gh; struct nm_csb_ktoa *csb_hg; unsigned int min_tx_space; struct netmap_pt_guest_adapter *ptna; struct callout tick; #ifdef PTNETMAP_STATS struct timeval last_ts; #endif /* PTNETMAP_STATS */ }; #define PTNET_CORE_LOCK(_sc) mtx_lock(&(_sc)->lock) #define PTNET_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->lock) static int ptnet_probe(device_t); static int ptnet_attach(device_t); static int ptnet_detach(device_t); static int ptnet_suspend(device_t); static int ptnet_resume(device_t); static int ptnet_shutdown(device_t); static void ptnet_init(void *opaque); static int ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data); static int ptnet_init_locked(struct ptnet_softc *sc); static int ptnet_stop(struct ptnet_softc *sc); static int ptnet_transmit(if_t ifp, struct mbuf *m); static int ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget, bool may_resched); static void ptnet_qflush(if_t ifp); static void ptnet_tx_task(void *context, int pending); static int ptnet_media_change(if_t ifp); static void ptnet_media_status(if_t ifp, struct ifmediareq *ifmr); #ifdef PTNETMAP_STATS static void ptnet_tick(void *opaque); #endif static int ptnet_irqs_init(struct ptnet_softc *sc); static void ptnet_irqs_fini(struct ptnet_softc *sc); static uint32_t ptnet_nm_ptctl(struct ptnet_softc *sc, uint32_t cmd); static int ptnet_nm_config(struct netmap_adapter *na, struct nm_config_info *info); static void ptnet_update_vnet_hdr(struct ptnet_softc *sc); static int ptnet_nm_register(struct netmap_adapter *na, int onoff); static int ptnet_nm_txsync(struct netmap_kring *kring, int flags); static int ptnet_nm_rxsync(struct netmap_kring *kring, int flags); static void ptnet_nm_intr(struct netmap_adapter *na, int onoff); static void ptnet_tx_intr(void *opaque); static void ptnet_rx_intr(void *opaque); static unsigned ptnet_rx_discard(struct netmap_kring *kring, unsigned int head); static int ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched); static void ptnet_rx_task(void *context, int pending); #ifdef DEVICE_POLLING static poll_handler_t ptnet_poll; #endif static device_method_t ptnet_methods[] = { DEVMETHOD(device_probe, ptnet_probe), DEVMETHOD(device_attach, ptnet_attach), DEVMETHOD(device_detach, ptnet_detach), DEVMETHOD(device_suspend, ptnet_suspend), DEVMETHOD(device_resume, ptnet_resume), DEVMETHOD(device_shutdown, ptnet_shutdown), DEVMETHOD_END }; static driver_t ptnet_driver = { "ptnet", ptnet_methods, sizeof(struct ptnet_softc) }; /* We use (SI_ORDER_MIDDLE+2) here, see DEV_MODULE_ORDERED() invocation. 
*/ static devclass_t ptnet_devclass; DRIVER_MODULE_ORDERED(ptnet, pci, ptnet_driver, ptnet_devclass, NULL, NULL, SI_ORDER_MIDDLE + 2); static int ptnet_probe(device_t dev) { if (pci_get_vendor(dev) != PTNETMAP_PCI_VENDOR_ID || pci_get_device(dev) != PTNETMAP_PCI_NETIF_ID) { return (ENXIO); } device_set_desc(dev, "ptnet network adapter"); return (BUS_PROBE_DEFAULT); } static inline void ptnet_kick(struct ptnet_queue *pq) { #ifdef PTNETMAP_STATS pq->stats.kicks ++; #endif /* PTNETMAP_STATS */ bus_write_4(pq->sc->iomem, pq->kick, 0); } #define PTNET_BUF_RING_SIZE 4096 #define PTNET_RX_BUDGET 512 #define PTNET_RX_BATCH 1 #define PTNET_TX_BUDGET 512 #define PTNET_TX_BATCH 64 #define PTNET_HDR_SIZE sizeof(struct virtio_net_hdr_mrg_rxbuf) #define PTNET_MAX_PKT_SIZE 65536 #define PTNET_CSUM_OFFLOAD (CSUM_TCP | CSUM_UDP) #define PTNET_CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6 | CSUM_UDP_IPV6) #define PTNET_ALL_OFFLOAD (CSUM_TSO | PTNET_CSUM_OFFLOAD |\ PTNET_CSUM_OFFLOAD_IPV6) static int ptnet_attach(device_t dev) { uint32_t ptfeatures = 0; unsigned int num_rx_rings, num_tx_rings; struct netmap_adapter na_arg; unsigned int nifp_offset; struct ptnet_softc *sc; if_t ifp; uint32_t macreg; int err, rid; int i; sc = device_get_softc(dev); sc->dev = dev; /* Setup PCI resources. */ pci_enable_busmaster(dev); rid = PCIR_BAR(PTNETMAP_IO_PCI_BAR); sc->iomem = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (sc->iomem == NULL) { device_printf(dev, "Failed to map I/O BAR\n"); return (ENXIO); } /* Negotiate features with the hypervisor. */ if (ptnet_vnet_hdr) { ptfeatures |= PTNETMAP_F_VNET_HDR; } bus_write_4(sc->iomem, PTNET_IO_PTFEAT, ptfeatures); /* wanted */ ptfeatures = bus_read_4(sc->iomem, PTNET_IO_PTFEAT); /* acked */ sc->ptfeatures = ptfeatures; num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS); num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS); sc->num_rings = num_tx_rings + num_rx_rings; sc->num_tx_rings = num_tx_rings; if (sc->num_rings * sizeof(struct nm_csb_atok) > PAGE_SIZE) { device_printf(dev, "CSB cannot handle that many rings (%u)\n", sc->num_rings); err = ENOMEM; goto err_path; } /* Allocate CSB and carry out CSB allocation protocol. */ sc->csb_gh = contigmalloc(2*PAGE_SIZE, M_DEVBUF, M_NOWAIT | M_ZERO, (size_t)0, -1UL, PAGE_SIZE, 0); if (sc->csb_gh == NULL) { device_printf(dev, "Failed to allocate CSB\n"); err = ENOMEM; goto err_path; } sc->csb_hg = (struct nm_csb_ktoa *)(((char *)sc->csb_gh) + PAGE_SIZE); { /* * We use uint64_t rather than vm_paddr_t since we * need 64 bit addresses even on 32 bit platforms. */ uint64_t paddr = vtophys(sc->csb_gh); /* CSB allocation protocol: write to BAH first, then * to BAL (for both GH and HG sections). */ bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH, (paddr >> 32) & 0xffffffff); bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL, paddr & 0xffffffff); paddr = vtophys(sc->csb_hg); bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH, (paddr >> 32) & 0xffffffff); bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL, paddr & 0xffffffff); } /* Allocate and initialize per-queue data structures. 
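 * The TX queues come first in the array and the RX queues follow
 * (sc->rxqueues = sc->queues + num_tx_rings); every queue gets its own
 * lock, kick register (PTNET_IO_KICK_BASE + 4*i) and CSB atok/ktoa slots,
 * while only TX queues carry a buf_ring and RX queues have their kring_id
 * rebased to start from zero.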
*/ sc->queues = malloc(sizeof(struct ptnet_queue) * sc->num_rings, M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->queues == NULL) { err = ENOMEM; goto err_path; } sc->rxqueues = sc->queues + num_tx_rings; for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; pq->sc = sc; pq->kring_id = i; pq->kick = PTNET_IO_KICK_BASE + 4 * i; pq->atok = sc->csb_gh + i; pq->ktoa = sc->csb_hg + i; snprintf(pq->lock_name, sizeof(pq->lock_name), "%s-%d", device_get_nameunit(dev), i); mtx_init(&pq->lock, pq->lock_name, NULL, MTX_DEF); if (i >= num_tx_rings) { /* RX queue: fix kring_id. */ pq->kring_id -= num_tx_rings; } else { /* TX queue: allocate buf_ring. */ pq->bufring = buf_ring_alloc(PTNET_BUF_RING_SIZE, M_DEVBUF, M_NOWAIT, &pq->lock); if (pq->bufring == NULL) { err = ENOMEM; goto err_path; } } } sc->min_tx_space = 64; /* Safe initial value. */ err = ptnet_irqs_init(sc); if (err) { goto err_path; } /* Setup Ethernet interface. */ sc->ifp = ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "Failed to allocate ifnet\n"); err = ENOMEM; goto err_path; } if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_baudrate = IF_Gbps(10); ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX; ifp->if_init = ptnet_init; ifp->if_ioctl = ptnet_ioctl; #if __FreeBSD_version >= 1100000 ifp->if_get_counter = ptnet_get_counter; #endif ifp->if_transmit = ptnet_transmit; ifp->if_qflush = ptnet_qflush; ifmedia_init(&sc->media, IFM_IMASK, ptnet_media_change, ptnet_media_status); ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX, 0, NULL); ifmedia_set(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX); macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_HI); sc->hwaddr[0] = (macreg >> 8) & 0xff; sc->hwaddr[1] = macreg & 0xff; macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_LO); sc->hwaddr[2] = (macreg >> 24) & 0xff; sc->hwaddr[3] = (macreg >> 16) & 0xff; sc->hwaddr[4] = (macreg >> 8) & 0xff; sc->hwaddr[5] = macreg & 0xff; ether_ifattach(ifp, sc->hwaddr); ifp->if_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU; if (sc->ptfeatures & PTNETMAP_F_VNET_HDR) { /* Similarly to what the vtnet driver does, we can emulate * VLAN offloadings by inserting and removing the 802.1Q * header during transmit and receive. We are then able * to do checksum offloading of VLAN frames. */ ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_LRO | IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWTAGGING; } ifp->if_capenable = ifp->if_capabilities; #ifdef DEVICE_POLLING /* Don't enable polling by default. */ ifp->if_capabilities |= IFCAP_POLLING; #endif snprintf(sc->lock_name, sizeof(sc->lock_name), "%s", device_get_nameunit(dev)); mtx_init(&sc->lock, sc->lock_name, "ptnet core lock", MTX_DEF); callout_init_mtx(&sc->tick, &sc->lock, 0); /* Prepare a netmap_adapter struct instance to do netmap_attach(). 
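 * Ring and slot counts are read back from the device registers, the nm_*
 * callbacks are wired in, and netmap_pt_guest_attach() is given the
 * netmap interface offset and host memory id; the resulting adapter is
 * then reachable through NA(ifp).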
*/ nifp_offset = bus_read_4(sc->iomem, PTNET_IO_NIFP_OFS); memset(&na_arg, 0, sizeof(na_arg)); na_arg.ifp = ifp; na_arg.num_tx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS); na_arg.num_rx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS); na_arg.num_tx_rings = num_tx_rings; na_arg.num_rx_rings = num_rx_rings; na_arg.nm_config = ptnet_nm_config; na_arg.nm_krings_create = ptnet_nm_krings_create; na_arg.nm_krings_delete = ptnet_nm_krings_delete; na_arg.nm_dtor = ptnet_nm_dtor; na_arg.nm_intr = ptnet_nm_intr; na_arg.nm_register = ptnet_nm_register; na_arg.nm_txsync = ptnet_nm_txsync; na_arg.nm_rxsync = ptnet_nm_rxsync; netmap_pt_guest_attach(&na_arg, nifp_offset, bus_read_4(sc->iomem, PTNET_IO_HOSTMEMID)); /* Now a netmap adapter for this ifp has been allocated, and it * can be accessed through NA(ifp). We also have to initialize the CSB * pointer. */ sc->ptna = (struct netmap_pt_guest_adapter *)NA(ifp); /* If virtio-net header was negotiated, set the virt_hdr_len field in * the netmap adapter, to inform users that this netmap adapter requires * the application to deal with the headers. */ ptnet_update_vnet_hdr(sc); device_printf(dev, "%s() completed\n", __func__); return (0); err_path: ptnet_detach(dev); return err; } /* Stop host sync-kloop if it was running. */ static void ptnet_device_shutdown(struct ptnet_softc *sc) { ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_DELETE); bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH, 0); bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL, 0); bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH, 0); bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL, 0); } static int ptnet_detach(device_t dev) { struct ptnet_softc *sc = device_get_softc(dev); int i; ptnet_device_shutdown(sc); #ifdef DEVICE_POLLING if (sc->ifp->if_capenable & IFCAP_POLLING) { ether_poll_deregister(sc->ifp); } #endif callout_drain(&sc->tick); if (sc->queues) { /* Drain taskqueues before calling if_detach. */ for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; if (pq->taskq) { taskqueue_drain(pq->taskq, &pq->task); } } } if (sc->ifp) { ether_ifdetach(sc->ifp); /* Uninitialize netmap adapters for this device. 
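 * netmap_detach() has to run while the ifnet is still valid; after it the
 * media state, the ifnet itself, the interrupts, the CSB pages, the
 * per-queue locks and buf_rings and finally the I/O BAR are released.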
*/ netmap_detach(sc->ifp); ifmedia_removeall(&sc->media); if_free(sc->ifp); sc->ifp = NULL; } ptnet_irqs_fini(sc); if (sc->csb_gh) { contigfree(sc->csb_gh, 2*PAGE_SIZE, M_DEVBUF); sc->csb_gh = NULL; sc->csb_hg = NULL; } if (sc->queues) { for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; if (mtx_initialized(&pq->lock)) { mtx_destroy(&pq->lock); } if (pq->bufring != NULL) { buf_ring_free(pq->bufring, M_DEVBUF); } } free(sc->queues, M_DEVBUF); sc->queues = NULL; } if (sc->iomem) { bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(PTNETMAP_IO_PCI_BAR), sc->iomem); sc->iomem = NULL; } mtx_destroy(&sc->lock); device_printf(dev, "%s() completed\n", __func__); return (0); } static int ptnet_suspend(device_t dev) { struct ptnet_softc *sc = device_get_softc(dev); (void)sc; return (0); } static int ptnet_resume(device_t dev) { struct ptnet_softc *sc = device_get_softc(dev); (void)sc; return (0); } static int ptnet_shutdown(device_t dev) { struct ptnet_softc *sc = device_get_softc(dev); ptnet_device_shutdown(sc); return (0); } static int ptnet_irqs_init(struct ptnet_softc *sc) { int rid = PCIR_BAR(PTNETMAP_MSIX_PCI_BAR); int nvecs = sc->num_rings; device_t dev = sc->dev; int err = ENOSPC; int cpu_cur; int i; if (pci_find_cap(dev, PCIY_MSIX, NULL) != 0) { device_printf(dev, "Could not find MSI-X capability\n"); return (ENXIO); } sc->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->msix_mem == NULL) { device_printf(dev, "Failed to allocate MSIX PCI BAR\n"); return (ENXIO); } if (pci_msix_count(dev) < nvecs) { device_printf(dev, "Not enough MSI-X vectors\n"); goto err_path; } err = pci_alloc_msix(dev, &nvecs); if (err) { device_printf(dev, "Failed to allocate MSI-X vectors\n"); goto err_path; } for (i = 0; i < nvecs; i++) { struct ptnet_queue *pq = sc->queues + i; rid = i + 1; pq->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (pq->irq == NULL) { device_printf(dev, "Failed to allocate interrupt " "for queue #%d\n", i); err = ENOSPC; goto err_path; } } cpu_cur = CPU_FIRST(); for (i = 0; i < nvecs; i++) { struct ptnet_queue *pq = sc->queues + i; void (*handler)(void *) = ptnet_tx_intr; if (i >= sc->num_tx_rings) { handler = ptnet_rx_intr; } err = bus_setup_intr(dev, pq->irq, INTR_TYPE_NET | INTR_MPSAFE, NULL /* intr_filter */, handler, pq, &pq->cookie); if (err) { device_printf(dev, "Failed to register intr handler " "for queue #%d\n", i); goto err_path; } bus_describe_intr(dev, pq->irq, pq->cookie, "q%d", i); #if 0 bus_bind_intr(sc->dev, pq->irq, cpu_cur); #endif cpu_cur = CPU_NEXT(cpu_cur); } device_printf(dev, "Allocated %d MSI-X vectors\n", nvecs); cpu_cur = CPU_FIRST(); for (i = 0; i < nvecs; i++) { struct ptnet_queue *pq = sc->queues + i; - static void (*handler)(void *context, int pending); - handler = (i < sc->num_tx_rings) ? 
ptnet_tx_task : ptnet_rx_task; + if (i < sc->num_tx_rings) + TASK_INIT(&pq->task, 0, ptnet_tx_task, pq); + else + NET_TASK_INIT(&pq->task, 0, ptnet_rx_task, pq); - TASK_INIT(&pq->task, 0, handler, pq); pq->taskq = taskqueue_create_fast("ptnet_queue", M_NOWAIT, taskqueue_thread_enqueue, &pq->taskq); taskqueue_start_threads(&pq->taskq, 1, PI_NET, "%s-pq-%d", device_get_nameunit(sc->dev), cpu_cur); cpu_cur = CPU_NEXT(cpu_cur); } return 0; err_path: ptnet_irqs_fini(sc); return err; } static void ptnet_irqs_fini(struct ptnet_softc *sc) { device_t dev = sc->dev; int i; for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; if (pq->taskq) { taskqueue_free(pq->taskq); pq->taskq = NULL; } if (pq->cookie) { bus_teardown_intr(dev, pq->irq, pq->cookie); pq->cookie = NULL; } if (pq->irq) { bus_release_resource(dev, SYS_RES_IRQ, i + 1, pq->irq); pq->irq = NULL; } } if (sc->msix_mem) { pci_release_msi(dev); bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(PTNETMAP_MSIX_PCI_BAR), sc->msix_mem); sc->msix_mem = NULL; } } static void ptnet_init(void *opaque) { struct ptnet_softc *sc = opaque; PTNET_CORE_LOCK(sc); ptnet_init_locked(sc); PTNET_CORE_UNLOCK(sc); } static int ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data) { struct ptnet_softc *sc = if_getsoftc(ifp); device_t dev = sc->dev; struct ifreq *ifr = (struct ifreq *)data; int mask __unused, err = 0; switch (cmd) { case SIOCSIFFLAGS: device_printf(dev, "SIOCSIFFLAGS %x\n", ifp->if_flags); PTNET_CORE_LOCK(sc); if (ifp->if_flags & IFF_UP) { /* Network stack wants the iff to be up. */ err = ptnet_init_locked(sc); } else { /* Network stack wants the iff to be down. */ err = ptnet_stop(sc); } /* We don't need to do nothing to support IFF_PROMISC, * since that is managed by the backend port. */ PTNET_CORE_UNLOCK(sc); break; case SIOCSIFCAP: device_printf(dev, "SIOCSIFCAP %x %x\n", ifr->ifr_reqcap, ifp->if_capenable); mask = ifr->ifr_reqcap ^ ifp->if_capenable; #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { struct ptnet_queue *pq; int i; if (ifr->ifr_reqcap & IFCAP_POLLING) { err = ether_poll_register(ptnet_poll, ifp); if (err) { break; } /* Stop queues and sync with taskqueues. */ ifp->if_drv_flags &= ~IFF_DRV_RUNNING; for (i = 0; i < sc->num_rings; i++) { pq = sc-> queues + i; /* Make sure the worker sees the * IFF_DRV_RUNNING down. */ PTNET_Q_LOCK(pq); pq->atok->appl_need_kick = 0; PTNET_Q_UNLOCK(pq); /* Wait for rescheduling to finish. */ if (pq->taskq) { taskqueue_drain(pq->taskq, &pq->task); } } ifp->if_drv_flags |= IFF_DRV_RUNNING; } else { err = ether_poll_deregister(ifp); for (i = 0; i < sc->num_rings; i++) { pq = sc-> queues + i; PTNET_Q_LOCK(pq); pq->atok->appl_need_kick = 1; PTNET_Q_UNLOCK(pq); } } } #endif /* DEVICE_POLLING */ ifp->if_capenable = ifr->ifr_reqcap; break; case SIOCSIFMTU: /* We support any reasonable MTU. 
*/ if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > PTNET_MAX_PKT_SIZE) { err = EINVAL; } else { PTNET_CORE_LOCK(sc); ifp->if_mtu = ifr->ifr_mtu; PTNET_CORE_UNLOCK(sc); } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: err = ifmedia_ioctl(ifp, ifr, &sc->media, cmd); break; default: err = ether_ioctl(ifp, cmd, data); break; } return err; } static int ptnet_init_locked(struct ptnet_softc *sc) { if_t ifp = sc->ifp; struct netmap_adapter *na_dr = &sc->ptna->dr.up; struct netmap_adapter *na_nm = &sc->ptna->hwup.up; unsigned int nm_buf_size; int ret; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { return 0; /* nothing to do */ } device_printf(sc->dev, "%s\n", __func__); /* Translate offload capabilities according to if_capenable. */ ifp->if_hwassist = 0; if (ifp->if_capenable & IFCAP_TXCSUM) ifp->if_hwassist |= PTNET_CSUM_OFFLOAD; if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) ifp->if_hwassist |= PTNET_CSUM_OFFLOAD_IPV6; if (ifp->if_capenable & IFCAP_TSO4) ifp->if_hwassist |= CSUM_IP_TSO; if (ifp->if_capenable & IFCAP_TSO6) ifp->if_hwassist |= CSUM_IP6_TSO; /* * Prepare the interface for netmap mode access. */ netmap_update_config(na_dr); ret = netmap_mem_finalize(na_dr->nm_mem, na_dr); if (ret) { device_printf(sc->dev, "netmap_mem_finalize() failed\n"); return ret; } if (sc->ptna->backend_users == 0) { ret = ptnet_nm_krings_create(na_nm); if (ret) { device_printf(sc->dev, "ptnet_nm_krings_create() " "failed\n"); goto err_mem_finalize; } ret = netmap_mem_rings_create(na_dr); if (ret) { device_printf(sc->dev, "netmap_mem_rings_create() " "failed\n"); goto err_rings_create; } ret = netmap_mem_get_lut(na_dr->nm_mem, &na_dr->na_lut); if (ret) { device_printf(sc->dev, "netmap_mem_get_lut() " "failed\n"); goto err_get_lut; } } ret = ptnet_nm_register(na_dr, 1 /* on */); if (ret) { goto err_register; } nm_buf_size = NETMAP_BUF_SIZE(na_dr); KASSERT(nm_buf_size > 0, ("Invalid netmap buffer size")); sc->min_tx_space = PTNET_MAX_PKT_SIZE / nm_buf_size + 2; device_printf(sc->dev, "%s: min_tx_space = %u\n", __func__, sc->min_tx_space); #ifdef PTNETMAP_STATS callout_reset(&sc->tick, hz, ptnet_tick, sc); #endif ifp->if_drv_flags |= IFF_DRV_RUNNING; return 0; err_register: memset(&na_dr->na_lut, 0, sizeof(na_dr->na_lut)); err_get_lut: netmap_mem_rings_delete(na_dr); err_rings_create: ptnet_nm_krings_delete(na_nm); err_mem_finalize: netmap_mem_deref(na_dr->nm_mem, na_dr); return ret; } /* To be called under core lock. */ static int ptnet_stop(struct ptnet_softc *sc) { if_t ifp = sc->ifp; struct netmap_adapter *na_dr = &sc->ptna->dr.up; struct netmap_adapter *na_nm = &sc->ptna->hwup.up; int i; device_printf(sc->dev, "%s\n", __func__); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { return 0; /* nothing to do */ } /* Clear the driver-ready flag, and synchronize with all the queues, * so that after this loop we are sure nobody is working anymore with * the device. This scheme is taken from the vtnet driver. */ ifp->if_drv_flags &= ~IFF_DRV_RUNNING; callout_stop(&sc->tick); for (i = 0; i < sc->num_rings; i++) { PTNET_Q_LOCK(sc->queues + i); PTNET_Q_UNLOCK(sc->queues + i); } ptnet_nm_register(na_dr, 0 /* off */); if (sc->ptna->backend_users == 0) { netmap_mem_rings_delete(na_dr); ptnet_nm_krings_delete(na_nm); } netmap_mem_deref(na_dr->nm_mem, na_dr); return 0; } static void ptnet_qflush(if_t ifp) { struct ptnet_softc *sc = if_getsoftc(ifp); int i; /* Flush all the bufrings and do the interface flush. 
*/ for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; struct mbuf *m; PTNET_Q_LOCK(pq); if (pq->bufring) { while ((m = buf_ring_dequeue_sc(pq->bufring))) { m_freem(m); } } PTNET_Q_UNLOCK(pq); } if_qflush(ifp); } static int ptnet_media_change(if_t ifp) { struct ptnet_softc *sc = if_getsoftc(ifp); struct ifmedia *ifm = &sc->media; if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) { return EINVAL; } return 0; } #if __FreeBSD_version >= 1100000 static uint64_t ptnet_get_counter(if_t ifp, ift_counter cnt) { struct ptnet_softc *sc = if_getsoftc(ifp); struct ptnet_queue_stats stats[2]; int i; /* Accumulate statistics over the queues. */ memset(stats, 0, sizeof(stats)); for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; int idx = (i < sc->num_tx_rings) ? 0 : 1; stats[idx].packets += pq->stats.packets; stats[idx].bytes += pq->stats.bytes; stats[idx].errors += pq->stats.errors; stats[idx].iqdrops += pq->stats.iqdrops; stats[idx].mcasts += pq->stats.mcasts; } switch (cnt) { case IFCOUNTER_IPACKETS: return (stats[1].packets); case IFCOUNTER_IQDROPS: return (stats[1].iqdrops); case IFCOUNTER_IERRORS: return (stats[1].errors); case IFCOUNTER_OPACKETS: return (stats[0].packets); case IFCOUNTER_OBYTES: return (stats[0].bytes); case IFCOUNTER_OMCASTS: return (stats[0].mcasts); default: return (if_get_counter_default(ifp, cnt)); } } #endif #ifdef PTNETMAP_STATS /* Called under core lock. */ static void ptnet_tick(void *opaque) { struct ptnet_softc *sc = opaque; int i; for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; struct ptnet_queue_stats cur = pq->stats; struct timeval now; unsigned int delta; microtime(&now); delta = now.tv_usec - sc->last_ts.tv_usec + (now.tv_sec - sc->last_ts.tv_sec) * 1000000; delta /= 1000; /* in milliseconds */ if (delta == 0) continue; device_printf(sc->dev, "#%d[%u ms]:pkts %lu, kicks %lu, " "intr %lu\n", i, delta, (cur.packets - pq->last_stats.packets), (cur.kicks - pq->last_stats.kicks), (cur.intrs - pq->last_stats.intrs)); pq->last_stats = cur; } microtime(&sc->last_ts); callout_schedule(&sc->tick, hz); } #endif /* PTNETMAP_STATS */ static void ptnet_media_status(if_t ifp, struct ifmediareq *ifmr) { /* We are always active, as the backend netmap port is * always open in netmap mode. */ ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; ifmr->ifm_active = IFM_ETHER | IFM_10G_T | IFM_FDX; } static uint32_t ptnet_nm_ptctl(struct ptnet_softc *sc, uint32_t cmd) { /* * Write a command and read back error status, * with zero meaning success. */ bus_write_4(sc->iomem, PTNET_IO_PTCTL, cmd); return bus_read_4(sc->iomem, PTNET_IO_PTCTL); } static int ptnet_nm_config(struct netmap_adapter *na, struct nm_config_info *info) { struct ptnet_softc *sc = if_getsoftc(na->ifp); info->num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS); info->num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS); info->num_tx_descs = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS); info->num_rx_descs = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS); info->rx_buf_maxsize = NETMAP_BUF_SIZE(na); device_printf(sc->dev, "txr %u, rxr %u, txd %u, rxd %u, rxbufsz %u\n", info->num_tx_rings, info->num_rx_rings, info->num_tx_descs, info->num_rx_descs, info->rx_buf_maxsize); return 0; } static void ptnet_sync_from_csb(struct ptnet_softc *sc, struct netmap_adapter *na) { int i; /* Sync krings from the host, reading from * CSB. 
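* head and cur are taken from the application-to-kernel block (atok), hwcur and hwtail from the kernel-to-application block (ktoa); both the kring and the user-visible netmap_ring are updated to match.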
*/ for (i = 0; i < sc->num_rings; i++) { struct nm_csb_atok *atok = sc->queues[i].atok; struct nm_csb_ktoa *ktoa = sc->queues[i].ktoa; struct netmap_kring *kring; if (i < na->num_tx_rings) { kring = na->tx_rings[i]; } else { kring = na->rx_rings[i - na->num_tx_rings]; } kring->rhead = kring->ring->head = atok->head; kring->rcur = kring->ring->cur = atok->cur; kring->nr_hwcur = ktoa->hwcur; kring->nr_hwtail = kring->rtail = kring->ring->tail = ktoa->hwtail; nm_prdis("%d,%d: csb {hc %u h %u c %u ht %u}", t, i, ktoa->hwcur, atok->head, atok->cur, ktoa->hwtail); nm_prdis("%d,%d: kring {hc %u rh %u rc %u h %u c %u ht %u rt %u t %u}", t, i, kring->nr_hwcur, kring->rhead, kring->rcur, kring->ring->head, kring->ring->cur, kring->nr_hwtail, kring->rtail, kring->ring->tail); } } static void ptnet_update_vnet_hdr(struct ptnet_softc *sc) { unsigned int wanted_hdr_len = ptnet_vnet_hdr ? PTNET_HDR_SIZE : 0; bus_write_4(sc->iomem, PTNET_IO_VNET_HDR_LEN, wanted_hdr_len); sc->vnet_hdr_len = bus_read_4(sc->iomem, PTNET_IO_VNET_HDR_LEN); sc->ptna->hwup.up.virt_hdr_len = sc->vnet_hdr_len; } static int ptnet_nm_register(struct netmap_adapter *na, int onoff) { /* device-specific */ if_t ifp = na->ifp; struct ptnet_softc *sc = if_getsoftc(ifp); int native = (na == &sc->ptna->hwup.up); struct ptnet_queue *pq; int ret = 0; int i; if (!onoff) { sc->ptna->backend_users--; } /* If this is the last netmap client, guest interrupt enable flags may * be in arbitrary state. Since these flags are going to be used also * by the netdevice driver, we have to make sure to start with * notifications enabled. Also, schedule NAPI to flush pending packets * in the RX rings, since we will not receive further interrupts * until these will be processed. */ if (native && !onoff && na->active_fds == 0) { nm_prinf("Exit netmap mode, re-enable interrupts"); for (i = 0; i < sc->num_rings; i++) { pq = sc->queues + i; pq->atok->appl_need_kick = 1; } } if (onoff) { if (sc->ptna->backend_users == 0) { /* Initialize notification enable fields in the CSB. */ for (i = 0; i < sc->num_rings; i++) { pq = sc->queues + i; pq->ktoa->kern_need_kick = 1; pq->atok->appl_need_kick = (!(ifp->if_capenable & IFCAP_POLLING) && i >= sc->num_tx_rings); } /* Set the virtio-net header length. */ ptnet_update_vnet_hdr(sc); /* Make sure the host adapter passed through is ready * for txsync/rxsync. */ ret = ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_CREATE); if (ret) { return ret; } /* Align the guest krings and rings to the state stored * in the CSB. 
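* This is done by ptnet_sync_from_csb() above, which copies head/cur and hwcur/hwtail out of the shared atok/ktoa blocks into the krings.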
*/ ptnet_sync_from_csb(sc, na); } /* If not native, don't call nm_set_native_flags, since we don't want * to replace if_transmit method, nor set NAF_NETMAP_ON */ if (native) { netmap_krings_mode_commit(na, onoff); nm_set_native_flags(na); } } else { if (native) { nm_clear_native_flags(na); netmap_krings_mode_commit(na, onoff); } if (sc->ptna->backend_users == 0) { ret = ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_DELETE); } } if (onoff) { sc->ptna->backend_users++; } return ret; } static int ptnet_nm_txsync(struct netmap_kring *kring, int flags) { struct ptnet_softc *sc = if_getsoftc(kring->na->ifp); struct ptnet_queue *pq = sc->queues + kring->ring_id; bool notify; notify = netmap_pt_guest_txsync(pq->atok, pq->ktoa, kring, flags); if (notify) { ptnet_kick(pq); } return 0; } static int ptnet_nm_rxsync(struct netmap_kring *kring, int flags) { struct ptnet_softc *sc = if_getsoftc(kring->na->ifp); struct ptnet_queue *pq = sc->rxqueues + kring->ring_id; bool notify; notify = netmap_pt_guest_rxsync(pq->atok, pq->ktoa, kring, flags); if (notify) { ptnet_kick(pq); } return 0; } static void ptnet_nm_intr(struct netmap_adapter *na, int onoff) { struct ptnet_softc *sc = if_getsoftc(na->ifp); int i; for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; pq->atok->appl_need_kick = onoff; } } static void ptnet_tx_intr(void *opaque) { struct ptnet_queue *pq = opaque; struct ptnet_softc *sc = pq->sc; DBG(device_printf(sc->dev, "Tx interrupt #%d\n", pq->kring_id)); #ifdef PTNETMAP_STATS pq->stats.intrs ++; #endif /* PTNETMAP_STATS */ if (netmap_tx_irq(sc->ifp, pq->kring_id) != NM_IRQ_PASS) { return; } /* Schedule the tasqueue to flush process transmissions requests. * However, vtnet, if_em and if_igb just call ptnet_transmit() here, * at least when using MSI-X interrupts. The if_em driver, instead * schedule taskqueue when using legacy interrupts. */ taskqueue_enqueue(pq->taskq, &pq->task); } static void ptnet_rx_intr(void *opaque) { struct ptnet_queue *pq = opaque; struct ptnet_softc *sc = pq->sc; unsigned int unused; DBG(device_printf(sc->dev, "Rx interrupt #%d\n", pq->kring_id)); #ifdef PTNETMAP_STATS pq->stats.intrs ++; #endif /* PTNETMAP_STATS */ if (netmap_rx_irq(sc->ifp, pq->kring_id, &unused) != NM_IRQ_PASS) { return; } /* Like vtnet, if_igb and if_em drivers when using MSI-X interrupts, * receive-side processing is executed directly in the interrupt * service routine. Alternatively, we may schedule the taskqueue. */ ptnet_rx_eof(pq, PTNET_RX_BUDGET, true); } static void ptnet_vlan_tag_remove(struct mbuf *m) { struct ether_vlan_header *evh; evh = mtod(m, struct ether_vlan_header *); m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag); m->m_flags |= M_VLANTAG; /* Strip the 802.1Q header. */ bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN, ETHER_HDR_LEN - ETHER_TYPE_LEN); m_adj(m, ETHER_VLAN_ENCAP_LEN); } static void ptnet_ring_update(struct ptnet_queue *pq, struct netmap_kring *kring, unsigned int head, unsigned int sync_flags) { struct netmap_ring *ring = kring->ring; struct nm_csb_atok *atok = pq->atok; struct nm_csb_ktoa *ktoa = pq->ktoa; /* Some packets have been pushed to the netmap ring. We have * to tell the host to process the new packets, updating cur * and head in the CSB. */ ring->head = ring->cur = head; /* Mimic nm_txsync_prologue/nm_rxsync_prologue. */ kring->rcur = kring->rhead = head; nm_sync_kloop_appl_write(atok, kring->rcur, kring->rhead); /* Kick the host if needed. 
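* The kick is issued only while the host still has notifications enabled (kern_need_kick != 0); sync_flags (NAF_FORCE_RECLAIM from the transmit path, NAF_FORCE_READ from the receive path) is stored in the atok block before ringing the doorbell.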
*/ if (NM_ACCESS_ONCE(ktoa->kern_need_kick)) { atok->sync_flags = sync_flags; ptnet_kick(pq); } } #define PTNET_TX_NOSPACE(_h, _k, _min) \ ((((_h) < (_k)->rtail) ? 0 : (_k)->nkr_num_slots) + \ (_k)->rtail - (_h)) < (_min) /* This function may be called by the network stack, or by * by the taskqueue thread. */ static int ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget, bool may_resched) { struct ptnet_softc *sc = pq->sc; bool have_vnet_hdr = sc->vnet_hdr_len; struct netmap_adapter *na = &sc->ptna->dr.up; if_t ifp = sc->ifp; unsigned int batch_count = 0; struct nm_csb_atok *atok; struct nm_csb_ktoa *ktoa; struct netmap_kring *kring; struct netmap_ring *ring; struct netmap_slot *slot; unsigned int count = 0; unsigned int minspace; unsigned int head; unsigned int lim; struct mbuf *mhead; struct mbuf *mf; int nmbuf_bytes; uint8_t *nmbuf; if (!PTNET_Q_TRYLOCK(pq)) { /* We failed to acquire the lock, schedule the taskqueue. */ nm_prlim(1, "Deferring TX work"); if (may_resched) { taskqueue_enqueue(pq->taskq, &pq->task); } return 0; } if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) { PTNET_Q_UNLOCK(pq); nm_prlim(1, "Interface is down"); return ENETDOWN; } atok = pq->atok; ktoa = pq->ktoa; kring = na->tx_rings[pq->kring_id]; ring = kring->ring; lim = kring->nkr_num_slots - 1; head = ring->head; minspace = sc->min_tx_space; while (count < budget) { if (PTNET_TX_NOSPACE(head, kring, minspace)) { /* We ran out of slot, let's see if the host has * freed up some, by reading hwcur and hwtail from * the CSB. */ ptnet_sync_tail(ktoa, kring); if (PTNET_TX_NOSPACE(head, kring, minspace)) { /* Still no slots available. Reactivate the * interrupts so that we can be notified * when some free slots are made available by * the host. */ atok->appl_need_kick = 1; /* Double check. We need a full barrier to * prevent the store to atok->appl_need_kick * to be reordered with the load from * ktoa->hwcur and ktoa->hwtail (store-load * barrier). */ nm_stld_barrier(); ptnet_sync_tail(ktoa, kring); if (likely(PTNET_TX_NOSPACE(head, kring, minspace))) { break; } nm_prlim(1, "Found more slots by doublecheck"); /* More slots were freed before reactivating * the interrupts. */ atok->appl_need_kick = 0; } } mhead = drbr_peek(ifp, pq->bufring); if (!mhead) { break; } /* Initialize transmission state variables. */ slot = ring->slot + head; nmbuf = NMB(na, slot); nmbuf_bytes = 0; /* If needed, prepare the virtio-net header at the beginning * of the first slot. */ if (have_vnet_hdr) { struct virtio_net_hdr *vh = (struct virtio_net_hdr *)nmbuf; /* For performance, we could replace this memset() with * two 8-bytes-wide writes. */ memset(nmbuf, 0, PTNET_HDR_SIZE); if (mhead->m_pkthdr.csum_flags & PTNET_ALL_OFFLOAD) { mhead = virtio_net_tx_offload(ifp, mhead, false, vh); if (unlikely(!mhead)) { /* Packet dropped because errors * occurred while preparing the vnet * header. Let's go ahead with the next * packet. 
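* The failure is charged to the per-queue error counter, and drbr_advance() consumes the peeked mbuf so the next iteration starts from a fresh packet.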
*/ pq->stats.errors ++; drbr_advance(ifp, pq->bufring); continue; } } nm_prdis(1, "%s: [csum_flags %lX] vnet hdr: flags %x " "csum_start %u csum_ofs %u hdr_len = %u " "gso_size %u gso_type %x", __func__, mhead->m_pkthdr.csum_flags, vh->flags, vh->csum_start, vh->csum_offset, vh->hdr_len, vh->gso_size, vh->gso_type); nmbuf += PTNET_HDR_SIZE; nmbuf_bytes += PTNET_HDR_SIZE; } for (mf = mhead; mf; mf = mf->m_next) { uint8_t *mdata = mf->m_data; int mlen = mf->m_len; for (;;) { int copy = NETMAP_BUF_SIZE(na) - nmbuf_bytes; if (mlen < copy) { copy = mlen; } memcpy(nmbuf, mdata, copy); mdata += copy; mlen -= copy; nmbuf += copy; nmbuf_bytes += copy; if (!mlen) { break; } slot->len = nmbuf_bytes; slot->flags = NS_MOREFRAG; head = nm_next(head, lim); KASSERT(head != ring->tail, ("Unexpectedly run out of TX space")); slot = ring->slot + head; nmbuf = NMB(na, slot); nmbuf_bytes = 0; } } /* Complete last slot and update head. */ slot->len = nmbuf_bytes; slot->flags = 0; head = nm_next(head, lim); /* Consume the packet just processed. */ drbr_advance(ifp, pq->bufring); /* Copy the packet to listeners. */ ETHER_BPF_MTAP(ifp, mhead); pq->stats.packets ++; pq->stats.bytes += mhead->m_pkthdr.len; if (mhead->m_flags & M_MCAST) { pq->stats.mcasts ++; } m_freem(mhead); count ++; if (++batch_count == PTNET_TX_BATCH) { ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM); batch_count = 0; } } if (batch_count) { ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM); } if (count >= budget && may_resched) { DBG(nm_prlim(1, "out of budget: resched, %d mbufs pending\n", drbr_inuse(ifp, pq->bufring))); taskqueue_enqueue(pq->taskq, &pq->task); } PTNET_Q_UNLOCK(pq); return count; } static int ptnet_transmit(if_t ifp, struct mbuf *m) { struct ptnet_softc *sc = if_getsoftc(ifp); struct ptnet_queue *pq; unsigned int queue_idx; int err; DBG(device_printf(sc->dev, "transmit %p\n", m)); /* Insert 802.1Q header if needed. */ if (m->m_flags & M_VLANTAG) { m = ether_vlanencap(m, m->m_pkthdr.ether_vtag); if (m == NULL) { return ENOBUFS; } m->m_flags &= ~M_VLANTAG; } /* Get the flow-id if available. */ queue_idx = (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) ? m->m_pkthdr.flowid : curcpu; if (unlikely(queue_idx >= sc->num_tx_rings)) { queue_idx %= sc->num_tx_rings; } pq = sc->queues + queue_idx; err = drbr_enqueue(ifp, pq->bufring, m); if (err) { /* ENOBUFS when the bufring is full */ nm_prlim(1, "%s: drbr_enqueue() failed %d\n", __func__, err); pq->stats.errors ++; return err; } if (ifp->if_capenable & IFCAP_POLLING) { /* If polling is on, the transmit queues will be * drained by the poller. */ return 0; } err = ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true); return (err < 0) ? 
err : 0; } static unsigned int ptnet_rx_discard(struct netmap_kring *kring, unsigned int head) { struct netmap_ring *ring = kring->ring; struct netmap_slot *slot = ring->slot + head; for (;;) { head = nm_next(head, kring->nkr_num_slots - 1); if (!(slot->flags & NS_MOREFRAG) || head == ring->tail) { break; } slot = ring->slot + head; } return head; } static inline struct mbuf * ptnet_rx_slot(struct mbuf *mtail, uint8_t *nmbuf, unsigned int nmbuf_len) { uint8_t *mdata = mtod(mtail, uint8_t *) + mtail->m_len; do { unsigned int copy; if (mtail->m_len == MCLBYTES) { struct mbuf *mf; mf = m_getcl(M_NOWAIT, MT_DATA, 0); if (unlikely(!mf)) { return NULL; } mtail->m_next = mf; mtail = mf; mdata = mtod(mtail, uint8_t *); mtail->m_len = 0; } copy = MCLBYTES - mtail->m_len; if (nmbuf_len < copy) { copy = nmbuf_len; } memcpy(mdata, nmbuf, copy); nmbuf += copy; nmbuf_len -= copy; mdata += copy; mtail->m_len += copy; } while (nmbuf_len); return mtail; } static int ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched) { struct ptnet_softc *sc = pq->sc; bool have_vnet_hdr = sc->vnet_hdr_len; struct nm_csb_atok *atok = pq->atok; struct nm_csb_ktoa *ktoa = pq->ktoa; struct netmap_adapter *na = &sc->ptna->dr.up; struct netmap_kring *kring = na->rx_rings[pq->kring_id]; struct netmap_ring *ring = kring->ring; unsigned int const lim = kring->nkr_num_slots - 1; unsigned int batch_count = 0; if_t ifp = sc->ifp; unsigned int count = 0; uint32_t head; PTNET_Q_LOCK(pq); if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) { goto unlock; } kring->nr_kflags &= ~NKR_PENDINTR; head = ring->head; while (count < budget) { uint32_t prev_head = head; struct mbuf *mhead, *mtail; struct virtio_net_hdr *vh; struct netmap_slot *slot; unsigned int nmbuf_len; uint8_t *nmbuf; int deliver = 1; /* the mbuf to the network stack. */ host_sync: if (head == ring->tail) { /* We ran out of slot, let's see if the host has * added some, by reading hwcur and hwtail from * the CSB. */ ptnet_sync_tail(ktoa, kring); if (head == ring->tail) { /* Still no slots available. Reactivate * interrupts as they were disabled by the * host thread right before issuing the * last interrupt. */ atok->appl_need_kick = 1; /* Double check for more completed RX slots. * We need a full barrier to prevent the store * to atok->appl_need_kick to be reordered with * the load from ktoa->hwcur and ktoa->hwtail * (store-load barrier). */ nm_stld_barrier(); ptnet_sync_tail(ktoa, kring); if (likely(head == ring->tail)) { break; } atok->appl_need_kick = 0; } } /* Initialize ring state variables, possibly grabbing the * virtio-net header. */ slot = ring->slot + head; nmbuf = NMB(na, slot); nmbuf_len = slot->len; vh = (struct virtio_net_hdr *)nmbuf; if (have_vnet_hdr) { if (unlikely(nmbuf_len < PTNET_HDR_SIZE)) { /* There is no good reason why host should * put the header in multiple netmap slots. * If this is the case, discard. */ nm_prlim(1, "Fragmented vnet-hdr: dropping"); head = ptnet_rx_discard(kring, head); pq->stats.iqdrops ++; deliver = 0; goto skip; } nm_prdis(1, "%s: vnet hdr: flags %x csum_start %u " "csum_ofs %u hdr_len = %u gso_size %u " "gso_type %x", __func__, vh->flags, vh->csum_start, vh->csum_offset, vh->hdr_len, vh->gso_size, vh->gso_type); nmbuf += PTNET_HDR_SIZE; nmbuf_len -= PTNET_HDR_SIZE; } /* Allocate the head of a new mbuf chain. * We use m_getcl() to allocate an mbuf with standard cluster * size (MCLBYTES). In the future we could use m_getjcl() * to choose different sizes. 
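* A hypothetical variant could choose the cluster size from the first slot, e.g. mhead = (nmbuf_len > MCLBYTES) ? m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE) : m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); this is only a sketch, the code below always uses MCLBYTES clusters.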
*/ mhead = mtail = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (unlikely(mhead == NULL)) { device_printf(sc->dev, "%s: failed to allocate mbuf " "head\n", __func__); pq->stats.errors ++; break; } /* Initialize the mbuf state variables. */ mhead->m_pkthdr.len = nmbuf_len; mtail->m_len = 0; /* Scan all the netmap slots containing the current packet. */ for (;;) { DBG(device_printf(sc->dev, "%s: h %u t %u rcv frag " "len %u, flags %u\n", __func__, head, ring->tail, slot->len, slot->flags)); mtail = ptnet_rx_slot(mtail, nmbuf, nmbuf_len); if (unlikely(!mtail)) { /* Ouch. We ran out of memory while processing * a packet. We have to restore the previous * head position, free the mbuf chain, and * schedule the taskqueue to give the packet * another chance. */ device_printf(sc->dev, "%s: failed to allocate" " mbuf frag, reset head %u --> %u\n", __func__, head, prev_head); head = prev_head; m_freem(mhead); pq->stats.errors ++; if (may_resched) { taskqueue_enqueue(pq->taskq, &pq->task); } goto escape; } /* We have to increment head irrespective of the * NS_MOREFRAG being set or not. */ head = nm_next(head, lim); if (!(slot->flags & NS_MOREFRAG)) { break; } if (unlikely(head == ring->tail)) { /* The very last slot prepared by the host has * the NS_MOREFRAG set. Drop it and continue * the outer cycle (to do the double-check). */ nm_prlim(1, "Incomplete packet: dropping"); m_freem(mhead); pq->stats.iqdrops ++; goto host_sync; } slot = ring->slot + head; nmbuf = NMB(na, slot); nmbuf_len = slot->len; mhead->m_pkthdr.len += nmbuf_len; } mhead->m_pkthdr.rcvif = ifp; mhead->m_pkthdr.csum_flags = 0; /* Store the queue idx in the packet header. */ mhead->m_pkthdr.flowid = pq->kring_id; M_HASHTYPE_SET(mhead, M_HASHTYPE_OPAQUE); if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { struct ether_header *eh; eh = mtod(mhead, struct ether_header *); if (eh->ether_type == htons(ETHERTYPE_VLAN)) { ptnet_vlan_tag_remove(mhead); /* * With the 802.1Q header removed, update the * checksum starting location accordingly. */ if (vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) vh->csum_start -= ETHER_VLAN_ENCAP_LEN; } } if (unlikely(have_vnet_hdr && virtio_net_rx_csum(mhead, vh))) { m_freem(mhead); nm_prlim(1, "Csum offload error: dropping"); pq->stats.iqdrops ++; deliver = 0; } skip: count ++; if (++batch_count >= PTNET_RX_BATCH) { /* Some packets have been (or will be) pushed to the network * stack. We need to update the CSB to tell the host about * the new ring->cur and ring->head (RX buffer refill). */ ptnet_ring_update(pq, kring, head, NAF_FORCE_READ); batch_count = 0; } if (likely(deliver)) { pq->stats.packets ++; pq->stats.bytes += mhead->m_pkthdr.len; PTNET_Q_UNLOCK(pq); (*ifp->if_input)(ifp, mhead); PTNET_Q_LOCK(pq); /* The ring->head index (and related indices) are * updated under pq lock by ptnet_ring_update(). * Since we dropped the lock to call if_input(), we * must reload ring->head and restart processing the * ring from there. */ head = ring->head; if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) { /* The interface has gone down while we didn't * have the lock. Stop any processing and exit. */ goto unlock; } } } escape: if (batch_count) { ptnet_ring_update(pq, kring, head, NAF_FORCE_READ); } if (count >= budget && may_resched) { /* If we ran out of budget or the double-check found new * slots to process, schedule the taskqueue. 
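* may_resched is false when we are called from the polling loop (ptnet_poll() passes false), in which case the leftover work is simply left for the next poll tick instead of being re-queued.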
*/ DBG(nm_prlim(1, "out of budget: resched h %u t %u\n", head, ring->tail)); taskqueue_enqueue(pq->taskq, &pq->task); } unlock: PTNET_Q_UNLOCK(pq); return count; } static void ptnet_rx_task(void *context, int pending) { struct ptnet_queue *pq = context; DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id)); ptnet_rx_eof(pq, PTNET_RX_BUDGET, true); } static void ptnet_tx_task(void *context, int pending) { struct ptnet_queue *pq = context; DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id)); ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true); } #ifdef DEVICE_POLLING /* We don't need to handle differently POLL_AND_CHECK_STATUS and * POLL_ONLY, since we don't have an Interrupt Status Register. */ static int ptnet_poll(if_t ifp, enum poll_cmd cmd, int budget) { struct ptnet_softc *sc = if_getsoftc(ifp); unsigned int queue_budget; unsigned int count = 0; bool borrow = false; int i; KASSERT(sc->num_rings > 0, ("Found no queues in while polling ptnet")); queue_budget = MAX(budget / sc->num_rings, 1); nm_prlim(1, "Per-queue budget is %d", queue_budget); while (budget) { unsigned int rcnt = 0; for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; if (borrow) { queue_budget = MIN(queue_budget, budget); if (queue_budget == 0) { break; } } if (i < sc->num_tx_rings) { rcnt += ptnet_drain_transmit_queue(pq, queue_budget, false); } else { rcnt += ptnet_rx_eof(pq, queue_budget, false); } } if (!rcnt) { /* A scan of the queues gave no result, we can * stop here. */ break; } if (rcnt > budget) { /* This may happen when initial budget < sc->num_rings, * since one packet budget is given to each queue * anyway. Just pretend we didn't eat "so much". */ rcnt = budget; } count += rcnt; budget -= rcnt; borrow = true; } return count; } #endif /* DEVICE_POLLING */ #endif /* WITH_PTNETMAP */ diff --git a/sys/dev/nfe/if_nfe.c b/sys/dev/nfe/if_nfe.c index 940bad645037..246e257e3b2b 100644 --- a/sys/dev/nfe/if_nfe.c +++ b/sys/dev/nfe/if_nfe.c @@ -1,3408 +1,3408 @@ /* $OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $ */ /*- * Copyright (c) 2006 Shigeaki Tagashira * Copyright (c) 2006 Damien Bergamini * Copyright (c) 2005, 2006 Jonathan Gray * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */ #include __FBSDID("$FreeBSD$"); #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MODULE_DEPEND(nfe, pci, 1, 1, 1); MODULE_DEPEND(nfe, ether, 1, 1, 1); MODULE_DEPEND(nfe, miibus, 1, 1, 1); /* "device miibus" required. See GENERIC if you get errors here. 
*/ #include "miibus_if.h" static int nfe_probe(device_t); static int nfe_attach(device_t); static int nfe_detach(device_t); static int nfe_suspend(device_t); static int nfe_resume(device_t); static int nfe_shutdown(device_t); static int nfe_can_use_msix(struct nfe_softc *); static int nfe_detect_msik9(struct nfe_softc *); static void nfe_power(struct nfe_softc *); static int nfe_miibus_readreg(device_t, int, int); static int nfe_miibus_writereg(device_t, int, int, int); static void nfe_miibus_statchg(device_t); static void nfe_mac_config(struct nfe_softc *, struct mii_data *); static void nfe_set_intr(struct nfe_softc *); static __inline void nfe_enable_intr(struct nfe_softc *); static __inline void nfe_disable_intr(struct nfe_softc *); static int nfe_ioctl(if_t, u_long, caddr_t); static void nfe_alloc_msix(struct nfe_softc *, int); static int nfe_intr(void *); static void nfe_int_task(void *, int); static __inline void nfe_discard_rxbuf(struct nfe_softc *, int); static __inline void nfe_discard_jrxbuf(struct nfe_softc *, int); static int nfe_newbuf(struct nfe_softc *, int); static int nfe_jnewbuf(struct nfe_softc *, int); static int nfe_rxeof(struct nfe_softc *, int, int *); static int nfe_jrxeof(struct nfe_softc *, int, int *); static void nfe_txeof(struct nfe_softc *); static int nfe_encap(struct nfe_softc *, struct mbuf **); static void nfe_setmulti(struct nfe_softc *); static void nfe_start(if_t); static void nfe_start_locked(if_t); static void nfe_watchdog(if_t); static void nfe_init(void *); static void nfe_init_locked(void *); static void nfe_stop(if_t); static int nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); static void nfe_alloc_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *); static int nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); static int nfe_init_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *); static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); static void nfe_free_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *); static int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); static void nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); static int nfe_ifmedia_upd(if_t); static void nfe_ifmedia_sts(if_t, struct ifmediareq *); static void nfe_tick(void *); static void nfe_get_macaddr(struct nfe_softc *, uint8_t *); static void nfe_set_macaddr(struct nfe_softc *, uint8_t *); static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int); static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int); static int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS); static void nfe_sysctl_node(struct nfe_softc *); static void nfe_stats_clear(struct nfe_softc *); static void nfe_stats_update(struct nfe_softc *); static void nfe_set_linkspeed(struct nfe_softc *); static void nfe_set_wol(struct nfe_softc *); #ifdef NFE_DEBUG static int nfedebug = 0; #define DPRINTF(sc, ...) do { \ if (nfedebug) \ device_printf((sc)->nfe_dev, __VA_ARGS__); \ } while (0) #define DPRINTFN(sc, n, ...) do { \ if (nfedebug >= (n)) \ device_printf((sc)->nfe_dev, __VA_ARGS__); \ } while (0) #else #define DPRINTF(sc, ...) #define DPRINTFN(sc, n, ...) #endif #define NFE_LOCK(_sc) mtx_lock(&(_sc)->nfe_mtx) #define NFE_UNLOCK(_sc) mtx_unlock(&(_sc)->nfe_mtx) #define NFE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->nfe_mtx, MA_OWNED) /* Tunables. 
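* All three are boot-time loader tunables (hw.nfe.msi_disable, hw.nfe.msix_disable, hw.nfe.jumbo_disable); for example, setting hw.nfe.msi_disable="1" in /boot/loader.conf (illustrative) keeps the driver from using MSI.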
*/ static int msi_disable = 0; static int msix_disable = 0; static int jumbo_disable = 0; TUNABLE_INT("hw.nfe.msi_disable", &msi_disable); TUNABLE_INT("hw.nfe.msix_disable", &msix_disable); TUNABLE_INT("hw.nfe.jumbo_disable", &jumbo_disable); static device_method_t nfe_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nfe_probe), DEVMETHOD(device_attach, nfe_attach), DEVMETHOD(device_detach, nfe_detach), DEVMETHOD(device_suspend, nfe_suspend), DEVMETHOD(device_resume, nfe_resume), DEVMETHOD(device_shutdown, nfe_shutdown), /* MII interface */ DEVMETHOD(miibus_readreg, nfe_miibus_readreg), DEVMETHOD(miibus_writereg, nfe_miibus_writereg), DEVMETHOD(miibus_statchg, nfe_miibus_statchg), DEVMETHOD_END }; static driver_t nfe_driver = { "nfe", nfe_methods, sizeof(struct nfe_softc) }; static devclass_t nfe_devclass; DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0); DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0); static struct nfe_type nfe_devs[] = { {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN, "NVIDIA nForce MCP Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN, "NVIDIA nForce2 MCP2 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1, "NVIDIA nForce2 400 MCP4 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2, "NVIDIA nForce2 400 MCP5 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1, "NVIDIA nForce3 MCP3 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN, "NVIDIA nForce3 250 MCP6 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4, "NVIDIA nForce3 MCP7 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1, "NVIDIA nForce4 CK804 MCP8 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2, "NVIDIA nForce4 CK804 MCP9 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1, "NVIDIA nForce MCP04 Networking Adapter"}, /* MCP10 */ {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2, "NVIDIA nForce MCP04 Networking Adapter"}, /* MCP11 */ {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1, "NVIDIA nForce 430 MCP12 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2, "NVIDIA nForce 430 MCP13 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1, "NVIDIA nForce MCP55 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2, "NVIDIA nForce MCP55 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1, "NVIDIA nForce MCP61 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2, "NVIDIA nForce MCP61 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3, "NVIDIA nForce MCP61 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4, "NVIDIA nForce MCP61 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1, "NVIDIA nForce MCP65 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2, "NVIDIA nForce MCP65 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3, "NVIDIA nForce MCP65 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4, "NVIDIA nForce MCP65 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1, "NVIDIA nForce MCP67 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2, "NVIDIA nForce MCP67 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3, "NVIDIA nForce MCP67 Networking Adapter"}, {PCI_VENDOR_NVIDIA, 
PCI_PRODUCT_NVIDIA_MCP67_LAN4, "NVIDIA nForce MCP67 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1, "NVIDIA nForce MCP73 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2, "NVIDIA nForce MCP73 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3, "NVIDIA nForce MCP73 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4, "NVIDIA nForce MCP73 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1, "NVIDIA nForce MCP77 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2, "NVIDIA nForce MCP77 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3, "NVIDIA nForce MCP77 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4, "NVIDIA nForce MCP77 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1, "NVIDIA nForce MCP79 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2, "NVIDIA nForce MCP79 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3, "NVIDIA nForce MCP79 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4, "NVIDIA nForce MCP79 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP89_LAN, "NVIDIA nForce MCP89 Networking Adapter"}, {0, 0, NULL} }; /* Probe for supported hardware ID's */ static int nfe_probe(device_t dev) { struct nfe_type *t; t = nfe_devs; /* Check for matching PCI DEVICE ID's */ while (t->name != NULL) { if ((pci_get_vendor(dev) == t->vid_id) && (pci_get_device(dev) == t->dev_id)) { device_set_desc(dev, t->name); return (BUS_PROBE_DEFAULT); } t++; } return (ENXIO); } static void nfe_alloc_msix(struct nfe_softc *sc, int count) { int rid; rid = PCIR_BAR(2); sc->nfe_msix_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->nfe_msix_res == NULL) { device_printf(sc->nfe_dev, "couldn't allocate MSIX table resource\n"); return; } rid = PCIR_BAR(3); sc->nfe_msix_pba_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->nfe_msix_pba_res == NULL) { device_printf(sc->nfe_dev, "couldn't allocate MSIX PBA resource\n"); bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2), sc->nfe_msix_res); sc->nfe_msix_res = NULL; return; } if (pci_alloc_msix(sc->nfe_dev, &count) == 0) { if (count == NFE_MSI_MESSAGES) { if (bootverbose) device_printf(sc->nfe_dev, "Using %d MSIX messages\n", count); sc->nfe_msix = 1; } else { if (bootverbose) device_printf(sc->nfe_dev, "couldn't allocate MSIX\n"); pci_release_msi(sc->nfe_dev); bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(3), sc->nfe_msix_pba_res); bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2), sc->nfe_msix_res); sc->nfe_msix_pba_res = NULL; sc->nfe_msix_res = NULL; } } } static int nfe_detect_msik9(struct nfe_softc *sc) { static const char *maker = "MSI"; static const char *product = "K9N6PGM2-V2 (MS-7309)"; char *m, *p; int found; found = 0; m = kern_getenv("smbios.planar.maker"); p = kern_getenv("smbios.planar.product"); if (m != NULL && p != NULL) { if (strcmp(m, maker) == 0 && strcmp(p, product) == 0) found = 1; } if (m != NULL) freeenv(m); if (p != NULL) freeenv(p); return (found); } static int nfe_attach(device_t dev) { struct nfe_softc *sc; if_t ifp; bus_addr_t dma_addr_max; int error = 0, i, msic, phyloc, reg, rid; sc = device_get_softc(dev); sc->nfe_dev = dev; mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0); 
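/* callout_init_mtx() ties the stat callout to nfe_mtx, so nfe_tick() runs with the softc lock held and the callout_drain() in nfe_detach() can tear it down without racing the timer. */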
pci_enable_busmaster(dev); rid = PCIR_BAR(0); sc->nfe_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->nfe_res[0] == NULL) { device_printf(dev, "couldn't map memory resources\n"); mtx_destroy(&sc->nfe_mtx); return (ENXIO); } if (pci_find_cap(dev, PCIY_EXPRESS, ®) == 0) { uint16_t v, width; v = pci_read_config(dev, reg + 0x08, 2); /* Change max. read request size to 4096. */ v &= ~(7 << 12); v |= (5 << 12); pci_write_config(dev, reg + 0x08, v, 2); v = pci_read_config(dev, reg + 0x0c, 2); /* link capability */ v = (v >> 4) & 0x0f; width = pci_read_config(dev, reg + 0x12, 2); /* negotiated link width */ width = (width >> 4) & 0x3f; if (v != width) device_printf(sc->nfe_dev, "warning, negotiated width of link(x%d) != " "max. width of link(x%d)\n", width, v); } if (nfe_can_use_msix(sc) == 0) { device_printf(sc->nfe_dev, "MSI/MSI-X capability black-listed, will use INTx\n"); msix_disable = 1; msi_disable = 1; } /* Allocate interrupt */ if (msix_disable == 0 || msi_disable == 0) { if (msix_disable == 0 && (msic = pci_msix_count(dev)) == NFE_MSI_MESSAGES) nfe_alloc_msix(sc, msic); if (msi_disable == 0 && sc->nfe_msix == 0 && (msic = pci_msi_count(dev)) == NFE_MSI_MESSAGES && pci_alloc_msi(dev, &msic) == 0) { if (msic == NFE_MSI_MESSAGES) { if (bootverbose) device_printf(dev, "Using %d MSI messages\n", msic); sc->nfe_msi = 1; } else pci_release_msi(dev); } } if (sc->nfe_msix == 0 && sc->nfe_msi == 0) { rid = 0; sc->nfe_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->nfe_irq[0] == NULL) { device_printf(dev, "couldn't allocate IRQ resources\n"); error = ENXIO; goto fail; } } else { for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) { sc->nfe_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->nfe_irq[i] == NULL) { device_printf(dev, "couldn't allocate IRQ resources for " "message %d\n", rid); error = ENXIO; goto fail; } } /* Map interrupts to vector 0. */ if (sc->nfe_msix != 0) { NFE_WRITE(sc, NFE_MSIX_MAP0, 0); NFE_WRITE(sc, NFE_MSIX_MAP1, 0); } else if (sc->nfe_msi != 0) { NFE_WRITE(sc, NFE_MSI_MAP0, 0); NFE_WRITE(sc, NFE_MSI_MAP1, 0); } } /* Set IRQ status/mask register. 
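* The INTx defaults (NFE_IRQ_STATUS/NFE_IRQ_MASK) are programmed first and then overridden: MSI-X switches the status register to NFE_MSIX_IRQ_STATUS, while MSI uses NFE_MSI_IRQ_MASK and enables interrupt vector 0.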
*/ sc->nfe_irq_status = NFE_IRQ_STATUS; sc->nfe_irq_mask = NFE_IRQ_MASK; sc->nfe_intrs = NFE_IRQ_WANTED; sc->nfe_nointrs = 0; if (sc->nfe_msix != 0) { sc->nfe_irq_status = NFE_MSIX_IRQ_STATUS; sc->nfe_nointrs = NFE_IRQ_WANTED; } else if (sc->nfe_msi != 0) { sc->nfe_irq_mask = NFE_MSI_IRQ_MASK; sc->nfe_intrs = NFE_MSI_VECTOR_0_ENABLED; } sc->nfe_devid = pci_get_device(dev); sc->nfe_revid = pci_get_revid(dev); sc->nfe_flags = 0; switch (sc->nfe_devid) { case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2: case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3: case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4: case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5: sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM; break; case PCI_PRODUCT_NVIDIA_MCP51_LAN1: case PCI_PRODUCT_NVIDIA_MCP51_LAN2: sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT | NFE_MIB_V1; break; case PCI_PRODUCT_NVIDIA_CK804_LAN1: case PCI_PRODUCT_NVIDIA_CK804_LAN2: case PCI_PRODUCT_NVIDIA_MCP04_LAN1: case PCI_PRODUCT_NVIDIA_MCP04_LAN2: sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_MIB_V1; break; case PCI_PRODUCT_NVIDIA_MCP55_LAN1: case PCI_PRODUCT_NVIDIA_MCP55_LAN2: sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_HW_VLAN | NFE_PWR_MGMT | NFE_TX_FLOW_CTRL | NFE_MIB_V2; break; case PCI_PRODUCT_NVIDIA_MCP61_LAN1: case PCI_PRODUCT_NVIDIA_MCP61_LAN2: case PCI_PRODUCT_NVIDIA_MCP61_LAN3: case PCI_PRODUCT_NVIDIA_MCP61_LAN4: case PCI_PRODUCT_NVIDIA_MCP67_LAN1: case PCI_PRODUCT_NVIDIA_MCP67_LAN2: case PCI_PRODUCT_NVIDIA_MCP67_LAN3: case PCI_PRODUCT_NVIDIA_MCP67_LAN4: case PCI_PRODUCT_NVIDIA_MCP73_LAN1: case PCI_PRODUCT_NVIDIA_MCP73_LAN2: case PCI_PRODUCT_NVIDIA_MCP73_LAN3: case PCI_PRODUCT_NVIDIA_MCP73_LAN4: sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL | NFE_MIB_V2; break; case PCI_PRODUCT_NVIDIA_MCP77_LAN1: case PCI_PRODUCT_NVIDIA_MCP77_LAN2: case PCI_PRODUCT_NVIDIA_MCP77_LAN3: case PCI_PRODUCT_NVIDIA_MCP77_LAN4: /* XXX flow control */ sc->nfe_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_MIB_V3; break; case PCI_PRODUCT_NVIDIA_MCP79_LAN1: case PCI_PRODUCT_NVIDIA_MCP79_LAN2: case PCI_PRODUCT_NVIDIA_MCP79_LAN3: case PCI_PRODUCT_NVIDIA_MCP79_LAN4: case PCI_PRODUCT_NVIDIA_MCP89_LAN: /* XXX flow control */ sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_MIB_V3; break; case PCI_PRODUCT_NVIDIA_MCP65_LAN1: case PCI_PRODUCT_NVIDIA_MCP65_LAN2: case PCI_PRODUCT_NVIDIA_MCP65_LAN3: case PCI_PRODUCT_NVIDIA_MCP65_LAN4: sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL | NFE_MIB_V2; break; } nfe_power(sc); /* Check for reversed ethernet address */ if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0) sc->nfe_flags |= NFE_CORRECT_MACADDR; nfe_get_macaddr(sc, sc->eaddr); /* * Allocate the parent bus DMA tag appropriate for PCI. 
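* Controllers flagged NFE_40BIT_ADDR get a lowaddr of NFE_DMA_MAXADDR instead of the 32-bit limit; every ring and buffer tag created later is a child of this parent tag, so the restriction is inherited.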
*/ dma_addr_max = BUS_SPACE_MAXADDR_32BIT; if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0) dma_addr_max = NFE_DMA_MAXADDR; error = bus_dma_tag_create( bus_get_dma_tag(sc->nfe_dev), /* parent */ 1, 0, /* alignment, boundary */ dma_addr_max, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT, 0, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->nfe_parent_tag); if (error) goto fail; ifp = sc->nfe_ifp = if_gethandle(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_gethandle()\n"); error = ENOSPC; goto fail; } /* * Allocate Tx and Rx rings. */ if ((error = nfe_alloc_tx_ring(sc, &sc->txq)) != 0) goto fail; if ((error = nfe_alloc_rx_ring(sc, &sc->rxq)) != 0) goto fail; nfe_alloc_jrx_ring(sc, &sc->jrxq); /* Create sysctl node. */ nfe_sysctl_node(sc); if_setsoftc(ifp, sc); if_initname(ifp, device_get_name(dev), device_get_unit(dev)); if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); if_setioctlfn(ifp, nfe_ioctl); if_setstartfn(ifp, nfe_start); if_sethwassist(ifp, 0); if_setcapabilities(ifp, 0); if_setinitfn(ifp, nfe_init); if_setsendqlen(ifp, NFE_TX_RING_COUNT - 1); if_setsendqready(ifp); if (sc->nfe_flags & NFE_HW_CSUM) { if_setcapabilitiesbit(ifp, IFCAP_HWCSUM | IFCAP_TSO4, 0); if_sethwassistbits(ifp, NFE_CSUM_FEATURES | CSUM_TSO, 0); } if_setcapenable(ifp, if_getcapabilities(ifp)); sc->nfe_framesize = if_getmtu(ifp) + NFE_RX_HEADERS; /* VLAN capability setup. */ if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0); if ((sc->nfe_flags & NFE_HW_VLAN) != 0) { if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING, 0); if ((if_getcapabilities(ifp) & IFCAP_HWCSUM) != 0) if_setcapabilitiesbit(ifp, (IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO), 0); } if (pci_find_cap(dev, PCIY_PMG, ®) == 0) if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC, 0); if_setcapenable(ifp, if_getcapabilities(ifp)); /* * Tell the upper layer(s) we support long frames. * Must appear after the call to ether_ifattach() because * ether_ifattach() sets ifi_hdrlen to the default value. 
*/ if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); #ifdef DEVICE_POLLING if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0); #endif /* Do MII setup */ phyloc = MII_PHY_ANY; if (sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN1 || sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN2 || sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN3 || sc->nfe_devid == PCI_PRODUCT_NVIDIA_MCP61_LAN4) { if (nfe_detect_msik9(sc) != 0) phyloc = 0; } error = mii_attach(dev, &sc->nfe_miibus, ifp, (ifm_change_cb_t)nfe_ifmedia_upd, (ifm_stat_cb_t)nfe_ifmedia_sts, BMSR_DEFCAPMASK, phyloc, MII_OFFSET_ANY, MIIF_DOPAUSE); if (error != 0) { device_printf(dev, "attaching PHYs failed\n"); goto fail; } ether_ifattach(ifp, sc->eaddr); - TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc); + NET_TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc); sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK, taskqueue_thread_enqueue, &sc->nfe_tq); taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->nfe_dev)); error = 0; if (sc->nfe_msi == 0 && sc->nfe_msix == 0) { error = bus_setup_intr(dev, sc->nfe_irq[0], INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc, &sc->nfe_intrhand[0]); } else { for (i = 0; i < NFE_MSI_MESSAGES; i++) { error = bus_setup_intr(dev, sc->nfe_irq[i], INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc, &sc->nfe_intrhand[i]); if (error != 0) break; } } if (error) { device_printf(dev, "couldn't set up irq\n"); taskqueue_free(sc->nfe_tq); sc->nfe_tq = NULL; ether_ifdetach(ifp); goto fail; } fail: if (error) nfe_detach(dev); return (error); } static int nfe_detach(device_t dev) { struct nfe_softc *sc; if_t ifp; uint8_t eaddr[ETHER_ADDR_LEN]; int i, rid; sc = device_get_softc(dev); KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized")); ifp = sc->nfe_ifp; #ifdef DEVICE_POLLING if (ifp != NULL && if_getcapenable(ifp) & IFCAP_POLLING) ether_poll_deregister(ifp); #endif if (device_is_attached(dev)) { NFE_LOCK(sc); nfe_stop(ifp); if_setflagbits(ifp, 0, IFF_UP); NFE_UNLOCK(sc); callout_drain(&sc->nfe_stat_ch); ether_ifdetach(ifp); } if (ifp) { /* restore ethernet address */ if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) { for (i = 0; i < ETHER_ADDR_LEN; i++) { eaddr[i] = sc->eaddr[5 - i]; } } else bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN); nfe_set_macaddr(sc, eaddr); if_free(ifp); } if (sc->nfe_miibus) device_delete_child(dev, sc->nfe_miibus); bus_generic_detach(dev); if (sc->nfe_tq != NULL) { taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task); taskqueue_free(sc->nfe_tq); sc->nfe_tq = NULL; } for (i = 0; i < NFE_MSI_MESSAGES; i++) { if (sc->nfe_intrhand[i] != NULL) { bus_teardown_intr(dev, sc->nfe_irq[i], sc->nfe_intrhand[i]); sc->nfe_intrhand[i] = NULL; } } if (sc->nfe_msi == 0 && sc->nfe_msix == 0) { if (sc->nfe_irq[0] != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nfe_irq[0]); } else { for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) { if (sc->nfe_irq[i] != NULL) { bus_release_resource(dev, SYS_RES_IRQ, rid, sc->nfe_irq[i]); sc->nfe_irq[i] = NULL; } } pci_release_msi(dev); } if (sc->nfe_msix_pba_res != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3), sc->nfe_msix_pba_res); sc->nfe_msix_pba_res = NULL; } if (sc->nfe_msix_res != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2), sc->nfe_msix_res); sc->nfe_msix_res = NULL; } if (sc->nfe_res[0] != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), sc->nfe_res[0]); sc->nfe_res[0] = NULL; } nfe_free_tx_ring(sc, &sc->txq); nfe_free_rx_ring(sc, &sc->rxq); nfe_free_jrx_ring(sc, &sc->jrxq); if 
(sc->nfe_parent_tag) { bus_dma_tag_destroy(sc->nfe_parent_tag); sc->nfe_parent_tag = NULL; } mtx_destroy(&sc->nfe_mtx); return (0); } static int nfe_suspend(device_t dev) { struct nfe_softc *sc; sc = device_get_softc(dev); NFE_LOCK(sc); nfe_stop(sc->nfe_ifp); nfe_set_wol(sc); sc->nfe_suspended = 1; NFE_UNLOCK(sc); return (0); } static int nfe_resume(device_t dev) { struct nfe_softc *sc; if_t ifp; sc = device_get_softc(dev); NFE_LOCK(sc); nfe_power(sc); ifp = sc->nfe_ifp; if (if_getflags(ifp) & IFF_UP) nfe_init_locked(sc); sc->nfe_suspended = 0; NFE_UNLOCK(sc); return (0); } static int nfe_can_use_msix(struct nfe_softc *sc) { static struct msix_blacklist { char *maker; char *product; } msix_blacklists[] = { { "ASUSTeK Computer INC.", "P5N32-SLI PREMIUM" } }; struct msix_blacklist *mblp; char *maker, *product; int count, n, use_msix; /* * Search base board manufacturer and product name table * to see this system has a known MSI/MSI-X issue. */ maker = kern_getenv("smbios.planar.maker"); product = kern_getenv("smbios.planar.product"); use_msix = 1; if (maker != NULL && product != NULL) { count = nitems(msix_blacklists); mblp = msix_blacklists; for (n = 0; n < count; n++) { if (strcmp(maker, mblp->maker) == 0 && strcmp(product, mblp->product) == 0) { use_msix = 0; break; } mblp++; } } if (maker != NULL) freeenv(maker); if (product != NULL) freeenv(product); return (use_msix); } /* Take PHY/NIC out of powerdown, from Linux */ static void nfe_power(struct nfe_softc *sc) { uint32_t pwr; if ((sc->nfe_flags & NFE_PWR_MGMT) == 0) return; NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2); NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC); DELAY(100); NFE_WRITE(sc, NFE_MAC_RESET, 0); DELAY(100); NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2); pwr = NFE_READ(sc, NFE_PWR2_CTL); pwr &= ~NFE_PWR2_WAKEUP_MASK; if (sc->nfe_revid >= 0xa3 && (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 || sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2)) pwr |= NFE_PWR2_REVA3; NFE_WRITE(sc, NFE_PWR2_CTL, pwr); } static void nfe_miibus_statchg(device_t dev) { struct nfe_softc *sc; struct mii_data *mii; if_t ifp; uint32_t rxctl, txctl; sc = device_get_softc(dev); mii = device_get_softc(sc->nfe_miibus); ifp = sc->nfe_ifp; sc->nfe_link = 0; if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: case IFM_1000_T: sc->nfe_link = 1; break; default: break; } } nfe_mac_config(sc, mii); txctl = NFE_READ(sc, NFE_TX_CTL); rxctl = NFE_READ(sc, NFE_RX_CTL); if (sc->nfe_link != 0 && (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { txctl |= NFE_TX_START; rxctl |= NFE_RX_START; } else { txctl &= ~NFE_TX_START; rxctl &= ~NFE_RX_START; } NFE_WRITE(sc, NFE_TX_CTL, txctl); NFE_WRITE(sc, NFE_RX_CTL, rxctl); } static void nfe_mac_config(struct nfe_softc *sc, struct mii_data *mii) { uint32_t link, misc, phy, seed; uint32_t val; NFE_LOCK_ASSERT(sc); phy = NFE_READ(sc, NFE_PHY_IFACE); phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T); seed = NFE_READ(sc, NFE_RNDSEED); seed &= ~NFE_SEED_MASK; misc = NFE_MISC1_MAGIC; link = NFE_MEDIA_SET; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) == 0) { phy |= NFE_PHY_HDX; /* half-duplex */ misc |= NFE_MISC1_HDX; } switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_1000_T: /* full-duplex only */ link |= NFE_MEDIA_1000T; seed |= NFE_SEED_1000T; phy |= NFE_PHY_1000T; break; case IFM_100_TX: link |= NFE_MEDIA_100TX; seed |= NFE_SEED_100TX; phy |= NFE_PHY_100TX; break; case IFM_10_T: 
link |= NFE_MEDIA_10T; seed |= NFE_SEED_10T; break; } if ((phy & 0x10000000) != 0) { if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) val = NFE_R1_MAGIC_1000; else val = NFE_R1_MAGIC_10_100; } else val = NFE_R1_MAGIC_DEFAULT; NFE_WRITE(sc, NFE_SETUP_R1, val); NFE_WRITE(sc, NFE_RNDSEED, seed); /* XXX: gigabit NICs only? */ NFE_WRITE(sc, NFE_PHY_IFACE, phy); NFE_WRITE(sc, NFE_MISC1, misc); NFE_WRITE(sc, NFE_LINKSPEED, link); if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { /* It seems all hardwares supports Rx pause frames. */ val = NFE_READ(sc, NFE_RXFILTER); if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) val |= NFE_PFF_RX_PAUSE; else val &= ~NFE_PFF_RX_PAUSE; NFE_WRITE(sc, NFE_RXFILTER, val); if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) { val = NFE_READ(sc, NFE_MISC1); if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) { NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_ENABLE); val |= NFE_MISC1_TX_PAUSE; } else { val &= ~NFE_MISC1_TX_PAUSE; NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE); } NFE_WRITE(sc, NFE_MISC1, val); } } else { /* disable rx/tx pause frames */ val = NFE_READ(sc, NFE_RXFILTER); val &= ~NFE_PFF_RX_PAUSE; NFE_WRITE(sc, NFE_RXFILTER, val); if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) { NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE); val = NFE_READ(sc, NFE_MISC1); val &= ~NFE_MISC1_TX_PAUSE; NFE_WRITE(sc, NFE_MISC1, val); } } } static int nfe_miibus_readreg(device_t dev, int phy, int reg) { struct nfe_softc *sc = device_get_softc(dev); uint32_t val; int ntries; NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) { NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY); DELAY(100); } NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg); for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) { DELAY(100); if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY)) break; } if (ntries == NFE_TIMEOUT) { DPRINTFN(sc, 2, "timeout waiting for PHY\n"); return 0; } if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) { DPRINTFN(sc, 2, "could not read PHY\n"); return 0; } val = NFE_READ(sc, NFE_PHY_DATA); if (val != 0xffffffff && val != 0) sc->mii_phyaddr = phy; DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val); return (val); } static int nfe_miibus_writereg(device_t dev, int phy, int reg, int val) { struct nfe_softc *sc = device_get_softc(dev); uint32_t ctl; int ntries; NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) { NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY); DELAY(100); } NFE_WRITE(sc, NFE_PHY_DATA, val); ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg; NFE_WRITE(sc, NFE_PHY_CTL, ctl); for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) { DELAY(100); if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY)) break; } #ifdef NFE_DEBUG if (nfedebug >= 2 && ntries == NFE_TIMEOUT) device_printf(sc->nfe_dev, "could not write to PHY\n"); #endif return (0); } struct nfe_dmamap_arg { bus_addr_t nfe_busaddr; }; static int nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) { struct nfe_dmamap_arg ctx; struct nfe_rx_data *data; void *desc; int i, error, descsize; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc = ring->desc64; descsize = sizeof (struct nfe_desc64); } else { desc = ring->desc32; descsize = sizeof (struct nfe_desc32); } ring->cur = ring->next = 0; error = bus_dma_tag_create(sc->nfe_parent_tag, NFE_RING_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ 
NFE_RX_RING_COUNT * descsize, 1, /* maxsize, nsegments */ NFE_RX_RING_COUNT * descsize, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &ring->rx_desc_tag); if (error != 0) { device_printf(sc->nfe_dev, "could not create desc DMA tag\n"); goto fail; } /* allocate memory to desc */ error = bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map); if (error != 0) { device_printf(sc->nfe_dev, "could not create desc DMA map\n"); goto fail; } if (sc->nfe_flags & NFE_40BIT_ADDR) ring->desc64 = desc; else ring->desc32 = desc; /* map desc to device visible address space */ ctx.nfe_busaddr = 0; error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc, NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0); if (error != 0) { device_printf(sc->nfe_dev, "could not load desc DMA map\n"); goto fail; } ring->physaddr = ctx.nfe_busaddr; error = bus_dma_tag_create(sc->nfe_parent_tag, 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES, 1, /* maxsize, nsegments */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &ring->rx_data_tag); if (error != 0) { device_printf(sc->nfe_dev, "could not create Rx DMA tag\n"); goto fail; } error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map); if (error != 0) { device_printf(sc->nfe_dev, "could not create Rx DMA spare map\n"); goto fail; } /* * Pre-allocate Rx buffers and populate Rx ring. */ for (i = 0; i < NFE_RX_RING_COUNT; i++) { data = &sc->rxq.data[i]; data->rx_data_map = NULL; data->m = NULL; error = bus_dmamap_create(ring->rx_data_tag, 0, &data->rx_data_map); if (error != 0) { device_printf(sc->nfe_dev, "could not create Rx DMA map\n"); goto fail; } } fail: return (error); } static void nfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring) { struct nfe_dmamap_arg ctx; struct nfe_rx_data *data; void *desc; int i, error, descsize; if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0) return; if (jumbo_disable != 0) { device_printf(sc->nfe_dev, "disabling jumbo frame support\n"); sc->nfe_jumbo_disable = 1; return; } if (sc->nfe_flags & NFE_40BIT_ADDR) { desc = ring->jdesc64; descsize = sizeof (struct nfe_desc64); } else { desc = ring->jdesc32; descsize = sizeof (struct nfe_desc32); } ring->jcur = ring->jnext = 0; /* Create DMA tag for jumbo Rx ring. */ error = bus_dma_tag_create(sc->nfe_parent_tag, NFE_RING_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsize */ 1, /* nsegments */ NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &ring->jrx_desc_tag); if (error != 0) { device_printf(sc->nfe_dev, "could not create jumbo ring DMA tag\n"); goto fail; } /* Create DMA tag for jumbo Rx buffers. */ error = bus_dma_tag_create(sc->nfe_parent_tag, 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MJUM9BYTES, /* maxsize */ 1, /* nsegments */ MJUM9BYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &ring->jrx_data_tag); if (error != 0) { device_printf(sc->nfe_dev, "could not create jumbo Rx buffer DMA tag\n"); goto fail; } /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. 
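* bus_dmamem_alloc() returns the kernel virtual address in desc, while bus_dmamap_load() reports the bus address through the nfe_dma_map_segs() callback, which leaves it in ctx.nfe_busaddr for ring->jphysaddr.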
*/ error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map); if (error != 0) { device_printf(sc->nfe_dev, "could not allocate DMA'able memory for jumbo Rx ring\n"); goto fail; } if (sc->nfe_flags & NFE_40BIT_ADDR) ring->jdesc64 = desc; else ring->jdesc32 = desc; ctx.nfe_busaddr = 0; error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc, NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0); if (error != 0) { device_printf(sc->nfe_dev, "could not load DMA'able memory for jumbo Rx ring\n"); goto fail; } ring->jphysaddr = ctx.nfe_busaddr; /* Create DMA maps for jumbo Rx buffers. */ error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map); if (error != 0) { device_printf(sc->nfe_dev, "could not create jumbo Rx DMA spare map\n"); goto fail; } for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { data = &sc->jrxq.jdata[i]; data->rx_data_map = NULL; data->m = NULL; error = bus_dmamap_create(ring->jrx_data_tag, 0, &data->rx_data_map); if (error != 0) { device_printf(sc->nfe_dev, "could not create jumbo Rx DMA map\n"); goto fail; } } return; fail: /* * Running without jumbo frame support is ok for most cases * so don't fail on creating dma tag/map for jumbo frame. */ nfe_free_jrx_ring(sc, ring); device_printf(sc->nfe_dev, "disabling jumbo frame support due to " "resource shortage\n"); sc->nfe_jumbo_disable = 1; } static int nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) { void *desc; size_t descsize; int i; ring->cur = ring->next = 0; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc = ring->desc64; descsize = sizeof (struct nfe_desc64); } else { desc = ring->desc32; descsize = sizeof (struct nfe_desc32); } bzero(desc, descsize * NFE_RX_RING_COUNT); for (i = 0; i < NFE_RX_RING_COUNT; i++) { if (nfe_newbuf(sc, i) != 0) return (ENOBUFS); } bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } static int nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring) { void *desc; size_t descsize; int i; ring->jcur = ring->jnext = 0; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc = ring->jdesc64; descsize = sizeof (struct nfe_desc64); } else { desc = ring->jdesc32; descsize = sizeof (struct nfe_desc32); } bzero(desc, descsize * NFE_JUMBO_RX_RING_COUNT); for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { if (nfe_jnewbuf(sc, i) != 0) return (ENOBUFS); } bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } static void nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) { struct nfe_rx_data *data; void *desc; int i; if (sc->nfe_flags & NFE_40BIT_ADDR) desc = ring->desc64; else desc = ring->desc32; for (i = 0; i < NFE_RX_RING_COUNT; i++) { data = &ring->data[i]; if (data->rx_data_map != NULL) { bus_dmamap_destroy(ring->rx_data_tag, data->rx_data_map); data->rx_data_map = NULL; } if (data->m != NULL) { m_freem(data->m); data->m = NULL; } } if (ring->rx_data_tag != NULL) { if (ring->rx_spare_map != NULL) { bus_dmamap_destroy(ring->rx_data_tag, ring->rx_spare_map); ring->rx_spare_map = NULL; } bus_dma_tag_destroy(ring->rx_data_tag); ring->rx_data_tag = NULL; } if (desc != NULL) { bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map); bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map); ring->desc64 = NULL; ring->desc32 = NULL; } if (ring->rx_desc_tag != NULL) { bus_dma_tag_destroy(ring->rx_desc_tag); ring->rx_desc_tag = NULL; } } static void 
nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring) { struct nfe_rx_data *data; void *desc; int i, descsize; if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0) return; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc = ring->jdesc64; descsize = sizeof (struct nfe_desc64); } else { desc = ring->jdesc32; descsize = sizeof (struct nfe_desc32); } for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { data = &ring->jdata[i]; if (data->rx_data_map != NULL) { bus_dmamap_destroy(ring->jrx_data_tag, data->rx_data_map); data->rx_data_map = NULL; } if (data->m != NULL) { m_freem(data->m); data->m = NULL; } } if (ring->jrx_data_tag != NULL) { if (ring->jrx_spare_map != NULL) { bus_dmamap_destroy(ring->jrx_data_tag, ring->jrx_spare_map); ring->jrx_spare_map = NULL; } bus_dma_tag_destroy(ring->jrx_data_tag); ring->jrx_data_tag = NULL; } if (desc != NULL) { bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map); bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map); ring->jdesc64 = NULL; ring->jdesc32 = NULL; } if (ring->jrx_desc_tag != NULL) { bus_dma_tag_destroy(ring->jrx_desc_tag); ring->jrx_desc_tag = NULL; } } static int nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) { struct nfe_dmamap_arg ctx; int i, error; void *desc; int descsize; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc = ring->desc64; descsize = sizeof (struct nfe_desc64); } else { desc = ring->desc32; descsize = sizeof (struct nfe_desc32); } ring->queued = 0; ring->cur = ring->next = 0; error = bus_dma_tag_create(sc->nfe_parent_tag, NFE_RING_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ NFE_TX_RING_COUNT * descsize, 1, /* maxsize, nsegments */ NFE_TX_RING_COUNT * descsize, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &ring->tx_desc_tag); if (error != 0) { device_printf(sc->nfe_dev, "could not create desc DMA tag\n"); goto fail; } error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map); if (error != 0) { device_printf(sc->nfe_dev, "could not create desc DMA map\n"); goto fail; } if (sc->nfe_flags & NFE_40BIT_ADDR) ring->desc64 = desc; else ring->desc32 = desc; ctx.nfe_busaddr = 0; error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc, NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0); if (error != 0) { device_printf(sc->nfe_dev, "could not load desc DMA map\n"); goto fail; } ring->physaddr = ctx.nfe_busaddr; error = bus_dma_tag_create(sc->nfe_parent_tag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, NFE_TSO_MAXSIZE, NFE_MAX_SCATTER, NFE_TSO_MAXSGSIZE, 0, NULL, NULL, &ring->tx_data_tag); if (error != 0) { device_printf(sc->nfe_dev, "could not create Tx DMA tag\n"); goto fail; } for (i = 0; i < NFE_TX_RING_COUNT; i++) { error = bus_dmamap_create(ring->tx_data_tag, 0, &ring->data[i].tx_data_map); if (error != 0) { device_printf(sc->nfe_dev, "could not create Tx DMA map\n"); goto fail; } } fail: return (error); } static void nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) { void *desc; size_t descsize; sc->nfe_force_tx = 0; ring->queued = 0; ring->cur = ring->next = 0; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc = ring->desc64; descsize = sizeof (struct nfe_desc64); } else { desc = ring->desc32; descsize = sizeof (struct nfe_desc32); } bzero(desc, descsize * NFE_TX_RING_COUNT); bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } static void 
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) { struct nfe_tx_data *data; void *desc; int i, descsize; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc = ring->desc64; descsize = sizeof (struct nfe_desc64); } else { desc = ring->desc32; descsize = sizeof (struct nfe_desc32); } for (i = 0; i < NFE_TX_RING_COUNT; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map); m_freem(data->m); data->m = NULL; } if (data->tx_data_map != NULL) { bus_dmamap_destroy(ring->tx_data_tag, data->tx_data_map); data->tx_data_map = NULL; } } if (ring->tx_data_tag != NULL) { bus_dma_tag_destroy(ring->tx_data_tag); ring->tx_data_tag = NULL; } if (desc != NULL) { bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map); bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map); ring->desc64 = NULL; ring->desc32 = NULL; bus_dma_tag_destroy(ring->tx_desc_tag); ring->tx_desc_tag = NULL; } } #ifdef DEVICE_POLLING static poll_handler_t nfe_poll; static int nfe_poll(if_t ifp, enum poll_cmd cmd, int count) { struct nfe_softc *sc = if_getsoftc(ifp); uint32_t r; int rx_npkts = 0; NFE_LOCK(sc); if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { NFE_UNLOCK(sc); return (rx_npkts); } if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) rx_npkts = nfe_jrxeof(sc, count, &rx_npkts); else rx_npkts = nfe_rxeof(sc, count, &rx_npkts); nfe_txeof(sc); if (!if_sendq_empty(ifp)) nfe_start_locked(ifp); if (cmd == POLL_AND_CHECK_STATUS) { if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) { NFE_UNLOCK(sc); return (rx_npkts); } NFE_WRITE(sc, sc->nfe_irq_status, r); if (r & NFE_IRQ_LINK) { NFE_READ(sc, NFE_PHY_STATUS); NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); DPRINTF(sc, "link state changed\n"); } } NFE_UNLOCK(sc); return (rx_npkts); } #endif /* DEVICE_POLLING */ static void nfe_set_intr(struct nfe_softc *sc) { if (sc->nfe_msi != 0) NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED); } /* In MSIX, a write to mask registers behaves as XOR. */ static __inline void nfe_enable_intr(struct nfe_softc *sc) { if (sc->nfe_msix != 0) { /* XXX Should have a better way to enable interrupts! */ if (NFE_READ(sc, sc->nfe_irq_mask) == 0) NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs); } else NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs); } static __inline void nfe_disable_intr(struct nfe_softc *sc) { if (sc->nfe_msix != 0) { /* XXX Should have a better way to disable interrupts!
*/ if (NFE_READ(sc, sc->nfe_irq_mask) != 0) NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs); } else NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs); } static int nfe_ioctl(if_t ifp, u_long cmd, caddr_t data) { struct nfe_softc *sc; struct ifreq *ifr; struct mii_data *mii; int error, init, mask; sc = if_getsoftc(ifp); ifr = (struct ifreq *) data; error = 0; init = 0; switch (cmd) { case SIOCSIFMTU: if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU) error = EINVAL; else if (if_getmtu(ifp) != ifr->ifr_mtu) { if ((((sc->nfe_flags & NFE_JUMBO_SUP) == 0) || (sc->nfe_jumbo_disable != 0)) && ifr->ifr_mtu > ETHERMTU) error = EINVAL; else { NFE_LOCK(sc); if_setmtu(ifp, ifr->ifr_mtu); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); nfe_init_locked(sc); } NFE_UNLOCK(sc); } } break; case SIOCSIFFLAGS: NFE_LOCK(sc); if (if_getflags(ifp) & IFF_UP) { /* * If only the PROMISC or ALLMULTI flag changes, then * don't do a full re-init of the chip, just update * the Rx filter. */ if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) && ((if_getflags(ifp) ^ sc->nfe_if_flags) & (IFF_ALLMULTI | IFF_PROMISC)) != 0) nfe_setmulti(sc); else nfe_init_locked(sc); } else { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) nfe_stop(ifp); } sc->nfe_if_flags = if_getflags(ifp); NFE_UNLOCK(sc); error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { NFE_LOCK(sc); nfe_setmulti(sc); NFE_UNLOCK(sc); error = 0; } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: mii = device_get_softc(sc->nfe_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); break; case SIOCSIFCAP: mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); #ifdef DEVICE_POLLING if ((mask & IFCAP_POLLING) != 0) { if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) { error = ether_poll_register(nfe_poll, ifp); if (error) break; NFE_LOCK(sc); nfe_disable_intr(sc); if_setcapenablebit(ifp, IFCAP_POLLING, 0); NFE_UNLOCK(sc); } else { error = ether_poll_deregister(ifp); /* Enable interrupt even in error case */ NFE_LOCK(sc); nfe_enable_intr(sc); if_setcapenablebit(ifp, 0, IFCAP_POLLING); NFE_UNLOCK(sc); } } #endif /* DEVICE_POLLING */ if ((mask & IFCAP_WOL_MAGIC) != 0 && (if_getcapabilities(ifp) & IFCAP_WOL_MAGIC) != 0) if_togglecapenable(ifp, IFCAP_WOL_MAGIC); if ((mask & IFCAP_TXCSUM) != 0 && (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) { if_togglecapenable(ifp, IFCAP_TXCSUM); if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0) if_sethwassistbits(ifp, NFE_CSUM_FEATURES, 0); else if_sethwassistbits(ifp, 0, NFE_CSUM_FEATURES); } if ((mask & IFCAP_RXCSUM) != 0 && (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0) { if_togglecapenable(ifp, IFCAP_RXCSUM); init++; } if ((mask & IFCAP_TSO4) != 0 && (if_getcapabilities(ifp) & IFCAP_TSO4) != 0) { if_togglecapenable(ifp, IFCAP_TSO4); if ((IFCAP_TSO4 & if_getcapenable(ifp)) != 0) if_sethwassistbits(ifp, CSUM_TSO, 0); else if_sethwassistbits(ifp, 0, CSUM_TSO); } if ((mask & IFCAP_VLAN_HWTSO) != 0 && (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0) if_togglecapenable(ifp, IFCAP_VLAN_HWTSO); if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) { if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING); if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0) if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWTSO); init++; } /* * XXX * It seems that VLAN stripping requires Rx checksum offload. * Unfortunately FreeBSD has no way to disable only Rx side * VLAN stripping. 
So when we know Rx checksum offload is * disabled turn entire hardware VLAN assist off. */ if ((if_getcapenable(ifp) & IFCAP_RXCSUM) == 0) { if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) init++; if_setcapenablebit(ifp, 0, (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO)); } if (init > 0 && (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); nfe_init(sc); } if_vlancap(ifp); break; default: error = ether_ioctl(ifp, cmd, data); break; } return (error); } static int nfe_intr(void *arg) { struct nfe_softc *sc; uint32_t status; sc = (struct nfe_softc *)arg; status = NFE_READ(sc, sc->nfe_irq_status); if (status == 0 || status == 0xffffffff) return (FILTER_STRAY); nfe_disable_intr(sc); taskqueue_enqueue(sc->nfe_tq, &sc->nfe_int_task); return (FILTER_HANDLED); } static void nfe_int_task(void *arg, int pending) { struct nfe_softc *sc = arg; if_t ifp = sc->nfe_ifp; uint32_t r; int domore; NFE_LOCK(sc); if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) { nfe_enable_intr(sc); NFE_UNLOCK(sc); return; /* not for us */ } NFE_WRITE(sc, sc->nfe_irq_status, r); DPRINTFN(sc, 5, "nfe_intr: interrupt register %x\n", r); #ifdef DEVICE_POLLING if (if_getcapenable(ifp) & IFCAP_POLLING) { NFE_UNLOCK(sc); return; } #endif if (r & NFE_IRQ_LINK) { NFE_READ(sc, NFE_PHY_STATUS); NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); DPRINTF(sc, "link state changed\n"); } if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) { NFE_UNLOCK(sc); nfe_disable_intr(sc); return; } domore = 0; /* check Rx ring */ if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) domore = nfe_jrxeof(sc, sc->nfe_process_limit, NULL); else domore = nfe_rxeof(sc, sc->nfe_process_limit, NULL); /* check Tx ring */ nfe_txeof(sc); if (!if_sendq_empty(ifp)) nfe_start_locked(ifp); NFE_UNLOCK(sc); if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) { taskqueue_enqueue(sc->nfe_tq, &sc->nfe_int_task); return; } /* Reenable interrupts. */ nfe_enable_intr(sc); } static __inline void nfe_discard_rxbuf(struct nfe_softc *sc, int idx) { struct nfe_desc32 *desc32; struct nfe_desc64 *desc64; struct nfe_rx_data *data; struct mbuf *m; data = &sc->rxq.data[idx]; m = data->m; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc64 = &sc->rxq.desc64[idx]; /* VLAN packet may have overwritten it. */ desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr)); desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr)); desc64->length = htole16(m->m_len); desc64->flags = htole16(NFE_RX_READY); } else { desc32 = &sc->rxq.desc32[idx]; desc32->length = htole16(m->m_len); desc32->flags = htole16(NFE_RX_READY); } } static __inline void nfe_discard_jrxbuf(struct nfe_softc *sc, int idx) { struct nfe_desc32 *desc32; struct nfe_desc64 *desc64; struct nfe_rx_data *data; struct mbuf *m; data = &sc->jrxq.jdata[idx]; m = data->m; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc64 = &sc->jrxq.jdesc64[idx]; /* VLAN packet may have overwritten it. 
*/ desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr)); desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr)); desc64->length = htole16(m->m_len); desc64->flags = htole16(NFE_RX_READY); } else { desc32 = &sc->jrxq.jdesc32[idx]; desc32->length = htole16(m->m_len); desc32->flags = htole16(NFE_RX_READY); } } static int nfe_newbuf(struct nfe_softc *sc, int idx) { struct nfe_rx_data *data; struct nfe_desc32 *desc32; struct nfe_desc64 *desc64; struct mbuf *m; bus_dma_segment_t segs[1]; bus_dmamap_t map; int nsegs; m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = MCLBYTES; m_adj(m, ETHER_ALIGN); if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) { m_freem(m); return (ENOBUFS); } KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); data = &sc->rxq.data[idx]; if (data->m != NULL) { bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map); } map = data->rx_data_map; data->rx_data_map = sc->rxq.rx_spare_map; sc->rxq.rx_spare_map = map; bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map, BUS_DMASYNC_PREREAD); data->paddr = segs[0].ds_addr; data->m = m; /* update mapping address in h/w descriptor */ if (sc->nfe_flags & NFE_40BIT_ADDR) { desc64 = &sc->rxq.desc64[idx]; desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr)); desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr)); desc64->length = htole16(segs[0].ds_len); desc64->flags = htole16(NFE_RX_READY); } else { desc32 = &sc->rxq.desc32[idx]; desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr)); desc32->length = htole16(segs[0].ds_len); desc32->flags = htole16(NFE_RX_READY); } return (0); } static int nfe_jnewbuf(struct nfe_softc *sc, int idx) { struct nfe_rx_data *data; struct nfe_desc32 *desc32; struct nfe_desc64 *desc64; struct mbuf *m; bus_dma_segment_t segs[1]; bus_dmamap_t map; int nsegs; m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES); if (m == NULL) return (ENOBUFS); m->m_pkthdr.len = m->m_len = MJUM9BYTES; m_adj(m, ETHER_ALIGN); if (bus_dmamap_load_mbuf_sg(sc->jrxq.jrx_data_tag, sc->jrxq.jrx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) { m_freem(m); return (ENOBUFS); } KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); data = &sc->jrxq.jdata[idx]; if (data->m != NULL) { bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->jrxq.jrx_data_tag, data->rx_data_map); } map = data->rx_data_map; data->rx_data_map = sc->jrxq.jrx_spare_map; sc->jrxq.jrx_spare_map = map; bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map, BUS_DMASYNC_PREREAD); data->paddr = segs[0].ds_addr; data->m = m; /* update mapping address in h/w descriptor */ if (sc->nfe_flags & NFE_40BIT_ADDR) { desc64 = &sc->jrxq.jdesc64[idx]; desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr)); desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr)); desc64->length = htole16(segs[0].ds_len); desc64->flags = htole16(NFE_RX_READY); } else { desc32 = &sc->jrxq.jdesc32[idx]; desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr)); desc32->length = htole16(segs[0].ds_len); desc32->flags = htole16(NFE_RX_READY); } return (0); } static int nfe_rxeof(struct nfe_softc *sc, int count, int *rx_npktsp) { if_t ifp = sc->nfe_ifp; struct nfe_desc32 *desc32; struct nfe_desc64 *desc64; struct nfe_rx_data *data; struct mbuf *m; uint16_t flags; int len, prog, rx_npkts; uint32_t vtag = 
0; rx_npkts = 0; NFE_LOCK_ASSERT(sc); bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, BUS_DMASYNC_POSTREAD); for (prog = 0;;NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) { if (count <= 0) break; count--; data = &sc->rxq.data[sc->rxq.cur]; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc64 = &sc->rxq.desc64[sc->rxq.cur]; vtag = le32toh(desc64->physaddr[1]); flags = le16toh(desc64->flags); len = le16toh(desc64->length) & NFE_RX_LEN_MASK; } else { desc32 = &sc->rxq.desc32[sc->rxq.cur]; flags = le16toh(desc32->flags); len = le16toh(desc32->length) & NFE_RX_LEN_MASK; } if (flags & NFE_RX_READY) break; prog++; if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { if (!(flags & NFE_RX_VALID_V1)) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); nfe_discard_rxbuf(sc, sc->rxq.cur); continue; } if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) { flags &= ~NFE_RX_ERROR; len--; /* fix buffer length */ } } else { if (!(flags & NFE_RX_VALID_V2)) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); nfe_discard_rxbuf(sc, sc->rxq.cur); continue; } if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) { flags &= ~NFE_RX_ERROR; len--; /* fix buffer length */ } } if (flags & NFE_RX_ERROR) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); nfe_discard_rxbuf(sc, sc->rxq.cur); continue; } m = data->m; if (nfe_newbuf(sc, sc->rxq.cur) != 0) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); nfe_discard_rxbuf(sc, sc->rxq.cur); continue; } if ((vtag & NFE_RX_VTAG) != 0 && (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) { m->m_pkthdr.ether_vtag = vtag & 0xffff; m->m_flags |= M_VLANTAG; } m->m_pkthdr.len = m->m_len = len; m->m_pkthdr.rcvif = ifp; if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) { if ((flags & NFE_RX_IP_CSUMOK) != 0) { m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; m->m_pkthdr.csum_flags |= CSUM_IP_VALID; if ((flags & NFE_RX_TCP_CSUMOK) != 0 || (flags & NFE_RX_UDP_CSUMOK) != 0) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } } } if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); NFE_UNLOCK(sc); if_input(ifp, m); NFE_LOCK(sc); rx_npkts++; } if (prog > 0) bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); if (rx_npktsp != NULL) *rx_npktsp = rx_npkts; return (count > 0 ? 
0 : EAGAIN); } static int nfe_jrxeof(struct nfe_softc *sc, int count, int *rx_npktsp) { if_t ifp = sc->nfe_ifp; struct nfe_desc32 *desc32; struct nfe_desc64 *desc64; struct nfe_rx_data *data; struct mbuf *m; uint16_t flags; int len, prog, rx_npkts; uint32_t vtag = 0; rx_npkts = 0; NFE_LOCK_ASSERT(sc); bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map, BUS_DMASYNC_POSTREAD); for (prog = 0;;NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT), vtag = 0) { if (count <= 0) break; count--; data = &sc->jrxq.jdata[sc->jrxq.jcur]; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc64 = &sc->jrxq.jdesc64[sc->jrxq.jcur]; vtag = le32toh(desc64->physaddr[1]); flags = le16toh(desc64->flags); len = le16toh(desc64->length) & NFE_RX_LEN_MASK; } else { desc32 = &sc->jrxq.jdesc32[sc->jrxq.jcur]; flags = le16toh(desc32->flags); len = le16toh(desc32->length) & NFE_RX_LEN_MASK; } if (flags & NFE_RX_READY) break; prog++; if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { if (!(flags & NFE_RX_VALID_V1)) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); nfe_discard_jrxbuf(sc, sc->jrxq.jcur); continue; } if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) { flags &= ~NFE_RX_ERROR; len--; /* fix buffer length */ } } else { if (!(flags & NFE_RX_VALID_V2)) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); nfe_discard_jrxbuf(sc, sc->jrxq.jcur); continue; } if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) { flags &= ~NFE_RX_ERROR; len--; /* fix buffer length */ } } if (flags & NFE_RX_ERROR) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); nfe_discard_jrxbuf(sc, sc->jrxq.jcur); continue; } m = data->m; if (nfe_jnewbuf(sc, sc->jrxq.jcur) != 0) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); nfe_discard_jrxbuf(sc, sc->jrxq.jcur); continue; } if ((vtag & NFE_RX_VTAG) != 0 && (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) { m->m_pkthdr.ether_vtag = vtag & 0xffff; m->m_flags |= M_VLANTAG; } m->m_pkthdr.len = m->m_len = len; m->m_pkthdr.rcvif = ifp; if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) { if ((flags & NFE_RX_IP_CSUMOK) != 0) { m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; m->m_pkthdr.csum_flags |= CSUM_IP_VALID; if ((flags & NFE_RX_TCP_CSUMOK) != 0 || (flags & NFE_RX_UDP_CSUMOK) != 0) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } } } if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); NFE_UNLOCK(sc); if_input(ifp, m); NFE_LOCK(sc); rx_npkts++; } if (prog > 0) bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); if (rx_npktsp != NULL) *rx_npktsp = rx_npkts; return (count > 0 ? 
0 : EAGAIN); } static void nfe_txeof(struct nfe_softc *sc) { if_t ifp = sc->nfe_ifp; struct nfe_desc32 *desc32; struct nfe_desc64 *desc64; struct nfe_tx_data *data = NULL; uint16_t flags; int cons, prog; NFE_LOCK_ASSERT(sc); bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, BUS_DMASYNC_POSTREAD); prog = 0; for (cons = sc->txq.next; cons != sc->txq.cur; NFE_INC(cons, NFE_TX_RING_COUNT)) { if (sc->nfe_flags & NFE_40BIT_ADDR) { desc64 = &sc->txq.desc64[cons]; flags = le16toh(desc64->flags); } else { desc32 = &sc->txq.desc32[cons]; flags = le16toh(desc32->flags); } if (flags & NFE_TX_VALID) break; prog++; sc->txq.queued--; data = &sc->txq.data[cons]; if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { if ((flags & NFE_TX_LASTFRAG_V1) == 0) continue; if ((flags & NFE_TX_ERROR_V1) != 0) { device_printf(sc->nfe_dev, "tx v1 error 0x%4b\n", flags, NFE_V1_TXERR); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } else if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); } else { if ((flags & NFE_TX_LASTFRAG_V2) == 0) continue; if ((flags & NFE_TX_ERROR_V2) != 0) { device_printf(sc->nfe_dev, "tx v2 error 0x%4b\n", flags, NFE_V2_TXERR); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } else if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); } /* last fragment of the mbuf chain transmitted */ KASSERT(data->m != NULL, ("%s: freeing NULL mbuf!", __func__)); bus_dmamap_sync(sc->txq.tx_data_tag, data->tx_data_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->txq.tx_data_tag, data->tx_data_map); m_freem(data->m); data->m = NULL; } if (prog > 0) { sc->nfe_force_tx = 0; sc->txq.next = cons; if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); if (sc->txq.queued == 0) sc->nfe_watchdog_timer = 0; } } static int nfe_encap(struct nfe_softc *sc, struct mbuf **m_head) { struct nfe_desc32 *desc32 = NULL; struct nfe_desc64 *desc64 = NULL; bus_dmamap_t map; bus_dma_segment_t segs[NFE_MAX_SCATTER]; int error, i, nsegs, prod, si; uint32_t tsosegsz; uint16_t cflags, flags; struct mbuf *m; prod = si = sc->txq.cur; map = sc->txq.data[prod].tx_data_map; error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs, &nsegs, BUS_DMA_NOWAIT); if (error == EFBIG) { m = m_collapse(*m_head, M_NOWAIT, NFE_MAX_SCATTER); if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } *m_head = m; error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } } else if (error != 0) return (error); if (nsegs == 0) { m_freem(*m_head); *m_head = NULL; return (EIO); } if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) { bus_dmamap_unload(sc->txq.tx_data_tag, map); return (ENOBUFS); } m = *m_head; cflags = flags = 0; tsosegsz = 0; if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { tsosegsz = (uint32_t)m->m_pkthdr.tso_segsz << NFE_TX_TSO_SHIFT; cflags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM); cflags |= NFE_TX_TSO; } else if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) { if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) cflags |= NFE_TX_IP_CSUM; if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0) cflags |= NFE_TX_TCP_UDP_CSUM; if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) cflags |= NFE_TX_TCP_UDP_CSUM; } for (i = 0; i < nsegs; i++) { if (sc->nfe_flags & NFE_40BIT_ADDR) { desc64 = &sc->txq.desc64[prod]; desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[i].ds_addr)); desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[i].ds_addr)); desc64->vtag = 0; desc64->length = htole16(segs[i].ds_len - 1); desc64->flags = htole16(flags); } else { desc32 = 
&sc->txq.desc32[prod]; desc32->physaddr = htole32(NFE_ADDR_LO(segs[i].ds_addr)); desc32->length = htole16(segs[i].ds_len - 1); desc32->flags = htole16(flags); } /* * Setting of the valid bit in the first descriptor is * deferred until the whole chain is fully setup. */ flags |= NFE_TX_VALID; sc->txq.queued++; NFE_INC(prod, NFE_TX_RING_COUNT); } /* * the whole mbuf chain has been DMA mapped, fix last/first descriptor. * csum flags, vtag and TSO belong to the first fragment only. */ if (sc->nfe_flags & NFE_40BIT_ADDR) { desc64->flags |= htole16(NFE_TX_LASTFRAG_V2); desc64 = &sc->txq.desc64[si]; if ((m->m_flags & M_VLANTAG) != 0) desc64->vtag = htole32(NFE_TX_VTAG | m->m_pkthdr.ether_vtag); if (tsosegsz != 0) { /* * XXX * The following indicates the descriptor element * is a 32bit quantity. */ desc64->length |= htole16((uint16_t)tsosegsz); desc64->flags |= htole16(tsosegsz >> 16); } /* * finally, set the valid/checksum/TSO bit in the first * descriptor. */ desc64->flags |= htole16(NFE_TX_VALID | cflags); } else { if (sc->nfe_flags & NFE_JUMBO_SUP) desc32->flags |= htole16(NFE_TX_LASTFRAG_V2); else desc32->flags |= htole16(NFE_TX_LASTFRAG_V1); desc32 = &sc->txq.desc32[si]; if (tsosegsz != 0) { /* * XXX * The following indicates the descriptor element * is a 32bit quantity. */ desc32->length |= htole16((uint16_t)tsosegsz); desc32->flags |= htole16(tsosegsz >> 16); } /* * finally, set the valid/checksum/TSO bit in the first * descriptor. */ desc32->flags |= htole16(NFE_TX_VALID | cflags); } sc->txq.cur = prod; prod = (prod + NFE_TX_RING_COUNT - 1) % NFE_TX_RING_COUNT; sc->txq.data[si].tx_data_map = sc->txq.data[prod].tx_data_map; sc->txq.data[prod].tx_data_map = map; sc->txq.data[prod].m = m; bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE); return (0); } struct nfe_hash_maddr_ctx { uint8_t addr[ETHER_ADDR_LEN]; uint8_t mask[ETHER_ADDR_LEN]; }; static u_int nfe_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { struct nfe_hash_maddr_ctx *ctx = arg; uint8_t *addrp, mcaddr; int j; addrp = LLADDR(sdl); for (j = 0; j < ETHER_ADDR_LEN; j++) { mcaddr = addrp[j]; ctx->addr[j] &= mcaddr; ctx->mask[j] &= ~mcaddr; } return (1); } static void nfe_setmulti(struct nfe_softc *sc) { if_t ifp = sc->nfe_ifp; struct nfe_hash_maddr_ctx ctx; uint32_t filter; uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; int i; NFE_LOCK_ASSERT(sc); if ((if_getflags(ifp) & (IFF_ALLMULTI | IFF_PROMISC)) != 0) { bzero(ctx.addr, ETHER_ADDR_LEN); bzero(ctx.mask, ETHER_ADDR_LEN); goto done; } bcopy(etherbroadcastaddr, ctx.addr, ETHER_ADDR_LEN); bcopy(etherbroadcastaddr, ctx.mask, ETHER_ADDR_LEN); if_foreach_llmaddr(ifp, nfe_hash_maddr, &ctx); for (i = 0; i < ETHER_ADDR_LEN; i++) { ctx.mask[i] |= ctx.addr[i]; } done: ctx.addr[0] |= 0x01; /* make sure multicast bit is set */ NFE_WRITE(sc, NFE_MULTIADDR_HI, ctx.addr[3] << 24 | ctx.addr[2] << 16 | ctx.addr[1] << 8 | ctx.addr[0]); NFE_WRITE(sc, NFE_MULTIADDR_LO, ctx.addr[5] << 8 | ctx.addr[4]); NFE_WRITE(sc, NFE_MULTIMASK_HI, ctx.mask[3] << 24 | ctx.mask[2] << 16 | ctx.mask[1] << 8 | ctx.mask[0]); NFE_WRITE(sc, NFE_MULTIMASK_LO, ctx.mask[5] << 8 | ctx.mask[4]); filter = NFE_READ(sc, NFE_RXFILTER); filter &= NFE_PFF_RX_PAUSE; filter |= NFE_RXFILTER_MAGIC; filter |= (if_getflags(ifp) & IFF_PROMISC) ? 
NFE_PFF_PROMISC : NFE_PFF_U2M; NFE_WRITE(sc, NFE_RXFILTER, filter); } static void nfe_start(if_t ifp) { struct nfe_softc *sc = if_getsoftc(ifp); NFE_LOCK(sc); nfe_start_locked(ifp); NFE_UNLOCK(sc); } static void nfe_start_locked(if_t ifp) { struct nfe_softc *sc = if_getsoftc(ifp); struct mbuf *m0; int enq = 0; NFE_LOCK_ASSERT(sc); if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING || sc->nfe_link == 0) return; while (!if_sendq_empty(ifp)) { m0 = if_dequeue(ifp); if (m0 == NULL) break; if (nfe_encap(sc, &m0) != 0) { if (m0 == NULL) break; if_sendq_prepend(ifp, m0); if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); break; } enq++; if_etherbpfmtap(ifp, m0); } if (enq > 0) { bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* kick Tx */ NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl); /* * Set a timeout in case the chip goes out to lunch. */ sc->nfe_watchdog_timer = 5; } } static void nfe_watchdog(if_t ifp) { struct nfe_softc *sc = if_getsoftc(ifp); if (sc->nfe_watchdog_timer == 0 || --sc->nfe_watchdog_timer) return; /* Check if we've lost Tx completion interrupt. */ nfe_txeof(sc); if (sc->txq.queued == 0) { if_printf(ifp, "watchdog timeout (missed Tx interrupts) " "-- recovering\n"); if (!if_sendq_empty(ifp)) nfe_start_locked(ifp); return; } /* Check if we've lost start Tx command. */ sc->nfe_force_tx++; if (sc->nfe_force_tx <= 3) { /* * If this is the case for watchdog timeout, the following * code should go to nfe_txeof(). */ NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl); return; } sc->nfe_force_tx = 0; if_printf(ifp, "watchdog timeout\n"); if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); nfe_init_locked(sc); } static void nfe_init(void *xsc) { struct nfe_softc *sc = xsc; NFE_LOCK(sc); nfe_init_locked(sc); NFE_UNLOCK(sc); } static void nfe_init_locked(void *xsc) { struct nfe_softc *sc = xsc; if_t ifp = sc->nfe_ifp; struct mii_data *mii; uint32_t val; int error; NFE_LOCK_ASSERT(sc); mii = device_get_softc(sc->nfe_miibus); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) return; nfe_stop(ifp); sc->nfe_framesize = if_getmtu(ifp) + NFE_RX_HEADERS; nfe_init_tx_ring(sc, &sc->txq); if (sc->nfe_framesize > (MCLBYTES - ETHER_HDR_LEN)) error = nfe_init_jrx_ring(sc, &sc->jrxq); else error = nfe_init_rx_ring(sc, &sc->rxq); if (error != 0) { device_printf(sc->nfe_dev, "initialization failed: no memory for rx buffers\n"); nfe_stop(ifp); return; } val = 0; if ((sc->nfe_flags & NFE_CORRECT_MACADDR) != 0) val |= NFE_MAC_ADDR_INORDER; NFE_WRITE(sc, NFE_TX_UNK, val); NFE_WRITE(sc, NFE_STATUS, 0); if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE); sc->rxtxctl = NFE_RXTX_BIT2; if (sc->nfe_flags & NFE_40BIT_ADDR) sc->rxtxctl |= NFE_RXTX_V3MAGIC; else if (sc->nfe_flags & NFE_JUMBO_SUP) sc->rxtxctl |= NFE_RXTX_V2MAGIC; if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) sc->rxtxctl |= NFE_RXTX_RXCSUM; if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP; NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl); DELAY(10); NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE); else NFE_WRITE(sc, NFE_VTAG_CTL, 0); NFE_WRITE(sc, NFE_SETUP_R6, 0); /* set MAC address */ nfe_set_macaddr(sc, if_getlladdr(ifp)); /* tell MAC where rings are in memory */ if (sc->nfe_framesize > MCLBYTES - 
ETHER_HDR_LEN) { NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, NFE_ADDR_HI(sc->jrxq.jphysaddr)); NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, NFE_ADDR_LO(sc->jrxq.jphysaddr)); } else { NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, NFE_ADDR_HI(sc->rxq.physaddr)); NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, NFE_ADDR_LO(sc->rxq.physaddr)); } NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, NFE_ADDR_HI(sc->txq.physaddr)); NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr)); NFE_WRITE(sc, NFE_RING_SIZE, (NFE_RX_RING_COUNT - 1) << 16 | (NFE_TX_RING_COUNT - 1)); NFE_WRITE(sc, NFE_RXBUFSZ, sc->nfe_framesize); /* force MAC to wakeup */ val = NFE_READ(sc, NFE_PWR_STATE); if ((val & NFE_PWR_WAKEUP) == 0) NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_WAKEUP); DELAY(10); val = NFE_READ(sc, NFE_PWR_STATE); NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_VALID); #if 1 /* configure interrupts coalescing/mitigation */ NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT); #else /* no interrupt mitigation: one interrupt per packet */ NFE_WRITE(sc, NFE_IMTIMER, 970); #endif NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC_10_100); NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC); NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC); /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */ NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC); NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC); /* Disable WOL. */ NFE_WRITE(sc, NFE_WOL_CTL, 0); sc->rxtxctl &= ~NFE_RXTX_BIT2; NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); DELAY(10); NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl); /* set Rx filter */ nfe_setmulti(sc); /* enable Rx */ NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START); /* enable Tx */ NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START); NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); /* Clear hardware stats. */ nfe_stats_clear(sc); #ifdef DEVICE_POLLING if (if_getcapenable(ifp) & IFCAP_POLLING) nfe_disable_intr(sc); else #endif nfe_set_intr(sc); nfe_enable_intr(sc); /* enable interrupts */ if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); sc->nfe_link = 0; mii_mediachg(mii); callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc); } static void nfe_stop(if_t ifp) { struct nfe_softc *sc = if_getsoftc(ifp); struct nfe_rx_ring *rx_ring; struct nfe_jrx_ring *jrx_ring; struct nfe_tx_ring *tx_ring; struct nfe_rx_data *rdata; struct nfe_tx_data *tdata; int i; NFE_LOCK_ASSERT(sc); sc->nfe_watchdog_timer = 0; if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)); callout_stop(&sc->nfe_stat_ch); /* abort Tx */ NFE_WRITE(sc, NFE_TX_CTL, 0); /* disable Rx */ NFE_WRITE(sc, NFE_RX_CTL, 0); /* disable interrupts */ nfe_disable_intr(sc); sc->nfe_link = 0; /* free Rx and Tx mbufs still in the queues. 
*/ rx_ring = &sc->rxq; for (i = 0; i < NFE_RX_RING_COUNT; i++) { rdata = &rx_ring->data[i]; if (rdata->m != NULL) { bus_dmamap_sync(rx_ring->rx_data_tag, rdata->rx_data_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rx_ring->rx_data_tag, rdata->rx_data_map); m_freem(rdata->m); rdata->m = NULL; } } if ((sc->nfe_flags & NFE_JUMBO_SUP) != 0) { jrx_ring = &sc->jrxq; for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { rdata = &jrx_ring->jdata[i]; if (rdata->m != NULL) { bus_dmamap_sync(jrx_ring->jrx_data_tag, rdata->rx_data_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(jrx_ring->jrx_data_tag, rdata->rx_data_map); m_freem(rdata->m); rdata->m = NULL; } } } tx_ring = &sc->txq; for (i = 0; i < NFE_RX_RING_COUNT; i++) { tdata = &tx_ring->data[i]; if (tdata->m != NULL) { bus_dmamap_sync(tx_ring->tx_data_tag, tdata->tx_data_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(tx_ring->tx_data_tag, tdata->tx_data_map); m_freem(tdata->m); tdata->m = NULL; } } /* Update hardware stats. */ nfe_stats_update(sc); } static int nfe_ifmedia_upd(if_t ifp) { struct nfe_softc *sc = if_getsoftc(ifp); struct mii_data *mii; NFE_LOCK(sc); mii = device_get_softc(sc->nfe_miibus); mii_mediachg(mii); NFE_UNLOCK(sc); return (0); } static void nfe_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) { struct nfe_softc *sc; struct mii_data *mii; sc = if_getsoftc(ifp); NFE_LOCK(sc); mii = device_get_softc(sc->nfe_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; NFE_UNLOCK(sc); } void nfe_tick(void *xsc) { struct nfe_softc *sc; struct mii_data *mii; if_t ifp; sc = (struct nfe_softc *)xsc; NFE_LOCK_ASSERT(sc); ifp = sc->nfe_ifp; mii = device_get_softc(sc->nfe_miibus); mii_tick(mii); nfe_stats_update(sc); nfe_watchdog(ifp); callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc); } static int nfe_shutdown(device_t dev) { return (nfe_suspend(dev)); } static void nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr) { uint32_t val; if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) { val = NFE_READ(sc, NFE_MACADDR_LO); addr[0] = (val >> 8) & 0xff; addr[1] = (val & 0xff); val = NFE_READ(sc, NFE_MACADDR_HI); addr[2] = (val >> 24) & 0xff; addr[3] = (val >> 16) & 0xff; addr[4] = (val >> 8) & 0xff; addr[5] = (val & 0xff); } else { val = NFE_READ(sc, NFE_MACADDR_LO); addr[5] = (val >> 8) & 0xff; addr[4] = (val & 0xff); val = NFE_READ(sc, NFE_MACADDR_HI); addr[3] = (val >> 24) & 0xff; addr[2] = (val >> 16) & 0xff; addr[1] = (val >> 8) & 0xff; addr[0] = (val & 0xff); } } static void nfe_set_macaddr(struct nfe_softc *sc, uint8_t *addr) { NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] << 8 | addr[4]); NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]); } /* * Map a single buffer address. 
*/ static void nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct nfe_dmamap_arg *ctx; if (error != 0) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); ctx = (struct nfe_dmamap_arg *)arg; ctx->nfe_busaddr = segs[0].ds_addr; } static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) { int error, value; if (!arg1) return (EINVAL); value = *(int *)arg1; error = sysctl_handle_int(oidp, &value, 0, req); if (error || !req->newptr) return (error); if (value < low || value > high) return (EINVAL); *(int *)arg1 = value; return (0); } static int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS) { return (sysctl_int_range(oidp, arg1, arg2, req, NFE_PROC_MIN, NFE_PROC_MAX)); } #define NFE_SYSCTL_STAT_ADD32(c, h, n, p, d) \ SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) #define NFE_SYSCTL_STAT_ADD64(c, h, n, p, d) \ SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d) static void nfe_sysctl_node(struct nfe_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *child, *parent; struct sysctl_oid *tree; struct nfe_hw_stats *stats; int error; stats = &sc->nfe_stats; ctx = device_get_sysctl_ctx(sc->nfe_dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nfe_dev)); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW, &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I", "max number of Rx events to process"); sc->nfe_process_limit = NFE_PROC_DEFAULT; error = resource_int_value(device_get_name(sc->nfe_dev), device_get_unit(sc->nfe_dev), "process_limit", &sc->nfe_process_limit); if (error == 0) { if (sc->nfe_process_limit < NFE_PROC_MIN || sc->nfe_process_limit > NFE_PROC_MAX) { device_printf(sc->nfe_dev, "process_limit value out of range; " "using default: %d\n", NFE_PROC_DEFAULT); sc->nfe_process_limit = NFE_PROC_DEFAULT; } } if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0) return; tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, NULL, "NFE statistics"); parent = SYSCTL_CHILDREN(tree); /* Rx statistics. */ tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD, NULL, "Rx MAC statistics"); child = SYSCTL_CHILDREN(tree); NFE_SYSCTL_STAT_ADD32(ctx, child, "frame_errors", &stats->rx_frame_errors, "Framing Errors"); NFE_SYSCTL_STAT_ADD32(ctx, child, "extra_bytes", &stats->rx_extra_bytes, "Extra Bytes"); NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols", &stats->rx_late_cols, "Late Collisions"); NFE_SYSCTL_STAT_ADD32(ctx, child, "runts", &stats->rx_runts, "Runts"); NFE_SYSCTL_STAT_ADD32(ctx, child, "jumbos", &stats->rx_jumbos, "Jumbos"); NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_overuns", &stats->rx_fifo_overuns, "FIFO Overruns"); NFE_SYSCTL_STAT_ADD32(ctx, child, "crc_errors", &stats->rx_crc_errors, "CRC Errors"); NFE_SYSCTL_STAT_ADD32(ctx, child, "fae", &stats->rx_fae, "Frame Alignment Errors"); NFE_SYSCTL_STAT_ADD32(ctx, child, "len_errors", &stats->rx_len_errors, "Length Errors"); NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast", &stats->rx_unicast, "Unicast Frames"); NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast", &stats->rx_multicast, "Multicast Frames"); NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast", &stats->rx_broadcast, "Broadcast Frames"); if ((sc->nfe_flags & NFE_MIB_V2) != 0) { NFE_SYSCTL_STAT_ADD64(ctx, child, "octets", &stats->rx_octets, "Octets"); NFE_SYSCTL_STAT_ADD32(ctx, child, "pause", &stats->rx_pause, "Pause frames"); NFE_SYSCTL_STAT_ADD32(ctx, child, "drops", &stats->rx_drops, "Drop frames"); } /* Tx statistics. 
*/ tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD, NULL, "Tx MAC statistics"); child = SYSCTL_CHILDREN(tree); NFE_SYSCTL_STAT_ADD64(ctx, child, "octets", &stats->tx_octets, "Octets"); NFE_SYSCTL_STAT_ADD32(ctx, child, "zero_rexmits", &stats->tx_zero_rexmits, "Zero Retransmits"); NFE_SYSCTL_STAT_ADD32(ctx, child, "one_rexmits", &stats->tx_one_rexmits, "One Retransmits"); NFE_SYSCTL_STAT_ADD32(ctx, child, "multi_rexmits", &stats->tx_multi_rexmits, "Multiple Retransmits"); NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols", &stats->tx_late_cols, "Late Collisions"); NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_underuns", &stats->tx_fifo_underuns, "FIFO Underruns"); NFE_SYSCTL_STAT_ADD32(ctx, child, "carrier_losts", &stats->tx_carrier_losts, "Carrier Losts"); NFE_SYSCTL_STAT_ADD32(ctx, child, "excess_deferrals", &stats->tx_excess_deferals, "Excess Deferrals"); NFE_SYSCTL_STAT_ADD32(ctx, child, "retry_errors", &stats->tx_retry_errors, "Retry Errors"); if ((sc->nfe_flags & NFE_MIB_V2) != 0) { NFE_SYSCTL_STAT_ADD32(ctx, child, "deferrals", &stats->tx_deferals, "Deferrals"); NFE_SYSCTL_STAT_ADD32(ctx, child, "frames", &stats->tx_frames, "Frames"); NFE_SYSCTL_STAT_ADD32(ctx, child, "pause", &stats->tx_pause, "Pause Frames"); } if ((sc->nfe_flags & NFE_MIB_V3) != 0) { NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast", &stats->tx_deferals, "Unicast Frames"); NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast", &stats->tx_frames, "Multicast Frames"); NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast", &stats->tx_pause, "Broadcast Frames"); } } #undef NFE_SYSCTL_STAT_ADD32 #undef NFE_SYSCTL_STAT_ADD64 static void nfe_stats_clear(struct nfe_softc *sc) { int i, mib_cnt; if ((sc->nfe_flags & NFE_MIB_V1) != 0) mib_cnt = NFE_NUM_MIB_STATV1; else if ((sc->nfe_flags & (NFE_MIB_V2 | NFE_MIB_V3)) != 0) mib_cnt = NFE_NUM_MIB_STATV2; else return; for (i = 0; i < mib_cnt; i++) NFE_READ(sc, NFE_TX_OCTET + i * sizeof(uint32_t)); if ((sc->nfe_flags & NFE_MIB_V3) != 0) { NFE_READ(sc, NFE_TX_UNICAST); NFE_READ(sc, NFE_TX_MULTICAST); NFE_READ(sc, NFE_TX_BROADCAST); } } static void nfe_stats_update(struct nfe_softc *sc) { struct nfe_hw_stats *stats; NFE_LOCK_ASSERT(sc); if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0) return; stats = &sc->nfe_stats; stats->tx_octets += NFE_READ(sc, NFE_TX_OCTET); stats->tx_zero_rexmits += NFE_READ(sc, NFE_TX_ZERO_REXMIT); stats->tx_one_rexmits += NFE_READ(sc, NFE_TX_ONE_REXMIT); stats->tx_multi_rexmits += NFE_READ(sc, NFE_TX_MULTI_REXMIT); stats->tx_late_cols += NFE_READ(sc, NFE_TX_LATE_COL); stats->tx_fifo_underuns += NFE_READ(sc, NFE_TX_FIFO_UNDERUN); stats->tx_carrier_losts += NFE_READ(sc, NFE_TX_CARRIER_LOST); stats->tx_excess_deferals += NFE_READ(sc, NFE_TX_EXCESS_DEFERRAL); stats->tx_retry_errors += NFE_READ(sc, NFE_TX_RETRY_ERROR); stats->rx_frame_errors += NFE_READ(sc, NFE_RX_FRAME_ERROR); stats->rx_extra_bytes += NFE_READ(sc, NFE_RX_EXTRA_BYTES); stats->rx_late_cols += NFE_READ(sc, NFE_RX_LATE_COL); stats->rx_runts += NFE_READ(sc, NFE_RX_RUNT); stats->rx_jumbos += NFE_READ(sc, NFE_RX_JUMBO); stats->rx_fifo_overuns += NFE_READ(sc, NFE_RX_FIFO_OVERUN); stats->rx_crc_errors += NFE_READ(sc, NFE_RX_CRC_ERROR); stats->rx_fae += NFE_READ(sc, NFE_RX_FAE); stats->rx_len_errors += NFE_READ(sc, NFE_RX_LEN_ERROR); stats->rx_unicast += NFE_READ(sc, NFE_RX_UNICAST); stats->rx_multicast += NFE_READ(sc, NFE_RX_MULTICAST); stats->rx_broadcast += NFE_READ(sc, NFE_RX_BROADCAST); if ((sc->nfe_flags & NFE_MIB_V2) != 0) { stats->tx_deferals += NFE_READ(sc, NFE_TX_DEFERAL); stats->tx_frames += 
NFE_READ(sc, NFE_TX_FRAME); stats->rx_octets += NFE_READ(sc, NFE_RX_OCTET); stats->tx_pause += NFE_READ(sc, NFE_TX_PAUSE); stats->rx_pause += NFE_READ(sc, NFE_RX_PAUSE); stats->rx_drops += NFE_READ(sc, NFE_RX_DROP); } if ((sc->nfe_flags & NFE_MIB_V3) != 0) { stats->tx_unicast += NFE_READ(sc, NFE_TX_UNICAST); stats->tx_multicast += NFE_READ(sc, NFE_TX_MULTICAST); stats->tx_broadcast += NFE_READ(sc, NFE_TX_BROADCAST); } } static void nfe_set_linkspeed(struct nfe_softc *sc) { struct mii_softc *miisc; struct mii_data *mii; int aneg, i, phyno; NFE_LOCK_ASSERT(sc); mii = device_get_softc(sc->nfe_miibus); mii_pollstat(mii); aneg = 0; if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch IFM_SUBTYPE(mii->mii_media_active) { case IFM_10_T: case IFM_100_TX: return; case IFM_1000_T: aneg++; break; default: break; } } miisc = LIST_FIRST(&mii->mii_phys); phyno = miisc->mii_phy; LIST_FOREACH(miisc, &mii->mii_phys, mii_list) PHY_RESET(miisc); nfe_miibus_writereg(sc->nfe_dev, phyno, MII_100T2CR, 0); nfe_miibus_writereg(sc->nfe_dev, phyno, MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA); nfe_miibus_writereg(sc->nfe_dev, phyno, MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG); DELAY(1000); if (aneg != 0) { /* * Poll link state until nfe(4) get a 10/100Mbps link. */ for (i = 0; i < MII_ANEGTICKS_GIGE; i++) { mii_pollstat(mii); if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: nfe_mac_config(sc, mii); return; default: break; } } NFE_UNLOCK(sc); pause("nfelnk", hz); NFE_LOCK(sc); } if (i == MII_ANEGTICKS_GIGE) device_printf(sc->nfe_dev, "establishing a link failed, WOL may not work!"); } /* * No link, force MAC to have 100Mbps, full-duplex link. * This is the last resort and may/may not work. */ mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; nfe_mac_config(sc, mii); } static void nfe_set_wol(struct nfe_softc *sc) { if_t ifp; uint32_t wolctl; int pmc; uint16_t pmstat; NFE_LOCK_ASSERT(sc); if (pci_find_cap(sc->nfe_dev, PCIY_PMG, &pmc) != 0) return; ifp = sc->nfe_ifp; if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0) wolctl = NFE_WOL_MAGIC; else wolctl = 0; NFE_WRITE(sc, NFE_WOL_CTL, wolctl); if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0) { nfe_set_linkspeed(sc); if ((sc->nfe_flags & NFE_PWR_MGMT) != 0) NFE_WRITE(sc, NFE_PWR2_CTL, NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_GATE_CLOCKS); /* Enable RX. */ NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 0); NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, 0); NFE_WRITE(sc, NFE_RX_CTL, NFE_READ(sc, NFE_RX_CTL) | NFE_RX_START); } /* Request PME if WOL is requested. */ pmstat = pci_read_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, 2); pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); if ((if_getcapenable(ifp) & IFCAP_WOL) != 0) pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; pci_write_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); } diff --git a/sys/dev/qlxgbe/ql_os.c b/sys/dev/qlxgbe/ql_os.c index 137936dc62eb..ad6bab9f5ce3 100644 --- a/sys/dev/qlxgbe/ql_os.c +++ b/sys/dev/qlxgbe/ql_os.c @@ -1,2307 +1,2307 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2013-2016 Qlogic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * and ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * File: ql_os.c * Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656. */ #include __FBSDID("$FreeBSD$"); #include "ql_os.h" #include "ql_hw.h" #include "ql_def.h" #include "ql_inline.h" #include "ql_ver.h" #include "ql_glbl.h" #include "ql_dbg.h" #include /* * Some PCI Configuration Space Related Defines */ #ifndef PCI_VENDOR_QLOGIC #define PCI_VENDOR_QLOGIC 0x1077 #endif #ifndef PCI_PRODUCT_QLOGIC_ISP8030 #define PCI_PRODUCT_QLOGIC_ISP8030 0x8030 #endif #define PCI_QLOGIC_ISP8030 \ ((PCI_PRODUCT_QLOGIC_ISP8030 << 16) | PCI_VENDOR_QLOGIC) /* * static functions */ static int qla_alloc_parent_dma_tag(qla_host_t *ha); static void qla_free_parent_dma_tag(qla_host_t *ha); static int qla_alloc_xmt_bufs(qla_host_t *ha); static void qla_free_xmt_bufs(qla_host_t *ha); static int qla_alloc_rcv_bufs(qla_host_t *ha); static void qla_free_rcv_bufs(qla_host_t *ha); static void qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb); static void qla_init_ifnet(device_t dev, qla_host_t *ha); static int qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS); static void qla_release(qla_host_t *ha); static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error); static void qla_stop(qla_host_t *ha); static void qla_get_peer(qla_host_t *ha); static void qla_error_recovery(void *context, int pending); static void qla_async_event(void *context, int pending); static void qla_stats(void *context, int pending); static int qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx, uint32_t iscsi_pdu); /* * Hooks to the Operating Systems */ static int qla_pci_probe (device_t); static int qla_pci_attach (device_t); static int qla_pci_detach (device_t); static void qla_init(void *arg); static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data); static int qla_media_change(struct ifnet *ifp); static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr); static int qla_transmit(struct ifnet *ifp, struct mbuf *mp); static void qla_qflush(struct ifnet *ifp); static int qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp); static void qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *tx_fp); static int qla_create_fp_taskqueues(qla_host_t *ha); static void qla_destroy_fp_taskqueues(qla_host_t *ha); static void qla_drain_fp_taskqueues(qla_host_t *ha); static device_method_t qla_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, qla_pci_probe), 
DEVMETHOD(device_attach, qla_pci_attach), DEVMETHOD(device_detach, qla_pci_detach), { 0, 0 } }; static driver_t qla_pci_driver = { "ql", qla_pci_methods, sizeof (qla_host_t), }; static devclass_t qla83xx_devclass; DRIVER_MODULE(qla83xx, pci, qla_pci_driver, qla83xx_devclass, 0, 0); MODULE_DEPEND(qla83xx, pci, 1, 1, 1); MODULE_DEPEND(qla83xx, ether, 1, 1, 1); MALLOC_DEFINE(M_QLA83XXBUF, "qla83xxbuf", "Buffers for qla83xx driver"); #define QL_STD_REPLENISH_THRES 0 #define QL_JUMBO_REPLENISH_THRES 32 static char dev_str[64]; static char ver_str[64]; /* * Name: qla_pci_probe * Function: Validate the PCI device to be a QLA80XX device */ static int qla_pci_probe(device_t dev) { switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) { case PCI_QLOGIC_ISP8030: snprintf(dev_str, sizeof(dev_str), "%s v%d.%d.%d", "Qlogic ISP 83xx PCI CNA Adapter-Ethernet Function", QLA_VERSION_MAJOR, QLA_VERSION_MINOR, QLA_VERSION_BUILD); snprintf(ver_str, sizeof(ver_str), "v%d.%d.%d", QLA_VERSION_MAJOR, QLA_VERSION_MINOR, QLA_VERSION_BUILD); device_set_desc(dev, dev_str); break; default: return (ENXIO); } if (bootverbose) printf("%s: %s\n ", __func__, dev_str); return (BUS_PROBE_DEFAULT); } static void qla_add_sysctls(qla_host_t *ha) { device_t dev = ha->pci_dev; SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "version", CTLFLAG_RD, ver_str, 0, "Driver Version"); SYSCTL_ADD_STRING(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "fw_version", CTLFLAG_RD, ha->fw_ver_str, 0, "firmware version"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "link_status", CTLTYPE_INT | CTLFLAG_RW, (void *)ha, 0, qla_sysctl_get_link_status, "I", "Link Status"); ha->dbg_level = 0; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLFLAG_RW, &ha->dbg_level, ha->dbg_level, "Debug Level"); ha->enable_minidump = 1; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "enable_minidump", CTLFLAG_RW, &ha->enable_minidump, ha->enable_minidump, "Minidump retrieval prior to error recovery " "is enabled only when this is set"); ha->enable_driverstate_dump = 1; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "enable_driverstate_dump", CTLFLAG_RW, &ha->enable_driverstate_dump, ha->enable_driverstate_dump, "Driver State retrieval prior to error recovery " "is enabled only when this is set"); ha->enable_error_recovery = 1; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "enable_error_recovery", CTLFLAG_RW, &ha->enable_error_recovery, ha->enable_error_recovery, "when set, error recovery is enabled on fatal errors; " "otherwise the port is turned offline"); ha->ms_delay_after_init = 1000; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "ms_delay_after_init", CTLFLAG_RW, &ha->ms_delay_after_init, ha->ms_delay_after_init, "millisecond delay after hw_init"); ha->std_replenish = QL_STD_REPLENISH_THRES; SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "std_replenish", CTLFLAG_RW, &ha->std_replenish, ha->std_replenish, "Threshold for Replenishing Standard Frames"); SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "ipv4_lro", CTLFLAG_RD, &ha->ipv4_lro, "number
of ipv4 lro completions"); SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "ipv6_lro", CTLFLAG_RD, &ha->ipv6_lro, "number of ipv6 lro completions"); SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "tx_tso_frames", CTLFLAG_RD, &ha->tx_tso_frames, "number of Tx TSO Frames"); SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "hw_vlan_tx_frames", CTLFLAG_RD, &ha->hw_vlan_tx_frames, "number of Tx VLAN Frames"); SYSCTL_ADD_QUAD(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "hw_lock_failed", CTLFLAG_RD, &ha->hw_lock_failed, "number of hw_lock failures"); return; } static void qla_watchdog(void *arg) { qla_host_t *ha = arg; qla_hw_t *hw; struct ifnet *ifp; hw = &ha->hw; ifp = ha->ifp; if (ha->qla_watchdog_exit) { ha->qla_watchdog_exited = 1; return; } ha->qla_watchdog_exited = 0; if (!ha->qla_watchdog_pause) { if (!ha->offline && (ql_hw_check_health(ha) || ha->qla_initiate_recovery || (ha->msg_from_peer == QL_PEER_MSG_RESET))) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; ql_update_link_state(ha); if (ha->enable_error_recovery) { ha->qla_watchdog_paused = 1; ha->qla_watchdog_pause = 1; ha->err_inject = 0; device_printf(ha->pci_dev, "%s: taskqueue_enqueue(err_task) \n", __func__); taskqueue_enqueue(ha->err_tq, &ha->err_task); } else { if (ifp != NULL) ifp->if_drv_flags &= ~IFF_DRV_RUNNING; ha->offline = 1; } return; } else { if (ha->qla_interface_up) { ha->watchdog_ticks++; if (ha->watchdog_ticks > 1000) ha->watchdog_ticks = 0; if (!ha->watchdog_ticks && QL_RUNNING(ifp)) { taskqueue_enqueue(ha->stats_tq, &ha->stats_task); } if (ha->async_event) { taskqueue_enqueue(ha->async_event_tq, &ha->async_event_task); } } ha->qla_watchdog_paused = 0; } } else { ha->qla_watchdog_paused = 1; } callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS, qla_watchdog, ha); } /* * Name: qla_pci_attach * Function: attaches the device to the operating system */ static int qla_pci_attach(device_t dev) { qla_host_t *ha = NULL; uint32_t rsrc_len; int i; uint32_t num_rcvq = 0; if ((ha = device_get_softc(dev)) == NULL) { device_printf(dev, "cannot get softc\n"); return (ENOMEM); } memset(ha, 0, sizeof (qla_host_t)); if (pci_get_device(dev) != PCI_PRODUCT_QLOGIC_ISP8030) { device_printf(dev, "device is not ISP8030\n"); return (ENXIO); } ha->pci_func = pci_get_function(dev) & 0x1; ha->pci_dev = dev; pci_enable_busmaster(dev); ha->reg_rid = PCIR_BAR(0); ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid, RF_ACTIVE); if (ha->pci_reg == NULL) { device_printf(dev, "unable to map any ports\n"); goto qla_pci_attach_err; } rsrc_len = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY, ha->reg_rid); mtx_init(&ha->hw_lock, "qla83xx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF); mtx_init(&ha->sp_log_lock, "qla83xx_sp_log_lock", MTX_NETWORK_LOCK, MTX_DEF); ha->flags.lock_init = 1; qla_add_sysctls(ha); ha->hw.num_sds_rings = MAX_SDS_RINGS; ha->hw.num_rds_rings = MAX_RDS_RINGS; ha->hw.num_tx_rings = NUM_TX_RINGS; ha->reg_rid1 = PCIR_BAR(2); ha->pci_reg1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid1, RF_ACTIVE); ha->msix_count = pci_msix_count(dev); if (ha->msix_count < 1 ) { device_printf(dev, "%s: msix_count[%d] not enough\n", __func__, ha->msix_count); goto qla_pci_attach_err; } if (ha->msix_count < (ha->hw.num_sds_rings + 1)) { ha->hw.num_sds_rings = ha->msix_count - 1; } QL_DPRINT2(ha, (dev, "%s: ha %p pci_func 0x%x 
rsrc_count 0x%08x" " msix_count 0x%x pci_reg %p pci_reg1 %p\n", __func__, ha, ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg, ha->pci_reg1)); /* initialize hardware */ if (ql_init_hw(ha)) { device_printf(dev, "%s: ql_init_hw failed\n", __func__); goto qla_pci_attach_err; } device_printf(dev, "%s: firmware[%d.%d.%d.%d]\n", __func__, ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub, ha->fw_ver_build); snprintf(ha->fw_ver_str, sizeof(ha->fw_ver_str), "%d.%d.%d.%d", ha->fw_ver_major, ha->fw_ver_minor, ha->fw_ver_sub, ha->fw_ver_build); if (qla_get_nic_partition(ha, NULL, &num_rcvq)) { device_printf(dev, "%s: qla_get_nic_partition failed\n", __func__); goto qla_pci_attach_err; } device_printf(dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x" " msix_count 0x%x pci_reg %p pci_reg1 %p num_rcvq = %d\n", __func__, ha, ha->pci_func, rsrc_len, ha->msix_count, ha->pci_reg, ha->pci_reg1, num_rcvq); if ((ha->msix_count < 64) || (num_rcvq != 32)) { if (ha->hw.num_sds_rings > 15) { ha->hw.num_sds_rings = 15; } } ha->hw.num_rds_rings = ha->hw.num_sds_rings; ha->hw.num_tx_rings = ha->hw.num_sds_rings; #ifdef QL_ENABLE_ISCSI_TLV ha->hw.num_tx_rings = ha->hw.num_sds_rings * 2; #endif /* #ifdef QL_ENABLE_ISCSI_TLV */ ql_hw_add_sysctls(ha); ha->msix_count = ha->hw.num_sds_rings + 1; if (pci_alloc_msix(dev, &ha->msix_count)) { device_printf(dev, "%s: pci_alloc_msi[%d] failed\n", __func__, ha->msix_count); ha->msix_count = 0; goto qla_pci_attach_err; } ha->mbx_irq_rid = 1; ha->mbx_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &ha->mbx_irq_rid, (RF_ACTIVE | RF_SHAREABLE)); if (ha->mbx_irq == NULL) { device_printf(dev, "could not allocate mbx interrupt\n"); goto qla_pci_attach_err; } if (bus_setup_intr(dev, ha->mbx_irq, (INTR_TYPE_NET | INTR_MPSAFE), NULL, ql_mbx_isr, ha, &ha->mbx_handle)) { device_printf(dev, "could not setup mbx interrupt\n"); goto qla_pci_attach_err; } for (i = 0; i < ha->hw.num_sds_rings; i++) { ha->irq_vec[i].sds_idx = i; ha->irq_vec[i].ha = ha; ha->irq_vec[i].irq_rid = 2 + i; ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &ha->irq_vec[i].irq_rid, (RF_ACTIVE | RF_SHAREABLE)); if (ha->irq_vec[i].irq == NULL) { device_printf(dev, "could not allocate interrupt\n"); goto qla_pci_attach_err; } if (bus_setup_intr(dev, ha->irq_vec[i].irq, (INTR_TYPE_NET | INTR_MPSAFE), NULL, ql_isr, &ha->irq_vec[i], &ha->irq_vec[i].handle)) { device_printf(dev, "could not setup interrupt\n"); goto qla_pci_attach_err; } ha->tx_fp[i].ha = ha; ha->tx_fp[i].txr_idx = i; if (qla_alloc_tx_br(ha, &ha->tx_fp[i])) { device_printf(dev, "%s: could not allocate tx_br[%d]\n", __func__, i); goto qla_pci_attach_err; } } if (qla_create_fp_taskqueues(ha) != 0) goto qla_pci_attach_err; printf("%s: mp__ncpus %d sds %d rds %d msi-x %d\n", __func__, mp_ncpus, ha->hw.num_sds_rings, ha->hw.num_rds_rings, ha->msix_count); ql_read_mac_addr(ha); /* allocate parent dma tag */ if (qla_alloc_parent_dma_tag(ha)) { device_printf(dev, "%s: qla_alloc_parent_dma_tag failed\n", __func__); goto qla_pci_attach_err; } /* alloc all dma buffers */ if (ql_alloc_dma(ha)) { device_printf(dev, "%s: ql_alloc_dma failed\n", __func__); goto qla_pci_attach_err; } qla_get_peer(ha); if (ql_minidump_init(ha) != 0) { device_printf(dev, "%s: ql_minidump_init failed\n", __func__); goto qla_pci_attach_err; } ql_alloc_drvr_state_buffer(ha); ql_alloc_sp_log_buffer(ha); /* create the o.s ethernet interface */ qla_init_ifnet(dev, ha); ha->flags.qla_watchdog_active = 1; ha->qla_watchdog_pause = 0; callout_init(&ha->tx_callout, TRUE); 
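	/*
	 * The tx_callout initialized above drives qla_watchdog() every
	 * QLA_WATCHDOG_CALLOUT_TICKS.  The watchdog checks adapter health
	 * via ql_hw_check_health(); on a failure, a requested recovery, or
	 * a reset message from the peer function it pauses itself and
	 * either enqueues err_task for error recovery or marks the port
	 * offline, and while the interface is up it periodically kicks the
	 * stats and async-event tasks.  The taskqueues it feeds are
	 * created below.
	 */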
ha->flags.qla_callout_init = 1; /* create ioctl device interface */ if (ql_make_cdev(ha)) { device_printf(dev, "%s: ql_make_cdev failed\n", __func__); goto qla_pci_attach_err; } callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS, qla_watchdog, ha); TASK_INIT(&ha->err_task, 0, qla_error_recovery, ha); ha->err_tq = taskqueue_create("qla_errq", M_NOWAIT, taskqueue_thread_enqueue, &ha->err_tq); taskqueue_start_threads(&ha->err_tq, 1, PI_NET, "%s errq", device_get_nameunit(ha->pci_dev)); TASK_INIT(&ha->async_event_task, 0, qla_async_event, ha); ha->async_event_tq = taskqueue_create("qla_asyncq", M_NOWAIT, taskqueue_thread_enqueue, &ha->async_event_tq); taskqueue_start_threads(&ha->async_event_tq, 1, PI_NET, "%s asyncq", device_get_nameunit(ha->pci_dev)); TASK_INIT(&ha->stats_task, 0, qla_stats, ha); ha->stats_tq = taskqueue_create("qla_statsq", M_NOWAIT, taskqueue_thread_enqueue, &ha->stats_tq); taskqueue_start_threads(&ha->stats_tq, 1, PI_NET, "%s taskq", device_get_nameunit(ha->pci_dev)); QL_DPRINT2(ha, (dev, "%s: exit 0\n", __func__)); return (0); qla_pci_attach_err: qla_release(ha); if (ha->flags.lock_init) { mtx_destroy(&ha->hw_lock); mtx_destroy(&ha->sp_log_lock); } QL_DPRINT2(ha, (dev, "%s: exit ENXIO\n", __func__)); return (ENXIO); } /* * Name: qla_pci_detach * Function: Unhooks the device from the operating system */ static int qla_pci_detach(device_t dev) { qla_host_t *ha = NULL; struct ifnet *ifp; if ((ha = device_get_softc(dev)) == NULL) { device_printf(dev, "cannot get softc\n"); return (ENOMEM); } QL_DPRINT2(ha, (dev, "%s: enter\n", __func__)); ifp = ha->ifp; ifp->if_drv_flags &= ~IFF_DRV_RUNNING; QLA_LOCK(ha, __func__, -1, 0); ha->qla_detach_active = 1; qla_stop(ha); qla_release(ha); QLA_UNLOCK(ha, __func__); if (ha->flags.lock_init) { mtx_destroy(&ha->hw_lock); mtx_destroy(&ha->sp_log_lock); } QL_DPRINT2(ha, (dev, "%s: exit\n", __func__)); return (0); } /* * SYSCTL Related Callbacks */ static int qla_sysctl_get_link_status(SYSCTL_HANDLER_ARGS) { int err, ret = 0; qla_host_t *ha; err = sysctl_handle_int(oidp, &ret, 0, req); if (err || !req->newptr) return (err); if (ret == 1) { ha = (qla_host_t *)arg1; ql_hw_link_status(ha); } return (err); } /* * Name: qla_release * Function: Releases the resources allocated for the device */ static void qla_release(qla_host_t *ha) { device_t dev; int i; dev = ha->pci_dev; if (ha->async_event_tq) { taskqueue_drain_all(ha->async_event_tq); taskqueue_free(ha->async_event_tq); } if (ha->err_tq) { taskqueue_drain_all(ha->err_tq); taskqueue_free(ha->err_tq); } if (ha->stats_tq) { taskqueue_drain_all(ha->stats_tq); taskqueue_free(ha->stats_tq); } ql_del_cdev(ha); if (ha->flags.qla_watchdog_active) { ha->qla_watchdog_exit = 1; while (ha->qla_watchdog_exited == 0) qla_mdelay(__func__, 1); } if (ha->flags.qla_callout_init) callout_stop(&ha->tx_callout); if (ha->ifp != NULL) ether_ifdetach(ha->ifp); ql_free_drvr_state_buffer(ha); ql_free_sp_log_buffer(ha); ql_free_dma(ha); qla_free_parent_dma_tag(ha); if (ha->mbx_handle) (void)bus_teardown_intr(dev, ha->mbx_irq, ha->mbx_handle); if (ha->mbx_irq) (void) bus_release_resource(dev, SYS_RES_IRQ, ha->mbx_irq_rid, ha->mbx_irq); for (i = 0; i < ha->hw.num_sds_rings; i++) { if (ha->irq_vec[i].handle) { (void)bus_teardown_intr(dev, ha->irq_vec[i].irq, ha->irq_vec[i].handle); } if (ha->irq_vec[i].irq) { (void)bus_release_resource(dev, SYS_RES_IRQ, ha->irq_vec[i].irq_rid, ha->irq_vec[i].irq); } qla_free_tx_br(ha, &ha->tx_fp[i]); } qla_destroy_fp_taskqueues(ha); if (ha->msix_count) pci_release_msi(dev); if 
(ha->pci_reg) (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid, ha->pci_reg); if (ha->pci_reg1) (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid1, ha->pci_reg1); return; } /* * DMA Related Functions */ static void qla_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { *((bus_addr_t *)arg) = 0; if (error) { printf("%s: bus_dmamap_load failed (%d)\n", __func__, error); return; } *((bus_addr_t *)arg) = segs[0].ds_addr; return; } int ql_alloc_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf) { int ret = 0; device_t dev; bus_addr_t b_addr; dev = ha->pci_dev; QL_DPRINT2(ha, (dev, "%s: enter\n", __func__)); ret = bus_dma_tag_create( ha->parent_tag,/* parent */ dma_buf->alignment, ((bus_size_t)(1ULL << 32)),/* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ dma_buf->size, /* maxsize */ 1, /* nsegments */ dma_buf->size, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &dma_buf->dma_tag); if (ret) { device_printf(dev, "%s: could not create dma tag\n", __func__); goto ql_alloc_dmabuf_exit; } ret = bus_dmamem_alloc(dma_buf->dma_tag, (void **)&dma_buf->dma_b, (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT), &dma_buf->dma_map); if (ret) { bus_dma_tag_destroy(dma_buf->dma_tag); device_printf(dev, "%s: bus_dmamem_alloc failed\n", __func__); goto ql_alloc_dmabuf_exit; } ret = bus_dmamap_load(dma_buf->dma_tag, dma_buf->dma_map, dma_buf->dma_b, dma_buf->size, qla_dmamap_callback, &b_addr, BUS_DMA_NOWAIT); if (ret || !b_addr) { bus_dma_tag_destroy(dma_buf->dma_tag); bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map); ret = -1; goto ql_alloc_dmabuf_exit; } dma_buf->dma_addr = b_addr; ql_alloc_dmabuf_exit: QL_DPRINT2(ha, (dev, "%s: exit ret 0x%08x tag %p map %p b %p sz 0x%x\n", __func__, ret, (void *)dma_buf->dma_tag, (void *)dma_buf->dma_map, (void *)dma_buf->dma_b, dma_buf->size)); return ret; } void ql_free_dmabuf(qla_host_t *ha, qla_dma_t *dma_buf) { bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map); bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map); bus_dma_tag_destroy(dma_buf->dma_tag); } static int qla_alloc_parent_dma_tag(qla_host_t *ha) { int ret; device_t dev; dev = ha->pci_dev; /* * Allocate parent DMA Tag */ ret = bus_dma_tag_create( bus_get_dma_tag(dev), /* parent */ 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 0, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &ha->parent_tag); if (ret) { device_printf(dev, "%s: could not create parent dma tag\n", __func__); return (-1); } ha->flags.parent_tag = 1; return (0); } static void qla_free_parent_dma_tag(qla_host_t *ha) { if (ha->flags.parent_tag) { bus_dma_tag_destroy(ha->parent_tag); ha->flags.parent_tag = 0; } } /* * Name: qla_init_ifnet * Function: Creates the Network Device Interface and Registers it with the O.S */ static void qla_init_ifnet(device_t dev, qla_host_t *ha) { struct ifnet *ifp; QL_DPRINT2(ha, (dev, "%s: enter\n", __func__)); ifp = ha->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) panic("%s: cannot if_alloc()\n", device_get_nameunit(dev)); if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_baudrate = IF_Gbps(10); ifp->if_capabilities = IFCAP_LINKSTATE; ifp->if_mtu = ETHERMTU; ifp->if_init = qla_init; ifp->if_softc = ha; ifp->if_flags = 
IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = qla_ioctl; ifp->if_transmit = qla_transmit; ifp->if_qflush = qla_qflush; IFQ_SET_MAXLEN(&ifp->if_snd, qla_get_ifq_snd_maxlen(ha)); ifp->if_snd.ifq_drv_maxlen = qla_get_ifq_snd_maxlen(ha); IFQ_SET_READY(&ifp->if_snd); ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; ether_ifattach(ifp, qla_get_mac_addr(ha)); ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_JUMBO_MTU | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_VLAN_HWTSO | IFCAP_LRO; ifp->if_capenable = ifp->if_capabilities; ifp->if_hdrlen = sizeof(struct ether_vlan_header); ifmedia_init(&ha->media, IFM_IMASK, qla_media_change, qla_media_status); ifmedia_add(&ha->media, (IFM_ETHER | qla_get_optics(ha) | IFM_FDX), 0, NULL); ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL); ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO)); QL_DPRINT2(ha, (dev, "%s: exit\n", __func__)); return; } static void qla_init_locked(qla_host_t *ha) { struct ifnet *ifp = ha->ifp; ql_sp_log(ha, 14, 0, 0, 0, 0, 0, 0); qla_stop(ha); if (qla_alloc_xmt_bufs(ha) != 0) return; qla_confirm_9kb_enable(ha); if (qla_alloc_rcv_bufs(ha) != 0) return; bcopy(IF_LLADDR(ha->ifp), ha->hw.mac_addr, ETHER_ADDR_LEN); ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO; ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6; ha->stop_rcv = 0; if (ql_init_hw_if(ha) == 0) { ifp = ha->ifp; ifp->if_drv_flags |= IFF_DRV_RUNNING; ha->hw_vlan_tx_frames = 0; ha->tx_tso_frames = 0; ha->qla_interface_up = 1; ql_update_link_state(ha); } else { if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_IF_START_FAILURE) ha->hw.sp_log_stop = -1; } ha->qla_watchdog_pause = 0; return; } static void qla_init(void *arg) { qla_host_t *ha; ha = (qla_host_t *)arg; QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); if (QLA_LOCK(ha, __func__, -1, 0) != 0) return; qla_init_locked(ha); QLA_UNLOCK(ha, __func__); QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); } static u_int qla_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt) { uint8_t *mta = arg; if (mcnt == Q8_MAX_NUM_MULTICAST_ADDRS) return (0); bcopy(LLADDR(sdl), &mta[mcnt * Q8_MAC_ADDR_LEN], Q8_MAC_ADDR_LEN); return (1); } static int qla_set_multi(qla_host_t *ha, uint32_t add_multi) { uint8_t mta[Q8_MAX_NUM_MULTICAST_ADDRS * Q8_MAC_ADDR_LEN]; int mcnt = 0; struct ifnet *ifp = ha->ifp; int ret = 0; mcnt = if_foreach_llmaddr(ifp, qla_copy_maddr, mta); if (QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, QLA_LOCK_NO_SLEEP) != 0) return (-1); ql_sp_log(ha, 12, 4, ifp->if_drv_flags, (ifp->if_drv_flags & IFF_DRV_RUNNING), add_multi, (uint32_t)mcnt, 0); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { if (!add_multi) { ret = qla_hw_del_all_mcast(ha); if (ret) device_printf(ha->pci_dev, "%s: qla_hw_del_all_mcast() failed\n", __func__); } if (!ret) ret = ql_hw_set_multi(ha, mta, mcnt, 1); } QLA_UNLOCK(ha, __func__); return (ret); } static int qla_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { int ret = 0; struct ifreq *ifr = (struct ifreq *)data; struct ifaddr *ifa = (struct ifaddr *)data; qla_host_t *ha; ha = (qla_host_t *)ifp->if_softc; if (ha->offline || ha->qla_initiate_recovery) return (ret); switch (cmd) { case SIOCSIFADDR: QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n", __func__, cmd)); if (ifa->ifa_addr->sa_family == AF_INET) { ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, QLA_LOCK_NO_SLEEP); if (ret) break; ifp->if_flags |= IFF_UP; ql_sp_log(ha, 8, 3, ifp->if_drv_flags, (ifp->if_drv_flags & IFF_DRV_RUNNING), 
ntohl(IA_SIN(ifa)->sin_addr.s_addr), 0, 0); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { qla_init_locked(ha); } QLA_UNLOCK(ha, __func__); QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n", __func__, cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr))); arp_ifinit(ifp, ifa); } else { ether_ioctl(ifp, cmd, data); } break; case SIOCSIFMTU: QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n", __func__, cmd)); if (ifr->ifr_mtu > QLA_MAX_MTU) { ret = EINVAL; } else { ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, QLA_LOCK_NO_SLEEP); if (ret) break; ifp->if_mtu = ifr->ifr_mtu; ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; ql_sp_log(ha, 9, 4, ifp->if_drv_flags, (ifp->if_drv_flags & IFF_DRV_RUNNING), ha->max_frame_size, ifp->if_mtu, 0); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { qla_init_locked(ha); } if (ifp->if_mtu > ETHERMTU) ha->std_replenish = QL_JUMBO_REPLENISH_THRES; else ha->std_replenish = QL_STD_REPLENISH_THRES; QLA_UNLOCK(ha, __func__); } break; case SIOCSIFFLAGS: QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n", __func__, cmd)); ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, QLA_LOCK_NO_SLEEP); if (ret) break; ql_sp_log(ha, 10, 4, ifp->if_drv_flags, (ifp->if_drv_flags & IFF_DRV_RUNNING), ha->if_flags, ifp->if_flags, 0); if (ifp->if_flags & IFF_UP) { ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; qla_init_locked(ha); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { if ((ifp->if_flags ^ ha->if_flags) & IFF_PROMISC) { ret = ql_set_promisc(ha); } else if ((ifp->if_flags ^ ha->if_flags) & IFF_ALLMULTI) { ret = ql_set_allmulti(ha); } } } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) qla_stop(ha); ha->if_flags = ifp->if_flags; } QLA_UNLOCK(ha, __func__); break; case SIOCADDMULTI: QL_DPRINT4(ha, (ha->pci_dev, "%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd)); if (qla_set_multi(ha, 1)) ret = EINVAL; break; case SIOCDELMULTI: QL_DPRINT4(ha, (ha->pci_dev, "%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd)); if (qla_set_multi(ha, 0)) ret = EINVAL; break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", __func__, cmd)); ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd); break; case SIOCSIFCAP: { int mask = ifr->ifr_reqcap ^ ifp->if_capenable; QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n", __func__, cmd)); if (mask & IFCAP_HWCSUM) ifp->if_capenable ^= IFCAP_HWCSUM; if (mask & IFCAP_TSO4) ifp->if_capenable ^= IFCAP_TSO4; if (mask & IFCAP_TSO6) ifp->if_capenable ^= IFCAP_TSO6; if (mask & IFCAP_VLAN_HWTAGGING) ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; if (mask & IFCAP_VLAN_HWTSO) ifp->if_capenable ^= IFCAP_VLAN_HWTSO; if (mask & IFCAP_LRO) ifp->if_capenable ^= IFCAP_LRO; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { ret = QLA_LOCK(ha, __func__, QLA_LOCK_DEFAULT_MS_TIMEOUT, QLA_LOCK_NO_SLEEP); if (ret) break; ql_sp_log(ha, 11, 4, ifp->if_drv_flags, (ifp->if_drv_flags & IFF_DRV_RUNNING), mask, ifp->if_capenable, 0); qla_init_locked(ha); QLA_UNLOCK(ha, __func__); } VLAN_CAPABILITIES(ifp); break; } default: QL_DPRINT4(ha, (ha->pci_dev, "%s: default (0x%lx)\n", __func__, cmd)); ret = ether_ioctl(ifp, cmd, data); break; } return (ret); } static int qla_media_change(struct ifnet *ifp) { qla_host_t *ha; struct ifmedia *ifm; int ret = 0; ha = (qla_host_t *)ifp->if_softc; QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); ifm = &ha->media; if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) ret = EINVAL; QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); return 
(ret); } static void qla_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) { qla_host_t *ha; ha = (qla_host_t *)ifp->if_softc; QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; ql_update_link_state(ha); if (ha->hw.link_up) { ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active |= (IFM_FDX | qla_get_optics(ha)); } QL_DPRINT2(ha, (ha->pci_dev, "%s: exit (%s)\n", __func__,\ (ha->hw.link_up ? "link_up" : "link_down"))); return; } static int qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx, uint32_t iscsi_pdu) { bus_dma_segment_t segs[QLA_MAX_SEGMENTS]; bus_dmamap_t map; int nsegs; int ret = -1; uint32_t tx_idx; struct mbuf *m_head = *m_headp; QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__)); tx_idx = ha->hw.tx_cntxt[txr_idx].txr_next; if ((NULL != ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head) || (QL_ERR_INJECT(ha, INJCT_TXBUF_MBUF_NON_NULL))){ QL_ASSERT(ha, 0, ("%s [%d]: txr_idx = %d tx_idx = %d "\ "mbuf = %p\n", __func__, __LINE__, txr_idx, tx_idx,\ ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head)); device_printf(ha->pci_dev, "%s [%d]: txr_idx = %d tx_idx = %d " "mbuf = %p\n", __func__, __LINE__, txr_idx, tx_idx, ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head); if (m_head) m_freem(m_head); *m_headp = NULL; QL_INITIATE_RECOVERY(ha); return (ret); } map = ha->tx_ring[txr_idx].tx_buf[tx_idx].map; ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs, BUS_DMA_NOWAIT); if (ret == EFBIG) { struct mbuf *m; QL_DPRINT8(ha, (ha->pci_dev, "%s: EFBIG [%d]\n", __func__, m_head->m_pkthdr.len)); m = m_defrag(m_head, M_NOWAIT); if (m == NULL) { ha->err_tx_defrag++; m_freem(m_head); *m_headp = NULL; device_printf(ha->pci_dev, "%s: m_defrag() = NULL [%d]\n", __func__, ret); return (ENOBUFS); } m_head = m; *m_headp = m_head; if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs, BUS_DMA_NOWAIT))) { ha->err_tx_dmamap_load++; device_printf(ha->pci_dev, "%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n", __func__, ret, m_head->m_pkthdr.len); if (ret != ENOMEM) { m_freem(m_head); *m_headp = NULL; } return (ret); } } else if (ret) { ha->err_tx_dmamap_load++; device_printf(ha->pci_dev, "%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n", __func__, ret, m_head->m_pkthdr.len); if (ret != ENOMEM) { m_freem(m_head); *m_headp = NULL; } return (ret); } QL_ASSERT(ha, (nsegs != 0), ("qla_send: empty packet")); bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE); if (!(ret = ql_hw_send(ha, segs, nsegs, tx_idx, m_head, txr_idx, iscsi_pdu))) { ha->tx_ring[txr_idx].count++; if (iscsi_pdu) ha->tx_ring[txr_idx].iscsi_pkt_count++; ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head = m_head; } else { bus_dmamap_unload(ha->tx_tag, map); if (ret == EINVAL) { if (m_head) m_freem(m_head); *m_headp = NULL; } } QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__)); return (ret); } static int qla_alloc_tx_br(qla_host_t *ha, qla_tx_fp_t *fp) { snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name), "qla%d_fp%d_tx_mq_lock", ha->pci_func, fp->txr_idx); mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF); fp->tx_br = buf_ring_alloc(NUM_TX_DESCRIPTORS, M_DEVBUF, M_NOWAIT, &fp->tx_mtx); if (fp->tx_br == NULL) { QL_DPRINT1(ha, (ha->pci_dev, "buf_ring_alloc failed for " " fp[%d, %d]\n", ha->pci_func, fp->txr_idx)); return (-ENOMEM); } return 0; } static void qla_free_tx_br(qla_host_t *ha, qla_tx_fp_t *fp) { struct mbuf *mp; struct ifnet *ifp = ha->ifp; if (mtx_initialized(&fp->tx_mtx)) { if (fp->tx_br != NULL) { mtx_lock(&fp->tx_mtx); while ((mp = 
drbr_dequeue(ifp, fp->tx_br)) != NULL) { m_freem(mp); } mtx_unlock(&fp->tx_mtx); buf_ring_free(fp->tx_br, M_DEVBUF); fp->tx_br = NULL; } mtx_destroy(&fp->tx_mtx); } return; } static void qla_fp_taskqueue(void *context, int pending) { qla_tx_fp_t *fp; qla_host_t *ha; struct ifnet *ifp; struct mbuf *mp = NULL; int ret = 0; uint32_t txr_idx; uint32_t iscsi_pdu = 0; uint32_t rx_pkts_left = -1; fp = context; if (fp == NULL) return; ha = (qla_host_t *)fp->ha; ifp = ha->ifp; txr_idx = fp->txr_idx; mtx_lock(&fp->tx_mtx); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING) || (!ha->hw.link_up)) { mtx_unlock(&fp->tx_mtx); goto qla_fp_taskqueue_exit; } while (rx_pkts_left && !ha->stop_rcv && (ifp->if_drv_flags & IFF_DRV_RUNNING) && ha->hw.link_up) { rx_pkts_left = ql_rcv_isr(ha, fp->txr_idx, 64); #ifdef QL_ENABLE_ISCSI_TLV ql_hw_tx_done_locked(ha, fp->txr_idx); ql_hw_tx_done_locked(ha, (fp->txr_idx + (ha->hw.num_tx_rings >> 1))); #else ql_hw_tx_done_locked(ha, fp->txr_idx); #endif /* #ifdef QL_ENABLE_ISCSI_TLV */ mp = drbr_peek(ifp, fp->tx_br); while (mp != NULL) { if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) { #ifdef QL_ENABLE_ISCSI_TLV if (ql_iscsi_pdu(ha, mp) == 0) { txr_idx = txr_idx + (ha->hw.num_tx_rings >> 1); iscsi_pdu = 1; } else { iscsi_pdu = 0; txr_idx = fp->txr_idx; } #endif /* #ifdef QL_ENABLE_ISCSI_TLV */ } ret = qla_send(ha, &mp, txr_idx, iscsi_pdu); if (ret) { if (mp != NULL) drbr_putback(ifp, fp->tx_br, mp); else { drbr_advance(ifp, fp->tx_br); } mtx_unlock(&fp->tx_mtx); goto qla_fp_taskqueue_exit0; } else { drbr_advance(ifp, fp->tx_br); } /* Send a copy of the frame to the BPF listener */ ETHER_BPF_MTAP(ifp, mp); if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) || (!ha->hw.link_up)) break; mp = drbr_peek(ifp, fp->tx_br); } } mtx_unlock(&fp->tx_mtx); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) goto qla_fp_taskqueue_exit; qla_fp_taskqueue_exit0: if (rx_pkts_left || ((mp != NULL) && ret)) { taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); } else { if (!ha->stop_rcv) { QL_ENABLE_INTERRUPTS(ha, fp->txr_idx); } } qla_fp_taskqueue_exit: QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret)); return; } static int qla_create_fp_taskqueues(qla_host_t *ha) { int i; uint8_t tq_name[32]; for (i = 0; i < ha->hw.num_sds_rings; i++) { qla_tx_fp_t *fp = &ha->tx_fp[i]; bzero(tq_name, sizeof (tq_name)); snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i); - TASK_INIT(&fp->fp_task, 0, qla_fp_taskqueue, fp); + NET_TASK_INIT(&fp->fp_task, 0, qla_fp_taskqueue, fp); fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT, taskqueue_thread_enqueue, &fp->fp_taskqueue); if (fp->fp_taskqueue == NULL) return (-1); taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s", tq_name); QL_DPRINT1(ha, (ha->pci_dev, "%s: %p\n", __func__, fp->fp_taskqueue)); } return (0); } static void qla_destroy_fp_taskqueues(qla_host_t *ha) { int i; for (i = 0; i < ha->hw.num_sds_rings; i++) { qla_tx_fp_t *fp = &ha->tx_fp[i]; if (fp->fp_taskqueue != NULL) { taskqueue_drain_all(fp->fp_taskqueue); taskqueue_free(fp->fp_taskqueue); fp->fp_taskqueue = NULL; } } return; } static void qla_drain_fp_taskqueues(qla_host_t *ha) { int i; for (i = 0; i < ha->hw.num_sds_rings; i++) { qla_tx_fp_t *fp = &ha->tx_fp[i]; if (fp->fp_taskqueue != NULL) { taskqueue_drain_all(fp->fp_taskqueue); } } return; } static int qla_transmit(struct ifnet *ifp, struct mbuf *mp) { qla_host_t *ha = (qla_host_t *)ifp->if_softc; qla_tx_fp_t *fp; int rss_id = 0; int ret = 0; QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); #if __FreeBSD_version >= 
1100000 if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) #else if (mp->m_flags & M_FLOWID) #endif rss_id = (mp->m_pkthdr.flowid & Q8_RSS_IND_TBL_MAX_IDX) % ha->hw.num_sds_rings; fp = &ha->tx_fp[rss_id]; if (fp->tx_br == NULL) { ret = EINVAL; goto qla_transmit_exit; } if (mp != NULL) { ret = drbr_enqueue(ifp, fp->tx_br, mp); } if (fp->fp_taskqueue != NULL) taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); ret = 0; qla_transmit_exit: QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret)); return ret; } static void qla_qflush(struct ifnet *ifp) { int i; qla_tx_fp_t *fp; struct mbuf *mp; qla_host_t *ha; ha = (qla_host_t *)ifp->if_softc; QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); for (i = 0; i < ha->hw.num_sds_rings; i++) { fp = &ha->tx_fp[i]; if (fp == NULL) continue; if (fp->tx_br) { mtx_lock(&fp->tx_mtx); while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { m_freem(mp); } mtx_unlock(&fp->tx_mtx); } } QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); return; } static void qla_stop(qla_host_t *ha) { struct ifnet *ifp = ha->ifp; device_t dev; int i = 0; ql_sp_log(ha, 13, 0, 0, 0, 0, 0, 0); dev = ha->pci_dev; ifp->if_drv_flags &= ~IFF_DRV_RUNNING; ha->qla_watchdog_pause = 1; for (i = 0; i < ha->hw.num_sds_rings; i++) { qla_tx_fp_t *fp; fp = &ha->tx_fp[i]; if (fp == NULL) continue; if (fp->tx_br != NULL) { mtx_lock(&fp->tx_mtx); mtx_unlock(&fp->tx_mtx); } } while (!ha->qla_watchdog_paused) qla_mdelay(__func__, 1); ha->qla_interface_up = 0; qla_drain_fp_taskqueues(ha); ql_del_hw_if(ha); qla_free_xmt_bufs(ha); qla_free_rcv_bufs(ha); return; } /* * Buffer Management Functions for Transmit and Receive Rings */ static int qla_alloc_xmt_bufs(qla_host_t *ha) { int ret = 0; uint32_t i, j; qla_tx_buf_t *txb; if (bus_dma_tag_create(NULL, /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ QLA_MAX_TSO_FRAME_SIZE, /* maxsize */ QLA_MAX_SEGMENTS, /* nsegments */ PAGE_SIZE, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &ha->tx_tag)) { device_printf(ha->pci_dev, "%s: tx_tag alloc failed\n", __func__); return (ENOMEM); } for (i = 0; i < ha->hw.num_tx_rings; i++) { bzero((void *)ha->tx_ring[i].tx_buf, (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS)); } for (j = 0; j < ha->hw.num_tx_rings; j++) { for (i = 0; i < NUM_TX_DESCRIPTORS; i++) { txb = &ha->tx_ring[j].tx_buf[i]; if ((ret = bus_dmamap_create(ha->tx_tag, BUS_DMA_NOWAIT, &txb->map))) { ha->err_tx_dmamap_create++; device_printf(ha->pci_dev, "%s: bus_dmamap_create failed[%d]\n", __func__, ret); qla_free_xmt_bufs(ha); return (ret); } } } return 0; } /* * Release mbuf after it sent on the wire */ static void qla_clear_tx_buf(qla_host_t *ha, qla_tx_buf_t *txb) { QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); if (txb->m_head) { bus_dmamap_sync(ha->tx_tag, txb->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ha->tx_tag, txb->map); m_freem(txb->m_head); txb->m_head = NULL; bus_dmamap_destroy(ha->tx_tag, txb->map); txb->map = NULL; } if (txb->map) { bus_dmamap_unload(ha->tx_tag, txb->map); bus_dmamap_destroy(ha->tx_tag, txb->map); txb->map = NULL; } QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); } static void qla_free_xmt_bufs(qla_host_t *ha) { int i, j; for (j = 0; j < ha->hw.num_tx_rings; j++) { for (i = 0; i < NUM_TX_DESCRIPTORS; i++) qla_clear_tx_buf(ha, &ha->tx_ring[j].tx_buf[i]); } if (ha->tx_tag != NULL) { bus_dma_tag_destroy(ha->tx_tag); ha->tx_tag = NULL; } for (i = 0; i < 
ha->hw.num_tx_rings; i++) { bzero((void *)ha->tx_ring[i].tx_buf, (sizeof(qla_tx_buf_t) * NUM_TX_DESCRIPTORS)); } return; } static int qla_alloc_rcv_std(qla_host_t *ha) { int i, j, k, r, ret = 0; qla_rx_buf_t *rxb; qla_rx_ring_t *rx_ring; for (r = 0; r < ha->hw.num_rds_rings; r++) { rx_ring = &ha->rx_ring[r]; for (i = 0; i < NUM_RX_DESCRIPTORS; i++) { rxb = &rx_ring->rx_buf[i]; ret = bus_dmamap_create(ha->rx_tag, BUS_DMA_NOWAIT, &rxb->map); if (ret) { device_printf(ha->pci_dev, "%s: dmamap[%d, %d] failed\n", __func__, r, i); for (k = 0; k < r; k++) { for (j = 0; j < NUM_RX_DESCRIPTORS; j++) { rxb = &ha->rx_ring[k].rx_buf[j]; bus_dmamap_destroy(ha->rx_tag, rxb->map); } } for (j = 0; j < i; j++) { bus_dmamap_destroy(ha->rx_tag, rx_ring->rx_buf[j].map); } goto qla_alloc_rcv_std_err; } } } qla_init_hw_rcv_descriptors(ha); for (r = 0; r < ha->hw.num_rds_rings; r++) { rx_ring = &ha->rx_ring[r]; for (i = 0; i < NUM_RX_DESCRIPTORS; i++) { rxb = &rx_ring->rx_buf[i]; rxb->handle = i; if (!(ret = ql_get_mbuf(ha, rxb, NULL))) { /* * set the physical address in the * corresponding descriptor entry in the * receive ring/queue for the hba */ qla_set_hw_rcv_desc(ha, r, i, rxb->handle, rxb->paddr, (rxb->m_head)->m_pkthdr.len); } else { device_printf(ha->pci_dev, "%s: ql_get_mbuf [%d, %d] failed\n", __func__, r, i); bus_dmamap_destroy(ha->rx_tag, rxb->map); goto qla_alloc_rcv_std_err; } } } return 0; qla_alloc_rcv_std_err: return (-1); } static void qla_free_rcv_std(qla_host_t *ha) { int i, r; qla_rx_buf_t *rxb; for (r = 0; r < ha->hw.num_rds_rings; r++) { for (i = 0; i < NUM_RX_DESCRIPTORS; i++) { rxb = &ha->rx_ring[r].rx_buf[i]; if (rxb->m_head != NULL) { bus_dmamap_unload(ha->rx_tag, rxb->map); bus_dmamap_destroy(ha->rx_tag, rxb->map); m_freem(rxb->m_head); rxb->m_head = NULL; } } } return; } static int qla_alloc_rcv_bufs(qla_host_t *ha) { int i, ret = 0; if (bus_dma_tag_create(NULL, /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MJUM9BYTES, /* maxsize */ 1, /* nsegments */ MJUM9BYTES, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &ha->rx_tag)) { device_printf(ha->pci_dev, "%s: rx_tag alloc failed\n", __func__); return (ENOMEM); } bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS)); for (i = 0; i < ha->hw.num_sds_rings; i++) { ha->hw.sds[i].sdsr_next = 0; ha->hw.sds[i].rxb_free = NULL; ha->hw.sds[i].rx_free = 0; } ret = qla_alloc_rcv_std(ha); return (ret); } static void qla_free_rcv_bufs(qla_host_t *ha) { int i; qla_free_rcv_std(ha); if (ha->rx_tag != NULL) { bus_dma_tag_destroy(ha->rx_tag); ha->rx_tag = NULL; } bzero((void *)ha->rx_ring, (sizeof(qla_rx_ring_t) * MAX_RDS_RINGS)); for (i = 0; i < ha->hw.num_sds_rings; i++) { ha->hw.sds[i].sdsr_next = 0; ha->hw.sds[i].rxb_free = NULL; ha->hw.sds[i].rx_free = 0; } return; } int ql_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp) { register struct mbuf *mp = nmp; struct ifnet *ifp; int ret = 0; uint32_t offset; bus_dma_segment_t segs[1]; int nsegs, mbuf_size; QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); ifp = ha->ifp; if (ha->hw.enable_9kb) mbuf_size = MJUM9BYTES; else mbuf_size = MCLBYTES; if (mp == NULL) { if (QL_ERR_INJECT(ha, INJCT_M_GETCL_M_GETJCL_FAILURE)) return(-1); if (ha->hw.enable_9kb) mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, mbuf_size); else mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (mp == NULL) { ha->err_m_getcl++; ret = ENOBUFS; device_printf(ha->pci_dev, 
"%s: m_getcl failed\n", __func__); goto exit_ql_get_mbuf; } mp->m_len = mp->m_pkthdr.len = mbuf_size; } else { mp->m_len = mp->m_pkthdr.len = mbuf_size; mp->m_data = mp->m_ext.ext_buf; mp->m_next = NULL; } offset = (uint32_t)((unsigned long long)mp->m_data & 0x7ULL); if (offset) { offset = 8 - offset; m_adj(mp, offset); } /* * Using memory from the mbuf cluster pool, invoke the bus_dma * machinery to arrange the memory mapping. */ ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, rxb->map, mp, segs, &nsegs, BUS_DMA_NOWAIT); rxb->paddr = segs[0].ds_addr; if (ret || !rxb->paddr || (nsegs != 1)) { m_free(mp); rxb->m_head = NULL; device_printf(ha->pci_dev, "%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n", __func__, ret, (long long unsigned int)rxb->paddr, nsegs); ret = -1; goto exit_ql_get_mbuf; } rxb->m_head = mp; bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_PREREAD); exit_ql_get_mbuf: QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = 0x%08x\n", __func__, ret)); return (ret); } static void qla_get_peer(qla_host_t *ha) { device_t *peers; int count, i, slot; int my_slot = pci_get_slot(ha->pci_dev); if (device_get_children(device_get_parent(ha->pci_dev), &peers, &count)) return; for (i = 0; i < count; i++) { slot = pci_get_slot(peers[i]); if ((slot >= 0) && (slot == my_slot) && (pci_get_device(peers[i]) == pci_get_device(ha->pci_dev))) { if (ha->pci_dev != peers[i]) ha->peer_dev = peers[i]; } } } static void qla_send_msg_to_peer(qla_host_t *ha, uint32_t msg_to_peer) { qla_host_t *ha_peer; if (ha->peer_dev) { if ((ha_peer = device_get_softc(ha->peer_dev)) != NULL) { ha_peer->msg_from_peer = msg_to_peer; } } } void qla_set_error_recovery(qla_host_t *ha) { struct ifnet *ifp = ha->ifp; if (!cold && ha->enable_error_recovery) { if (ifp) ifp->if_drv_flags &= ~IFF_DRV_RUNNING; ha->qla_initiate_recovery = 1; } else ha->offline = 1; return; } static void qla_error_recovery(void *context, int pending) { qla_host_t *ha = context; uint32_t msecs_100 = 400; struct ifnet *ifp = ha->ifp; int i = 0; device_printf(ha->pci_dev, "%s: enter\n", __func__); ha->hw.imd_compl = 1; taskqueue_drain_all(ha->stats_tq); taskqueue_drain_all(ha->async_event_tq); if (QLA_LOCK(ha, __func__, -1, 0) != 0) return; device_printf(ha->pci_dev, "%s: ts_usecs = %ld start\n", __func__, qla_get_usec_timestamp()); if (ha->qla_interface_up) { qla_mdelay(__func__, 300); //ifp->if_drv_flags &= ~IFF_DRV_RUNNING; for (i = 0; i < ha->hw.num_sds_rings; i++) { qla_tx_fp_t *fp; fp = &ha->tx_fp[i]; if (fp == NULL) continue; if (fp->tx_br != NULL) { mtx_lock(&fp->tx_mtx); mtx_unlock(&fp->tx_mtx); } } } qla_drain_fp_taskqueues(ha); if ((ha->pci_func & 0x1) == 0) { if (!ha->msg_from_peer) { qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET); while ((ha->msg_from_peer != QL_PEER_MSG_ACK) && msecs_100--) qla_mdelay(__func__, 100); } ha->msg_from_peer = 0; if (ha->enable_minidump) ql_minidump(ha); if (ha->enable_driverstate_dump) ql_capture_drvr_state(ha); if (ql_init_hw(ha)) { device_printf(ha->pci_dev, "%s: ts_usecs = %ld exit: ql_init_hw failed\n", __func__, qla_get_usec_timestamp()); ha->offline = 1; goto qla_error_recovery_exit; } if (ha->qla_interface_up) { qla_free_xmt_bufs(ha); qla_free_rcv_bufs(ha); } if (!QL_ERR_INJECT(ha, INJCT_PEER_PORT_FAILURE_ERR_RECOVERY)) qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK); } else { if (ha->msg_from_peer == QL_PEER_MSG_RESET) { ha->msg_from_peer = 0; if (!QL_ERR_INJECT(ha, INJCT_PEER_PORT_FAILURE_ERR_RECOVERY)) qla_send_msg_to_peer(ha, QL_PEER_MSG_ACK); } else { qla_send_msg_to_peer(ha, QL_PEER_MSG_RESET); } while 
((ha->msg_from_peer != QL_PEER_MSG_ACK) && msecs_100--) qla_mdelay(__func__, 100); ha->msg_from_peer = 0; if (ha->enable_driverstate_dump) ql_capture_drvr_state(ha); if (msecs_100 == 0) { device_printf(ha->pci_dev, "%s: ts_usecs = %ld exit: QL_PEER_MSG_ACK not received\n", __func__, qla_get_usec_timestamp()); ha->offline = 1; goto qla_error_recovery_exit; } if (ql_init_hw(ha)) { device_printf(ha->pci_dev, "%s: ts_usecs = %ld exit: ql_init_hw failed\n", __func__, qla_get_usec_timestamp()); ha->offline = 1; goto qla_error_recovery_exit; } if (ha->qla_interface_up) { qla_free_xmt_bufs(ha); qla_free_rcv_bufs(ha); } } qla_mdelay(__func__, ha->ms_delay_after_init); *((uint32_t *)&ha->hw.flags) = 0; ha->qla_initiate_recovery = 0; if (ha->qla_interface_up) { if (qla_alloc_xmt_bufs(ha) != 0) { ha->offline = 1; goto qla_error_recovery_exit; } qla_confirm_9kb_enable(ha); if (qla_alloc_rcv_bufs(ha) != 0) { ha->offline = 1; goto qla_error_recovery_exit; } ha->stop_rcv = 0; if (ql_init_hw_if(ha) == 0) { ifp = ha->ifp; ifp->if_drv_flags |= IFF_DRV_RUNNING; ha->qla_watchdog_pause = 0; ql_update_link_state(ha); } else { ha->offline = 1; if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_IF_START_FAILURE) ha->hw.sp_log_stop = -1; } } else { ha->qla_watchdog_pause = 0; } qla_error_recovery_exit: if (ha->offline ) { device_printf(ha->pci_dev, "%s: ts_usecs = %ld port offline\n", __func__, qla_get_usec_timestamp()); if (ha->hw.sp_log_stop_events & Q8_SP_LOG_STOP_ERR_RECOVERY_FAILURE) ha->hw.sp_log_stop = -1; } QLA_UNLOCK(ha, __func__); if (!ha->offline) callout_reset(&ha->tx_callout, QLA_WATCHDOG_CALLOUT_TICKS, qla_watchdog, ha); device_printf(ha->pci_dev, "%s: ts_usecs = %ld exit\n", __func__, qla_get_usec_timestamp()); return; } static void qla_async_event(void *context, int pending) { qla_host_t *ha = context; if (QLA_LOCK(ha, __func__, -1, 0) != 0) return; if (ha->async_event) { ha->async_event = 0; qla_hw_async_event(ha); } QLA_UNLOCK(ha, __func__); return; } static void qla_stats(void *context, int pending) { qla_host_t *ha; ha = context; ql_get_stats(ha); return; } diff --git a/sys/dev/re/if_re.c b/sys/dev/re/if_re.c index 76fa06f63585..66fe980dfc1d 100644 --- a/sys/dev/re/if_re.c +++ b/sys/dev/re/if_re.c @@ -1,4155 +1,4151 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 1997, 1998-2003 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * RealTek 8139C+/8169/8169S/8110S/8168/8111/8101E PCI NIC driver * * Written by Bill Paul * Senior Networking Software Engineer * Wind River Systems */ /* * This driver is designed to support RealTek's next generation of * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S, * RTL8110S, the RTL8168, the RTL8111 and the RTL8101E. * * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible * with the older 8139 family, however it also supports a special * C+ mode of operation that provides several new performance enhancing * features. These include: * * o Descriptor based DMA mechanism. Each descriptor represents * a single packet fragment. Data buffers may be aligned on * any byte boundary. * * o 64-bit DMA * * o TCP/IP checksum offload for both RX and TX * * o High and normal priority transmit DMA rings * * o VLAN tag insertion and extraction * * o TCP large send (segmentation offload) * * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+ * programming API is fairly straightforward. The RX filtering, EEPROM * access and PHY access is the same as it is on the older 8139 series * chips. * * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the * same programming API and feature set as the 8139C+ with the following * differences and additions: * * o 1000Mbps mode * * o Jumbo frames * * o GMII and TBI ports/registers for interfacing with copper * or fiber PHYs * * o RX and TX DMA rings can have up to 1024 descriptors * (the 8139C+ allows a maximum of 64) * * o Slight differences in register layout from the 8139C+ * * The TX start and timer interrupt registers are at different locations * on the 8169 than they are on the 8139C+. Also, the status word in the * RX descriptor has a slightly different bit layout. The 8169 does not * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska' * copper gigE PHY. * * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs * (the 'S' stands for 'single-chip'). These devices have the same * programming API as the older 8169, but also have some vendor-specific * registers for the on-board PHY. The 8110S is a LAN-on-motherboard * part designed to be pin-compatible with the RealTek 8100 10/100 chip. * * This driver takes advantage of the RX and TX checksum offload and * VLAN tag insertion/extraction features. It also implements TX * interrupt moderation using the timer interrupt registers, which * significantly reduces TX interrupt load. There is also support * for jumbo frames, however the 8169/8169S/8110S can not transmit * jumbo frames larger than 7440, so the max MTU possible with this * driver is 7422 bytes. 
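 * (The 18-byte difference is the 14-byte Ethernet header plus the
 * 4-byte CRC: 7440 - 14 - 4 = 7422.)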
*/ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MODULE_DEPEND(re, pci, 1, 1, 1); MODULE_DEPEND(re, ether, 1, 1, 1); MODULE_DEPEND(re, miibus, 1, 1, 1); /* "device miibus" required. See GENERIC if you get errors here. */ #include "miibus_if.h" /* Tunables. */ static int intr_filter = 0; TUNABLE_INT("hw.re.intr_filter", &intr_filter); static int msi_disable = 0; TUNABLE_INT("hw.re.msi_disable", &msi_disable); static int msix_disable = 0; TUNABLE_INT("hw.re.msix_disable", &msix_disable); static int prefer_iomap = 0; TUNABLE_INT("hw.re.prefer_iomap", &prefer_iomap); #define RE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) /* * Various supported device vendors/types and their names. */ static const struct rl_type re_devs[] = { { DLINK_VENDORID, DLINK_DEVICEID_528T, 0, "D-Link DGE-528(T) Gigabit Ethernet Adapter" }, { DLINK_VENDORID, DLINK_DEVICEID_530T_REVC, 0, "D-Link DGE-530(T) Gigabit Ethernet Adapter" }, { RT_VENDORID, RT_DEVICEID_8139, 0, "RealTek 8139C+ 10/100BaseTX" }, { RT_VENDORID, RT_DEVICEID_8101E, 0, "RealTek 810xE PCIe 10/100baseTX" }, { RT_VENDORID, RT_DEVICEID_8168, 0, "RealTek 8168/8111 B/C/CP/D/DP/E/F/G PCIe Gigabit Ethernet" }, { NCUBE_VENDORID, RT_DEVICEID_8168, 0, "TP-Link TG-3468 v2 (RTL8168) Gigabit Ethernet" }, { RT_VENDORID, RT_DEVICEID_8169, 0, "RealTek 8169/8169S/8169SB(L)/8110S/8110SB(L) Gigabit Ethernet" }, { RT_VENDORID, RT_DEVICEID_8169SC, 0, "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" }, { COREGA_VENDORID, COREGA_DEVICEID_CGLAPCIGT, 0, "Corega CG-LAPCIGT (RTL8169S) Gigabit Ethernet" }, { LINKSYS_VENDORID, LINKSYS_DEVICEID_EG1032, 0, "Linksys EG1032 (RTL8169S) Gigabit Ethernet" }, { USR_VENDORID, USR_DEVICEID_997902, 0, "US Robotics 997902 (RTL8169S) Gigabit Ethernet" } }; static const struct rl_hwrev re_hwrevs[] = { { RL_HWREV_8139, RL_8139, "", RL_MTU }, { RL_HWREV_8139A, RL_8139, "A", RL_MTU }, { RL_HWREV_8139AG, RL_8139, "A-G", RL_MTU }, { RL_HWREV_8139B, RL_8139, "B", RL_MTU }, { RL_HWREV_8130, RL_8139, "8130", RL_MTU }, { RL_HWREV_8139C, RL_8139, "C", RL_MTU }, { RL_HWREV_8139D, RL_8139, "8139D/8100B/8100C", RL_MTU }, { RL_HWREV_8139CPLUS, RL_8139CPLUS, "C+", RL_MTU }, { RL_HWREV_8168B_SPIN1, RL_8169, "8168", RL_JUMBO_MTU }, { RL_HWREV_8169, RL_8169, "8169", RL_JUMBO_MTU }, { RL_HWREV_8169S, RL_8169, "8169S", RL_JUMBO_MTU }, { RL_HWREV_8110S, RL_8169, "8110S", RL_JUMBO_MTU }, { RL_HWREV_8169_8110SB, RL_8169, "8169SB/8110SB", RL_JUMBO_MTU }, { RL_HWREV_8169_8110SC, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU }, { RL_HWREV_8169_8110SBL, RL_8169, "8169SBL/8110SBL", RL_JUMBO_MTU }, { RL_HWREV_8169_8110SCE, RL_8169, "8169SC/8110SC", RL_JUMBO_MTU }, { RL_HWREV_8100, RL_8139, "8100", RL_MTU }, { RL_HWREV_8101, RL_8139, "8101", RL_MTU }, { RL_HWREV_8100E, RL_8169, "8100E", RL_MTU }, { RL_HWREV_8101E, RL_8169, "8101E", RL_MTU }, { RL_HWREV_8102E, RL_8169, "8102E", RL_MTU }, { RL_HWREV_8102EL, RL_8169, "8102EL", RL_MTU }, { RL_HWREV_8102EL_SPIN1, RL_8169, "8102EL", RL_MTU }, { RL_HWREV_8103E, RL_8169, "8103E", RL_MTU }, { RL_HWREV_8401E, RL_8169, "8401E", RL_MTU }, { RL_HWREV_8402, RL_8169, "8402", RL_MTU }, { RL_HWREV_8105E, RL_8169, "8105E", RL_MTU }, { RL_HWREV_8105E_SPIN1, RL_8169, "8105E", RL_MTU }, { RL_HWREV_8106E, RL_8169, "8106E", RL_MTU }, { 
RL_HWREV_8168B_SPIN2, RL_8169, "8168", RL_JUMBO_MTU }, { RL_HWREV_8168B_SPIN3, RL_8169, "8168", RL_JUMBO_MTU }, { RL_HWREV_8168C, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K }, { RL_HWREV_8168C_SPIN2, RL_8169, "8168C/8111C", RL_JUMBO_MTU_6K }, { RL_HWREV_8168CP, RL_8169, "8168CP/8111CP", RL_JUMBO_MTU_6K }, { RL_HWREV_8168D, RL_8169, "8168D/8111D", RL_JUMBO_MTU_9K }, { RL_HWREV_8168DP, RL_8169, "8168DP/8111DP", RL_JUMBO_MTU_9K }, { RL_HWREV_8168E, RL_8169, "8168E/8111E", RL_JUMBO_MTU_9K}, { RL_HWREV_8168E_VL, RL_8169, "8168E/8111E-VL", RL_JUMBO_MTU_6K}, { RL_HWREV_8168EP, RL_8169, "8168EP/8111EP", RL_JUMBO_MTU_9K}, { RL_HWREV_8168F, RL_8169, "8168F/8111F", RL_JUMBO_MTU_9K}, { RL_HWREV_8168G, RL_8169, "8168G/8111G", RL_JUMBO_MTU_9K}, { RL_HWREV_8168GU, RL_8169, "8168GU/8111GU", RL_JUMBO_MTU_9K}, { RL_HWREV_8168H, RL_8169, "8168H/8111H", RL_JUMBO_MTU_9K}, { RL_HWREV_8411, RL_8169, "8411", RL_JUMBO_MTU_9K}, { RL_HWREV_8411B, RL_8169, "8411B", RL_JUMBO_MTU_9K}, { 0, 0, NULL, 0 } }; static int re_probe (device_t); static int re_attach (device_t); static int re_detach (device_t); static int re_encap (struct rl_softc *, struct mbuf **); static void re_dma_map_addr (void *, bus_dma_segment_t *, int, int); static int re_allocmem (device_t, struct rl_softc *); static __inline void re_discard_rxbuf (struct rl_softc *, int); static int re_newbuf (struct rl_softc *, int); static int re_jumbo_newbuf (struct rl_softc *, int); static int re_rx_list_init (struct rl_softc *); static int re_jrx_list_init (struct rl_softc *); static int re_tx_list_init (struct rl_softc *); #ifdef RE_FIXUP_RX static __inline void re_fixup_rx (struct mbuf *); #endif static int re_rxeof (struct rl_softc *, int *); static void re_txeof (struct rl_softc *); #ifdef DEVICE_POLLING static int re_poll (struct ifnet *, enum poll_cmd, int); static int re_poll_locked (struct ifnet *, enum poll_cmd, int); #endif static int re_intr (void *); static void re_intr_msi (void *); static void re_tick (void *); static void re_int_task (void *, int); static void re_start (struct ifnet *); static void re_start_locked (struct ifnet *); static void re_start_tx (struct rl_softc *); static int re_ioctl (struct ifnet *, u_long, caddr_t); static void re_init (void *); static void re_init_locked (struct rl_softc *); static void re_stop (struct rl_softc *); static void re_watchdog (struct rl_softc *); static int re_suspend (device_t); static int re_resume (device_t); static int re_shutdown (device_t); static int re_ifmedia_upd (struct ifnet *); static void re_ifmedia_sts (struct ifnet *, struct ifmediareq *); static void re_eeprom_putbyte (struct rl_softc *, int); static void re_eeprom_getword (struct rl_softc *, int, u_int16_t *); static void re_read_eeprom (struct rl_softc *, caddr_t, int, int); static int re_gmii_readreg (device_t, int, int); static int re_gmii_writereg (device_t, int, int, int); static int re_miibus_readreg (device_t, int, int); static int re_miibus_writereg (device_t, int, int, int); static void re_miibus_statchg (device_t); static void re_set_jumbo (struct rl_softc *, int); static void re_set_rxmode (struct rl_softc *); static void re_reset (struct rl_softc *); static void re_setwol (struct rl_softc *); static void re_clrwol (struct rl_softc *); static void re_set_linkspeed (struct rl_softc *); DEBUGNET_DEFINE(re); #ifdef DEV_NETMAP /* see ixgbe.c for details */ #include MODULE_DEPEND(re, netmap, 1, 1, 1); #endif /* !DEV_NETMAP */ #ifdef RE_DIAG static int re_diag (struct rl_softc *); #endif static void re_add_sysctls (struct rl_softc *); 
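/*
 * The hw.re.* variables declared above (intr_filter, msi_disable,
 * msix_disable, prefer_iomap) are fetched with TUNABLE_INT(), so they
 * can be set as boot-time loader tunables.  An illustrative (not
 * prescriptive) /boot/loader.conf fragment that falls back to legacy
 * INTx interrupts and prefers I/O space mapping would be:
 *
 *	hw.re.msi_disable="1"
 *	hw.re.msix_disable="1"
 *	hw.re.prefer_iomap="1"
 */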
static int re_sysctl_stats (SYSCTL_HANDLER_ARGS); static int sysctl_int_range (SYSCTL_HANDLER_ARGS, int, int); static int sysctl_hw_re_int_mod (SYSCTL_HANDLER_ARGS); static device_method_t re_methods[] = { /* Device interface */ DEVMETHOD(device_probe, re_probe), DEVMETHOD(device_attach, re_attach), DEVMETHOD(device_detach, re_detach), DEVMETHOD(device_suspend, re_suspend), DEVMETHOD(device_resume, re_resume), DEVMETHOD(device_shutdown, re_shutdown), /* MII interface */ DEVMETHOD(miibus_readreg, re_miibus_readreg), DEVMETHOD(miibus_writereg, re_miibus_writereg), DEVMETHOD(miibus_statchg, re_miibus_statchg), DEVMETHOD_END }; static driver_t re_driver = { "re", re_methods, sizeof(struct rl_softc) }; static devclass_t re_devclass; DRIVER_MODULE(re, pci, re_driver, re_devclass, 0, 0); DRIVER_MODULE(miibus, re, miibus_driver, miibus_devclass, 0, 0); #define EE_SET(x) \ CSR_WRITE_1(sc, RL_EECMD, \ CSR_READ_1(sc, RL_EECMD) | x) #define EE_CLR(x) \ CSR_WRITE_1(sc, RL_EECMD, \ CSR_READ_1(sc, RL_EECMD) & ~x) /* * Send a read command and address to the EEPROM, check for ACK. */ static void re_eeprom_putbyte(struct rl_softc *sc, int addr) { int d, i; d = addr | (RL_9346_READ << sc->rl_eewidth); /* * Feed in each bit and strobe the clock. */ for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) { if (d & i) { EE_SET(RL_EE_DATAIN); } else { EE_CLR(RL_EE_DATAIN); } DELAY(100); EE_SET(RL_EE_CLK); DELAY(150); EE_CLR(RL_EE_CLK); DELAY(100); } } /* * Read a word of data stored in the EEPROM at address 'addr.' */ static void re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest) { int i; u_int16_t word = 0; /* * Send address of word we want to read. */ re_eeprom_putbyte(sc, addr); /* * Start reading bits from EEPROM. */ for (i = 0x8000; i; i >>= 1) { EE_SET(RL_EE_CLK); DELAY(100); if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT) word |= i; EE_CLR(RL_EE_CLK); DELAY(100); } *dest = word; } /* * Read a sequence of words from the EEPROM. */ static void re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt) { int i; u_int16_t word = 0, *ptr; CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM); DELAY(100); for (i = 0; i < cnt; i++) { CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL); re_eeprom_getword(sc, off + i, &word); CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL); ptr = (u_int16_t *)(dest + (i * 2)); *ptr = word; } CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM); } static int re_gmii_readreg(device_t dev, int phy, int reg) { struct rl_softc *sc; u_int32_t rval; int i; sc = device_get_softc(dev); /* Let the rgephy driver read the GMEDIASTAT register */ if (reg == RL_GMEDIASTAT) { rval = CSR_READ_1(sc, RL_GMEDIASTAT); return (rval); } CSR_WRITE_4(sc, RL_PHYAR, reg << 16); for (i = 0; i < RL_PHY_TIMEOUT; i++) { rval = CSR_READ_4(sc, RL_PHYAR); if (rval & RL_PHYAR_BUSY) break; DELAY(25); } if (i == RL_PHY_TIMEOUT) { device_printf(sc->rl_dev, "PHY read failed\n"); return (0); } /* * Controller requires a 20us delay to process next MDIO request. */ DELAY(20); return (rval & RL_PHYAR_PHYDATA); } static int re_gmii_writereg(device_t dev, int phy, int reg, int data) { struct rl_softc *sc; u_int32_t rval; int i; sc = device_get_softc(dev); CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) | (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY); for (i = 0; i < RL_PHY_TIMEOUT; i++) { rval = CSR_READ_4(sc, RL_PHYAR); if (!(rval & RL_PHYAR_BUSY)) break; DELAY(25); } if (i == RL_PHY_TIMEOUT) { device_printf(sc->rl_dev, "PHY write failed\n"); return (0); } /* * Controller requires a 20us delay to process next MDIO request. 
*/ DELAY(20); return (0); } static int re_miibus_readreg(device_t dev, int phy, int reg) { struct rl_softc *sc; u_int16_t rval = 0; u_int16_t re8139_reg = 0; sc = device_get_softc(dev); if (sc->rl_type == RL_8169) { rval = re_gmii_readreg(dev, phy, reg); return (rval); } switch (reg) { case MII_BMCR: re8139_reg = RL_BMCR; break; case MII_BMSR: re8139_reg = RL_BMSR; break; case MII_ANAR: re8139_reg = RL_ANAR; break; case MII_ANER: re8139_reg = RL_ANER; break; case MII_ANLPAR: re8139_reg = RL_LPAR; break; case MII_PHYIDR1: case MII_PHYIDR2: return (0); /* * Allow the rlphy driver to read the media status * register. If we have a link partner which does not * support NWAY, this is the register which will tell * us the results of parallel detection. */ case RL_MEDIASTAT: rval = CSR_READ_1(sc, RL_MEDIASTAT); return (rval); default: device_printf(sc->rl_dev, "bad phy register\n"); return (0); } rval = CSR_READ_2(sc, re8139_reg); if (sc->rl_type == RL_8139CPLUS && re8139_reg == RL_BMCR) { /* 8139C+ has different bit layout. */ rval &= ~(BMCR_LOOP | BMCR_ISO); } return (rval); } static int re_miibus_writereg(device_t dev, int phy, int reg, int data) { struct rl_softc *sc; u_int16_t re8139_reg = 0; int rval = 0; sc = device_get_softc(dev); if (sc->rl_type == RL_8169) { rval = re_gmii_writereg(dev, phy, reg, data); return (rval); } switch (reg) { case MII_BMCR: re8139_reg = RL_BMCR; if (sc->rl_type == RL_8139CPLUS) { /* 8139C+ has different bit layout. */ data &= ~(BMCR_LOOP | BMCR_ISO); } break; case MII_BMSR: re8139_reg = RL_BMSR; break; case MII_ANAR: re8139_reg = RL_ANAR; break; case MII_ANER: re8139_reg = RL_ANER; break; case MII_ANLPAR: re8139_reg = RL_LPAR; break; case MII_PHYIDR1: case MII_PHYIDR2: return (0); break; default: device_printf(sc->rl_dev, "bad phy register\n"); return (0); } CSR_WRITE_2(sc, re8139_reg, data); return (0); } static void re_miibus_statchg(device_t dev) { struct rl_softc *sc; struct ifnet *ifp; struct mii_data *mii; sc = device_get_softc(dev); mii = device_get_softc(sc->rl_miibus); ifp = sc->rl_ifp; if (mii == NULL || ifp == NULL || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; sc->rl_flags &= ~RL_FLAG_LINK; if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: sc->rl_flags |= RL_FLAG_LINK; break; case IFM_1000_T: if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0) break; sc->rl_flags |= RL_FLAG_LINK; break; default: break; } } /* * RealTek controllers do not provide any interface to the RX/TX * MACs for resolved speed, duplex and flow-control parameters. */ } static u_int re_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { uint32_t h, *hashes = arg; h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26; if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); return (1); } /* * Set the RX configuration and 64-bit multicast hash filter. 
*/ static void re_set_rxmode(struct rl_softc *sc) { struct ifnet *ifp; uint32_t h, hashes[2] = { 0, 0 }; uint32_t rxfilt; RL_LOCK_ASSERT(sc); ifp = sc->rl_ifp; rxfilt = RL_RXCFG_CONFIG | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD; if ((sc->rl_flags & RL_FLAG_EARLYOFF) != 0) rxfilt |= RL_RXCFG_EARLYOFF; else if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) rxfilt |= RL_RXCFG_EARLYOFFV2; if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) { if (ifp->if_flags & IFF_PROMISC) rxfilt |= RL_RXCFG_RX_ALLPHYS; /* * Unlike other hardwares, we have to explicitly set * RL_RXCFG_RX_MULTI to receive multicast frames in * promiscuous mode. */ rxfilt |= RL_RXCFG_RX_MULTI; hashes[0] = hashes[1] = 0xffffffff; goto done; } if_foreach_llmaddr(ifp, re_hash_maddr, hashes); if (hashes[0] != 0 || hashes[1] != 0) { /* * For some unfathomable reason, RealTek decided to * reverse the order of the multicast hash registers * in the PCI Express parts. This means we have to * write the hash pattern in reverse order for those * devices. */ if ((sc->rl_flags & RL_FLAG_PCIE) != 0) { h = bswap32(hashes[0]); hashes[0] = bswap32(hashes[1]); hashes[1] = h; } rxfilt |= RL_RXCFG_RX_MULTI; } if (sc->rl_hwrev->rl_rev == RL_HWREV_8168F) { /* Disable multicast filtering due to silicon bug. */ hashes[0] = 0xffffffff; hashes[1] = 0xffffffff; } done: CSR_WRITE_4(sc, RL_MAR0, hashes[0]); CSR_WRITE_4(sc, RL_MAR4, hashes[1]); CSR_WRITE_4(sc, RL_RXCFG, rxfilt); } static void re_reset(struct rl_softc *sc) { int i; RL_LOCK_ASSERT(sc); CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET); for (i = 0; i < RL_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET)) break; } if (i == RL_TIMEOUT) device_printf(sc->rl_dev, "reset never completed!\n"); if ((sc->rl_flags & RL_FLAG_MACRESET) != 0) CSR_WRITE_1(sc, 0x82, 1); if (sc->rl_hwrev->rl_rev == RL_HWREV_8169S) re_gmii_writereg(sc->rl_dev, 1, 0x0b, 0); } #ifdef RE_DIAG /* * The following routine is designed to test for a defect on some * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64# * lines connected to the bus, however for a 32-bit only card, they * should be pulled high. The result of this defect is that the * NIC will not work right if you plug it into a 64-bit slot: DMA * operations will be done with 64-bit transfers, which will fail * because the 64-bit data lines aren't connected. * * There's no way to work around this (short of talking a soldering * iron to the board), however we can detect it. The method we use * here is to put the NIC into digital loopback mode, set the receiver * to promiscuous mode, and then try to send a frame. We then compare * the frame data we sent to what was received. If the data matches, * then the NIC is working correctly, otherwise we know the user has * a defective NIC which has been mistakenly plugged into a 64-bit PCI * slot. In the latter case, there's no way the NIC can work correctly, * so we print out a message on the console and abort the device attach. */ static int re_diag(struct rl_softc *sc) { struct ifnet *ifp = sc->rl_ifp; struct mbuf *m0; struct ether_header *eh; struct rl_desc *cur_rx; u_int16_t status; u_int32_t rxstat; int total_len, i, error = 0, phyaddr; u_int8_t dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' }; u_int8_t src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' }; /* Allocate a single mbuf */ MGETHDR(m0, M_NOWAIT, MT_DATA); if (m0 == NULL) return (ENOBUFS); RL_LOCK(sc); /* * Initialize the NIC in test mode. 
This sets the chip up * so that it can send and receive frames, but performs the * following special functions: * - Puts receiver in promiscuous mode * - Enables digital loopback mode * - Leaves interrupts turned off */ ifp->if_flags |= IFF_PROMISC; sc->rl_testmode = 1; ifp->if_drv_flags &= ~IFF_DRV_RUNNING; re_init_locked(sc); sc->rl_flags |= RL_FLAG_LINK; if (sc->rl_type == RL_8169) phyaddr = 1; else phyaddr = 0; re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_RESET); for (i = 0; i < RL_TIMEOUT; i++) { status = re_miibus_readreg(sc->rl_dev, phyaddr, MII_BMCR); if (!(status & BMCR_RESET)) break; } re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_LOOP); CSR_WRITE_2(sc, RL_ISR, RL_INTRS); DELAY(100000); /* Put some data in the mbuf */ eh = mtod(m0, struct ether_header *); bcopy ((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN); bcopy ((char *)&src, eh->ether_shost, ETHER_ADDR_LEN); eh->ether_type = htons(ETHERTYPE_IP); m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN; /* * Queue the packet, start transmission. * Note: IF_HANDOFF() ultimately calls re_start() for us. */ CSR_WRITE_2(sc, RL_ISR, 0xFFFF); RL_UNLOCK(sc); /* XXX: re_diag must not be called when in ALTQ mode */ IF_HANDOFF(&ifp->if_snd, m0, ifp); RL_LOCK(sc); m0 = NULL; /* Wait for it to propagate through the chip */ DELAY(100000); for (i = 0; i < RL_TIMEOUT; i++) { status = CSR_READ_2(sc, RL_ISR); CSR_WRITE_2(sc, RL_ISR, status); if ((status & (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) == (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) break; DELAY(10); } if (i == RL_TIMEOUT) { device_printf(sc->rl_dev, "diagnostic failed, failed to receive packet in" " loopback mode\n"); error = EIO; goto done; } /* * The packet should have been dumped into the first * entry in the RX DMA ring. Grab it from there. */ bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, BUS_DMASYNC_POSTREAD); bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, sc->rl_ldata.rl_rx_desc[0].rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, sc->rl_ldata.rl_rx_desc[0].rx_dmamap); m0 = sc->rl_ldata.rl_rx_desc[0].rx_m; sc->rl_ldata.rl_rx_desc[0].rx_m = NULL; eh = mtod(m0, struct ether_header *); cur_rx = &sc->rl_ldata.rl_rx_list[0]; total_len = RL_RXBYTES(cur_rx); rxstat = le32toh(cur_rx->rl_cmdstat); if (total_len != ETHER_MIN_LEN) { device_printf(sc->rl_dev, "diagnostic failed, received short packet\n"); error = EIO; goto done; } /* Test that the received packet data matches what we sent. */ if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) || bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) || ntohs(eh->ether_type) != ETHERTYPE_IP) { device_printf(sc->rl_dev, "WARNING, DMA FAILURE!\n"); device_printf(sc->rl_dev, "expected TX data: %6D/%6D/0x%x\n", dst, ":", src, ":", ETHERTYPE_IP); device_printf(sc->rl_dev, "received RX data: %6D/%6D/0x%x\n", eh->ether_dhost, ":", eh->ether_shost, ":", ntohs(eh->ether_type)); device_printf(sc->rl_dev, "You may have a defective 32-bit " "NIC plugged into a 64-bit PCI slot.\n"); device_printf(sc->rl_dev, "Please re-install the NIC in a " "32-bit slot for proper operation.\n"); device_printf(sc->rl_dev, "Read the re(4) man page for more " "details.\n"); error = EIO; } done: /* Turn interface off, release resources */ sc->rl_testmode = 0; sc->rl_flags &= ~RL_FLAG_LINK; ifp->if_flags &= ~IFF_PROMISC; re_stop(sc); if (m0 != NULL) m_freem(m0); RL_UNLOCK(sc); return (error); } #endif /* * Probe for a RealTek 8139C+/8169/8110 chip. 
Check the PCI vendor and device * IDs against our list and return a device name if we find a match. */ static int re_probe(device_t dev) { const struct rl_type *t; uint16_t devid, vendor; uint16_t revid, sdevid; int i; vendor = pci_get_vendor(dev); devid = pci_get_device(dev); revid = pci_get_revid(dev); sdevid = pci_get_subdevice(dev); if (vendor == LINKSYS_VENDORID && devid == LINKSYS_DEVICEID_EG1032) { if (sdevid != LINKSYS_SUBDEVICE_EG1032_REV3) { /* * Only attach to rev. 3 of the Linksys EG1032 adapter. * Rev. 2 is supported by sk(4). */ return (ENXIO); } } if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) { if (revid != 0x20) { /* 8139, let rl(4) take care of this device. */ return (ENXIO); } } t = re_devs; for (i = 0; i < nitems(re_devs); i++, t++) { if (vendor == t->rl_vid && devid == t->rl_did) { device_set_desc(dev, t->rl_name); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } /* * Map a single buffer address. */ static void re_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *addr; if (error) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); addr = arg; *addr = segs->ds_addr; } static int re_allocmem(device_t dev, struct rl_softc *sc) { bus_addr_t lowaddr; bus_size_t rx_list_size, tx_list_size; int error; int i; rx_list_size = sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc); tx_list_size = sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc); /* * Allocate the parent bus DMA tag appropriate for PCI. * In order to use DAC, RL_CPLUSCMD_PCI_DAC bit of RL_CPLUS_CMD * register should be set. However some RealTek chips are known * to be buggy on DAC handling, therefore disable DAC by limiting * DMA address space to 32bit. PCIe variants of RealTek chips * may not have the limitation. */ lowaddr = BUS_SPACE_MAXADDR; if ((sc->rl_flags & RL_FLAG_PCIE) == 0) lowaddr = BUS_SPACE_MAXADDR_32BIT; error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, lowaddr, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->rl_parent_tag); if (error) { device_printf(dev, "could not allocate parent DMA tag\n"); return (error); } /* * Allocate map for TX mbufs. */ error = bus_dma_tag_create(sc->rl_parent_tag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * RL_NTXSEGS, RL_NTXSEGS, 4096, 0, NULL, NULL, &sc->rl_ldata.rl_tx_mtag); if (error) { device_printf(dev, "could not allocate TX DMA tag\n"); return (error); } /* * Allocate map for RX mbufs. */ if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t), 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MJUM9BYTES, 1, MJUM9BYTES, 0, NULL, NULL, &sc->rl_ldata.rl_jrx_mtag); if (error) { device_printf(dev, "could not allocate jumbo RX DMA tag\n"); return (error); } } error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t), 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->rl_ldata.rl_rx_mtag); if (error) { device_printf(dev, "could not allocate RX DMA tag\n"); return (error); } /* * Allocate map for TX descriptor list. 
*/ error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, tx_list_size, 1, tx_list_size, 0, NULL, NULL, &sc->rl_ldata.rl_tx_list_tag); if (error) { device_printf(dev, "could not allocate TX DMA ring tag\n"); return (error); } /* Allocate DMA'able memory for the TX ring */ error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag, (void **)&sc->rl_ldata.rl_tx_list, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->rl_ldata.rl_tx_list_map); if (error) { device_printf(dev, "could not allocate TX DMA ring\n"); return (error); } /* Load the map for the TX ring. */ sc->rl_ldata.rl_tx_list_addr = 0; error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list, tx_list_size, re_dma_map_addr, &sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT); if (error != 0 || sc->rl_ldata.rl_tx_list_addr == 0) { device_printf(dev, "could not load TX DMA ring\n"); return (ENOMEM); } /* Create DMA maps for TX buffers */ for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { error = bus_dmamap_create(sc->rl_ldata.rl_tx_mtag, 0, &sc->rl_ldata.rl_tx_desc[i].tx_dmamap); if (error) { device_printf(dev, "could not create DMA map for TX\n"); return (error); } } /* * Allocate map for RX descriptor list. */ error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, rx_list_size, 1, rx_list_size, 0, NULL, NULL, &sc->rl_ldata.rl_rx_list_tag); if (error) { device_printf(dev, "could not create RX DMA ring tag\n"); return (error); } /* Allocate DMA'able memory for the RX ring */ error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag, (void **)&sc->rl_ldata.rl_rx_list, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->rl_ldata.rl_rx_list_map); if (error) { device_printf(dev, "could not allocate RX DMA ring\n"); return (error); } /* Load the map for the RX ring. */ sc->rl_ldata.rl_rx_list_addr = 0; error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list, rx_list_size, re_dma_map_addr, &sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT); if (error != 0 || sc->rl_ldata.rl_rx_list_addr == 0) { device_printf(dev, "could not load RX DMA ring\n"); return (ENOMEM); } /* Create DMA maps for RX buffers */ if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0, &sc->rl_ldata.rl_jrx_sparemap); if (error) { device_printf(dev, "could not create spare DMA map for jumbo RX\n"); return (error); } for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { error = bus_dmamap_create(sc->rl_ldata.rl_jrx_mtag, 0, &sc->rl_ldata.rl_jrx_desc[i].rx_dmamap); if (error) { device_printf(dev, "could not create DMA map for jumbo RX\n"); return (error); } } } error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0, &sc->rl_ldata.rl_rx_sparemap); if (error) { device_printf(dev, "could not create spare DMA map for RX\n"); return (error); } for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0, &sc->rl_ldata.rl_rx_desc[i].rx_dmamap); if (error) { device_printf(dev, "could not create DMA map for RX\n"); return (error); } } /* Create DMA map for statistics. 
*/ error = bus_dma_tag_create(sc->rl_parent_tag, RL_DUMP_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct rl_stats), 1, sizeof(struct rl_stats), 0, NULL, NULL, &sc->rl_ldata.rl_stag); if (error) { device_printf(dev, "could not create statistics DMA tag\n"); return (error); } /* Allocate DMA'able memory for statistics. */ error = bus_dmamem_alloc(sc->rl_ldata.rl_stag, (void **)&sc->rl_ldata.rl_stats, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->rl_ldata.rl_smap); if (error) { device_printf(dev, "could not allocate statistics DMA memory\n"); return (error); } /* Load the map for statistics. */ sc->rl_ldata.rl_stats_addr = 0; error = bus_dmamap_load(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_smap, sc->rl_ldata.rl_stats, sizeof(struct rl_stats), re_dma_map_addr, &sc->rl_ldata.rl_stats_addr, BUS_DMA_NOWAIT); if (error != 0 || sc->rl_ldata.rl_stats_addr == 0) { device_printf(dev, "could not load statistics DMA memory\n"); return (ENOMEM); } return (0); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int re_attach(device_t dev) { u_char eaddr[ETHER_ADDR_LEN]; u_int16_t as[ETHER_ADDR_LEN / 2]; struct rl_softc *sc; struct ifnet *ifp; const struct rl_hwrev *hw_rev; int capmask, error = 0, hwrev, i, msic, msixc, phy, reg, rid; u_int32_t cap, ctl; u_int16_t devid, re_did = 0; uint8_t cfg; sc = device_get_softc(dev); sc->rl_dev = dev; mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0); /* * Map control/status registers. */ pci_enable_busmaster(dev); devid = pci_get_device(dev); /* * Prefer memory space register mapping over IO space. * Because RTL8169SC does not seem to work when memory mapping * is used always activate io mapping. */ if (devid == RT_DEVICEID_8169SC) prefer_iomap = 1; if (prefer_iomap == 0) { sc->rl_res_id = PCIR_BAR(1); sc->rl_res_type = SYS_RES_MEMORY; /* RTL8168/8101E seems to use different BARs. */ if (devid == RT_DEVICEID_8168 || devid == RT_DEVICEID_8101E) sc->rl_res_id = PCIR_BAR(2); } else { sc->rl_res_id = PCIR_BAR(0); sc->rl_res_type = SYS_RES_IOPORT; } sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type, &sc->rl_res_id, RF_ACTIVE); if (sc->rl_res == NULL && prefer_iomap == 0) { sc->rl_res_id = PCIR_BAR(0); sc->rl_res_type = SYS_RES_IOPORT; sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type, &sc->rl_res_id, RF_ACTIVE); } if (sc->rl_res == NULL) { device_printf(dev, "couldn't map ports/memory\n"); error = ENXIO; goto fail; } sc->rl_btag = rman_get_bustag(sc->rl_res); sc->rl_bhandle = rman_get_bushandle(sc->rl_res); msic = pci_msi_count(dev); msixc = pci_msix_count(dev); if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) { sc->rl_flags |= RL_FLAG_PCIE; sc->rl_expcap = reg; } if (bootverbose) { device_printf(dev, "MSI count : %d\n", msic); device_printf(dev, "MSI-X count : %d\n", msixc); } if (msix_disable > 0) msixc = 0; if (msi_disable > 0) msic = 0; /* Prefer MSI-X to MSI.
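 * The code below tries MSI-X first, then falls back to plain MSI, and
 * finally to a shared INTx interrupt if neither can be allocated.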
*/ if (msixc > 0) { msixc = RL_MSI_MESSAGES; rid = PCIR_BAR(4); sc->rl_res_pba = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->rl_res_pba == NULL) { device_printf(sc->rl_dev, "could not allocate MSI-X PBA resource\n"); } if (sc->rl_res_pba != NULL && pci_alloc_msix(dev, &msixc) == 0) { if (msixc == RL_MSI_MESSAGES) { device_printf(dev, "Using %d MSI-X message\n", msixc); sc->rl_flags |= RL_FLAG_MSIX; } else pci_release_msi(dev); } if ((sc->rl_flags & RL_FLAG_MSIX) == 0) { if (sc->rl_res_pba != NULL) bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->rl_res_pba); sc->rl_res_pba = NULL; msixc = 0; } } /* Prefer MSI to INTx. */ if (msixc == 0 && msic > 0) { msic = RL_MSI_MESSAGES; if (pci_alloc_msi(dev, &msic) == 0) { if (msic == RL_MSI_MESSAGES) { device_printf(dev, "Using %d MSI message\n", msic); sc->rl_flags |= RL_FLAG_MSI; /* Explicitly set MSI enable bit. */ CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); cfg = CSR_READ_1(sc, RL_CFG2); cfg |= RL_CFG2_MSI; CSR_WRITE_1(sc, RL_CFG2, cfg); CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); } else pci_release_msi(dev); } if ((sc->rl_flags & RL_FLAG_MSI) == 0) msic = 0; } /* Allocate interrupt */ if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0) { rid = 0; sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->rl_irq[0] == NULL) { device_printf(dev, "couldn't allocate IRQ resources\n"); error = ENXIO; goto fail; } } else { for (i = 0, rid = 1; i < RL_MSI_MESSAGES; i++, rid++) { sc->rl_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->rl_irq[i] == NULL) { device_printf(dev, "couldn't allocate IRQ resources for " "message %d\n", rid); error = ENXIO; goto fail; } } } if ((sc->rl_flags & RL_FLAG_MSI) == 0) { CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); cfg = CSR_READ_1(sc, RL_CFG2); if ((cfg & RL_CFG2_MSI) != 0) { device_printf(dev, "turning off MSI enable bit.\n"); cfg &= ~RL_CFG2_MSI; CSR_WRITE_1(sc, RL_CFG2, cfg); } CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); } /* Disable ASPM L0S/L1 and CLKREQ. */ if (sc->rl_expcap != 0) { cap = pci_read_config(dev, sc->rl_expcap + PCIER_LINK_CAP, 2); if ((cap & PCIEM_LINK_CAP_ASPM) != 0) { ctl = pci_read_config(dev, sc->rl_expcap + PCIER_LINK_CTL, 2); if ((ctl & (PCIEM_LINK_CTL_ECPM | PCIEM_LINK_CTL_ASPMC))!= 0) { ctl &= ~(PCIEM_LINK_CTL_ECPM | PCIEM_LINK_CTL_ASPMC); pci_write_config(dev, sc->rl_expcap + PCIER_LINK_CTL, ctl, 2); device_printf(dev, "ASPM disabled\n"); } } else device_printf(dev, "no ASPM capability\n"); } hw_rev = re_hwrevs; hwrev = CSR_READ_4(sc, RL_TXCFG); switch (hwrev & 0x70000000) { case 0x00000000: case 0x10000000: device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0xfc800000); hwrev &= (RL_TXCFG_HWREV | 0x80000000); break; default: device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0x7c800000); sc->rl_macrev = hwrev & 0x00700000; hwrev &= RL_TXCFG_HWREV; break; } device_printf(dev, "MAC rev. 
0x%08x\n", sc->rl_macrev); while (hw_rev->rl_desc != NULL) { if (hw_rev->rl_rev == hwrev) { sc->rl_type = hw_rev->rl_type; sc->rl_hwrev = hw_rev; break; } hw_rev++; } if (hw_rev->rl_desc == NULL) { device_printf(dev, "Unknown H/W revision: 0x%08x\n", hwrev); error = ENXIO; goto fail; } switch (hw_rev->rl_rev) { case RL_HWREV_8139CPLUS: sc->rl_flags |= RL_FLAG_FASTETHER | RL_FLAG_AUTOPAD; break; case RL_HWREV_8100E: case RL_HWREV_8101E: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_FASTETHER; break; case RL_HWREV_8102E: case RL_HWREV_8102EL: case RL_HWREV_8102EL_SPIN1: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD; break; case RL_HWREV_8103E: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_MACSLEEP; break; case RL_HWREV_8401E: case RL_HWREV_8105E: case RL_HWREV_8105E_SPIN1: case RL_HWREV_8106E: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD; break; case RL_HWREV_8402: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_CMDSTOP_WAIT_TXQ; break; case RL_HWREV_8168B_SPIN1: case RL_HWREV_8168B_SPIN2: sc->rl_flags |= RL_FLAG_WOLRXENB; /* FALLTHROUGH */ case RL_HWREV_8168B_SPIN3: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_MACSTAT; break; case RL_HWREV_8168C_SPIN2: sc->rl_flags |= RL_FLAG_MACSLEEP; /* FALLTHROUGH */ case RL_HWREV_8168C: if (sc->rl_macrev == 0x00200000) sc->rl_flags |= RL_FLAG_MACSLEEP; /* FALLTHROUGH */ case RL_HWREV_8168CP: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK; break; case RL_HWREV_8168D: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK; break; case RL_HWREV_8168DP: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_WAIT_TXPOLL | RL_FLAG_WOL_MANLINK; break; case RL_HWREV_8168E: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PHYWAKE_PM | RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK; break; case RL_HWREV_8168E_VL: case RL_HWREV_8168F: sc->rl_flags |= RL_FLAG_EARLYOFF; /* FALLTHROUGH */ case RL_HWREV_8411: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_CMDSTOP_WAIT_TXQ | RL_FLAG_WOL_MANLINK; break; case RL_HWREV_8168EP: case RL_HWREV_8168G: case RL_HWREV_8411B: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_JUMBOV2 | RL_FLAG_CMDSTOP_WAIT_TXQ | RL_FLAG_WOL_MANLINK | RL_FLAG_8168G_PLUS; break; case RL_HWREV_8168GU: case RL_HWREV_8168H: if (pci_get_device(dev) == RT_DEVICEID_8101E) { /* RTL8106E(US), RTL8107E */ sc->rl_flags |= RL_FLAG_FASTETHER; } else sc->rl_flags |= RL_FLAG_JUMBOV2 | RL_FLAG_WOL_MANLINK; sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD | RL_FLAG_CMDSTOP_WAIT_TXQ | RL_FLAG_8168G_PLUS; break; case RL_HWREV_8169_8110SB: case 
RL_HWREV_8169_8110SBL: case RL_HWREV_8169_8110SC: case RL_HWREV_8169_8110SCE: sc->rl_flags |= RL_FLAG_PHYWAKE; /* FALLTHROUGH */ case RL_HWREV_8169: case RL_HWREV_8169S: case RL_HWREV_8110S: sc->rl_flags |= RL_FLAG_MACRESET; break; default: break; } if (sc->rl_hwrev->rl_rev == RL_HWREV_8139CPLUS) { sc->rl_cfg0 = RL_8139_CFG0; sc->rl_cfg1 = RL_8139_CFG1; sc->rl_cfg2 = 0; sc->rl_cfg3 = RL_8139_CFG3; sc->rl_cfg4 = RL_8139_CFG4; sc->rl_cfg5 = RL_8139_CFG5; } else { sc->rl_cfg0 = RL_CFG0; sc->rl_cfg1 = RL_CFG1; sc->rl_cfg2 = RL_CFG2; sc->rl_cfg3 = RL_CFG3; sc->rl_cfg4 = RL_CFG4; sc->rl_cfg5 = RL_CFG5; } /* Reset the adapter. */ RL_LOCK(sc); re_reset(sc); RL_UNLOCK(sc); /* Enable PME. */ CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); cfg = CSR_READ_1(sc, sc->rl_cfg1); cfg |= RL_CFG1_PME; CSR_WRITE_1(sc, sc->rl_cfg1, cfg); cfg = CSR_READ_1(sc, sc->rl_cfg5); cfg &= RL_CFG5_PME_STS; CSR_WRITE_1(sc, sc->rl_cfg5, cfg); CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); if ((sc->rl_flags & RL_FLAG_PAR) != 0) { /* * XXX Should have a better way to extract station * address from EEPROM. */ for (i = 0; i < ETHER_ADDR_LEN; i++) eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i); } else { sc->rl_eewidth = RL_9356_ADDR_LEN; re_read_eeprom(sc, (caddr_t)&re_did, 0, 1); if (re_did != 0x8129) sc->rl_eewidth = RL_9346_ADDR_LEN; /* * Get station address from the EEPROM. */ re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3); for (i = 0; i < ETHER_ADDR_LEN / 2; i++) as[i] = le16toh(as[i]); bcopy(as, eaddr, ETHER_ADDR_LEN); } if (sc->rl_type == RL_8169) { /* Set RX length mask and number of descriptors. */ sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN; sc->rl_txstart = RL_GTXSTART; sc->rl_ldata.rl_tx_desc_cnt = RL_8169_TX_DESC_CNT; sc->rl_ldata.rl_rx_desc_cnt = RL_8169_RX_DESC_CNT; } else { /* Set RX length mask and number of descriptors. */ sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN; sc->rl_txstart = RL_TXSTART; sc->rl_ldata.rl_tx_desc_cnt = RL_8139_TX_DESC_CNT; sc->rl_ldata.rl_rx_desc_cnt = RL_8139_RX_DESC_CNT; } error = re_allocmem(dev, sc); if (error) goto fail; re_add_sysctls(sc); ifp = sc->rl_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); error = ENOSPC; goto fail; } /* Take controller out of deep sleep mode. */ if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) { if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80) CSR_WRITE_1(sc, RL_GPIO, CSR_READ_1(sc, RL_GPIO) | 0x01); else CSR_WRITE_1(sc, RL_GPIO, CSR_READ_1(sc, RL_GPIO) & ~0x01); } /* Take PHY out of power down mode. */ if ((sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0) { CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) | 0x80); if (hw_rev->rl_rev == RL_HWREV_8401E) CSR_WRITE_1(sc, 0xD1, CSR_READ_1(sc, 0xD1) & ~0x08); } if ((sc->rl_flags & RL_FLAG_PHYWAKE) != 0) { re_gmii_writereg(dev, 1, 0x1f, 0); re_gmii_writereg(dev, 1, 0x0e, 0); } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = re_ioctl; ifp->if_start = re_start; /* * RTL8168/8111C generates wrong IP checksummed frame if the * packet has IP options so disable TX checksum offloading. 
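 * Receive checksum offload is still enabled for those revisions; only the
 * transmit checksum flags are left out of if_hwassist below.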
*/ if (sc->rl_hwrev->rl_rev == RL_HWREV_8168C || sc->rl_hwrev->rl_rev == RL_HWREV_8168C_SPIN2 || sc->rl_hwrev->rl_rev == RL_HWREV_8168CP) { ifp->if_hwassist = 0; ifp->if_capabilities = IFCAP_RXCSUM | IFCAP_TSO4; } else { ifp->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP; ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4; } ifp->if_hwassist |= CSUM_TSO; ifp->if_capenable = ifp->if_capabilities; ifp->if_init = re_init; IFQ_SET_MAXLEN(&ifp->if_snd, RL_IFQ_MAXLEN); ifp->if_snd.ifq_drv_maxlen = RL_IFQ_MAXLEN; IFQ_SET_READY(&ifp->if_snd); - TASK_INIT(&sc->rl_inttask, 0, re_int_task, sc); + NET_TASK_INIT(&sc->rl_inttask, 0, re_int_task, sc); #define RE_PHYAD_INTERNAL 0 /* Do MII setup. */ phy = RE_PHYAD_INTERNAL; if (sc->rl_type == RL_8169) phy = 1; capmask = BMSR_DEFCAPMASK; if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0) capmask &= ~BMSR_EXTSTAT; error = mii_attach(dev, &sc->rl_miibus, ifp, re_ifmedia_upd, re_ifmedia_sts, capmask, phy, MII_OFFSET_ANY, MIIF_DOPAUSE); if (error != 0) { device_printf(dev, "attaching PHYs failed\n"); goto fail; } /* * Call MI attach routine. */ ether_ifattach(ifp, eaddr); /* VLAN capability setup */ ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING; if (ifp->if_capabilities & IFCAP_HWCSUM) ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; /* Enable WOL if PM is supported. */ if (pci_find_cap(sc->rl_dev, PCIY_PMG, &reg) == 0) ifp->if_capabilities |= IFCAP_WOL; ifp->if_capenable = ifp->if_capabilities; ifp->if_capenable &= ~(IFCAP_WOL_UCAST | IFCAP_WOL_MCAST); /* * Don't enable TSO by default. It is known to generate * corrupted TCP segments(bad TCP options) under certain * circumstances. */ ifp->if_hwassist &= ~CSUM_TSO; ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_VLAN_HWTSO); #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif /* * Tell the upper layer(s) we support long frames. * Must appear after the call to ether_ifattach() because * ether_ifattach() sets ifi_hdrlen to the default value. */ ifp->if_hdrlen = sizeof(struct ether_vlan_header); #ifdef DEV_NETMAP re_netmap_attach(sc); #endif /* DEV_NETMAP */ #ifdef RE_DIAG /* * Perform hardware diagnostic on the original RTL8169. * Some 32-bit cards were incorrectly wired and would * malfunction if plugged into a 64-bit slot. */ if (hwrev == RL_HWREV_8169) { error = re_diag(sc); if (error) { device_printf(dev, "attach aborted due to hardware diag failure\n"); ether_ifdetach(ifp); goto fail; } } #endif #ifdef RE_TX_MODERATION intr_filter = 1; #endif /* Hook interrupt last to avoid having to lock softc */ if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 && intr_filter == 0) { error = bus_setup_intr(dev, sc->rl_irq[0], INTR_TYPE_NET | INTR_MPSAFE, NULL, re_intr_msi, sc, &sc->rl_intrhand[0]); } else { error = bus_setup_intr(dev, sc->rl_irq[0], INTR_TYPE_NET | INTR_MPSAFE, re_intr, NULL, sc, &sc->rl_intrhand[0]); } if (error) { device_printf(dev, "couldn't set up irq\n"); ether_ifdetach(ifp); goto fail; } DEBUGNET_SET(ifp, re); fail: if (error) re_detach(dev); return (error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated.
*/ static int re_detach(device_t dev) { struct rl_softc *sc; struct ifnet *ifp; int i, rid; sc = device_get_softc(dev); ifp = sc->rl_ifp; KASSERT(mtx_initialized(&sc->rl_mtx), ("re mutex not initialized")); /* These should only be active if attach succeeded */ if (device_is_attached(dev)) { #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif RL_LOCK(sc); #if 0 sc->suspended = 1; #endif re_stop(sc); RL_UNLOCK(sc); callout_drain(&sc->rl_stat_callout); taskqueue_drain(taskqueue_fast, &sc->rl_inttask); /* * Force off the IFF_UP flag here, in case someone * still had a BPF descriptor attached to this * interface. If they do, ether_ifdetach() will cause * the BPF code to try and clear the promisc mode * flag, which will bubble down to re_ioctl(), * which will try to call re_init() again. This will * turn the NIC back on and restart the MII ticker, * which will panic the system when the kernel tries * to invoke the re_tick() function that isn't there * anymore. */ ifp->if_flags &= ~IFF_UP; ether_ifdetach(ifp); } if (sc->rl_miibus) device_delete_child(dev, sc->rl_miibus); bus_generic_detach(dev); /* * The rest is resource deallocation, so we should already be * stopped here. */ if (sc->rl_intrhand[0] != NULL) { bus_teardown_intr(dev, sc->rl_irq[0], sc->rl_intrhand[0]); sc->rl_intrhand[0] = NULL; } if (ifp != NULL) { #ifdef DEV_NETMAP netmap_detach(ifp); #endif /* DEV_NETMAP */ if_free(ifp); } if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0) rid = 0; else rid = 1; if (sc->rl_irq[0] != NULL) { bus_release_resource(dev, SYS_RES_IRQ, rid, sc->rl_irq[0]); sc->rl_irq[0] = NULL; } if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0) pci_release_msi(dev); if (sc->rl_res_pba) { rid = PCIR_BAR(4); bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->rl_res_pba); } if (sc->rl_res) bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id, sc->rl_res); /* Unload and free the RX DMA ring memory and map */ if (sc->rl_ldata.rl_rx_list_tag) { if (sc->rl_ldata.rl_rx_list_addr) bus_dmamap_unload(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map); if (sc->rl_ldata.rl_rx_list) bus_dmamem_free(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list, sc->rl_ldata.rl_rx_list_map); bus_dma_tag_destroy(sc->rl_ldata.rl_rx_list_tag); } /* Unload and free the TX DMA ring memory and map */ if (sc->rl_ldata.rl_tx_list_tag) { if (sc->rl_ldata.rl_tx_list_addr) bus_dmamap_unload(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list_map); if (sc->rl_ldata.rl_tx_list) bus_dmamem_free(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list, sc->rl_ldata.rl_tx_list_map); bus_dma_tag_destroy(sc->rl_ldata.rl_tx_list_tag); } /* Destroy all the RX and TX buffer maps */ if (sc->rl_ldata.rl_tx_mtag) { for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { if (sc->rl_ldata.rl_tx_desc[i].tx_dmamap) bus_dmamap_destroy(sc->rl_ldata.rl_tx_mtag, sc->rl_ldata.rl_tx_desc[i].tx_dmamap); } bus_dma_tag_destroy(sc->rl_ldata.rl_tx_mtag); } if (sc->rl_ldata.rl_rx_mtag) { for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { if (sc->rl_ldata.rl_rx_desc[i].rx_dmamap) bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag, sc->rl_ldata.rl_rx_desc[i].rx_dmamap); } if (sc->rl_ldata.rl_rx_sparemap) bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag, sc->rl_ldata.rl_rx_sparemap); bus_dma_tag_destroy(sc->rl_ldata.rl_rx_mtag); } if (sc->rl_ldata.rl_jrx_mtag) { for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { if (sc->rl_ldata.rl_jrx_desc[i].rx_dmamap) bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag, sc->rl_ldata.rl_jrx_desc[i].rx_dmamap); 
} if (sc->rl_ldata.rl_jrx_sparemap) bus_dmamap_destroy(sc->rl_ldata.rl_jrx_mtag, sc->rl_ldata.rl_jrx_sparemap); bus_dma_tag_destroy(sc->rl_ldata.rl_jrx_mtag); } /* Unload and free the stats buffer and map */ if (sc->rl_ldata.rl_stag) { if (sc->rl_ldata.rl_stats_addr) bus_dmamap_unload(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_smap); if (sc->rl_ldata.rl_stats) bus_dmamem_free(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_stats, sc->rl_ldata.rl_smap); bus_dma_tag_destroy(sc->rl_ldata.rl_stag); } if (sc->rl_parent_tag) bus_dma_tag_destroy(sc->rl_parent_tag); mtx_destroy(&sc->rl_mtx); return (0); } static __inline void re_discard_rxbuf(struct rl_softc *sc, int idx) { struct rl_desc *desc; struct rl_rxdesc *rxd; uint32_t cmdstat; if (sc->rl_ifp->if_mtu > RL_MTU && (sc->rl_flags & RL_FLAG_JUMBOV2) != 0) rxd = &sc->rl_ldata.rl_jrx_desc[idx]; else rxd = &sc->rl_ldata.rl_rx_desc[idx]; desc = &sc->rl_ldata.rl_rx_list[idx]; desc->rl_vlanctl = 0; cmdstat = rxd->rx_size; if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1) cmdstat |= RL_RDESC_CMD_EOR; desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN); } static int re_newbuf(struct rl_softc *sc, int idx) { struct mbuf *m; struct rl_rxdesc *rxd; bus_dma_segment_t segs[1]; bus_dmamap_t map; struct rl_desc *desc; uint32_t cmdstat; int error, nsegs; m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = MCLBYTES; #ifdef RE_FIXUP_RX /* * This is part of an evil trick to deal with non-x86 platforms. * The RealTek chip requires RX buffers to be aligned on 64-bit * boundaries, but that will hose non-x86 machines. To get around * this, we leave some empty space at the start of each buffer * and for non-x86 hosts, we copy the buffer back six bytes * to achieve word alignment. This is slightly more efficient * than allocating a new buffer, copying the contents, and * discarding the old buffer. 
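 * re_fixup_rx() below performs that copy after the frame has been
 * received.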
*/ m_adj(m, RE_ETHER_ALIGN); #endif error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_rx_mtag, sc->rl_ldata.rl_rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { m_freem(m); return (ENOBUFS); } KASSERT(nsegs == 1, ("%s: %d segment returned!", __func__, nsegs)); rxd = &sc->rl_ldata.rl_rx_desc[idx]; if (rxd->rx_m != NULL) { bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap); } rxd->rx_m = m; map = rxd->rx_dmamap; rxd->rx_dmamap = sc->rl_ldata.rl_rx_sparemap; rxd->rx_size = segs[0].ds_len; sc->rl_ldata.rl_rx_sparemap = map; bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap, BUS_DMASYNC_PREREAD); desc = &sc->rl_ldata.rl_rx_list[idx]; desc->rl_vlanctl = 0; desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr)); desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr)); cmdstat = segs[0].ds_len; if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1) cmdstat |= RL_RDESC_CMD_EOR; desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN); return (0); } static int re_jumbo_newbuf(struct rl_softc *sc, int idx) { struct mbuf *m; struct rl_rxdesc *rxd; bus_dma_segment_t segs[1]; bus_dmamap_t map; struct rl_desc *desc; uint32_t cmdstat; int error, nsegs; m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES); if (m == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = MJUM9BYTES; #ifdef RE_FIXUP_RX m_adj(m, RE_ETHER_ALIGN); #endif error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_jrx_mtag, sc->rl_ldata.rl_jrx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { m_freem(m); return (ENOBUFS); } KASSERT(nsegs == 1, ("%s: %d segment returned!", __func__, nsegs)); rxd = &sc->rl_ldata.rl_jrx_desc[idx]; if (rxd->rx_m != NULL) { bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap); } rxd->rx_m = m; map = rxd->rx_dmamap; rxd->rx_dmamap = sc->rl_ldata.rl_jrx_sparemap; rxd->rx_size = segs[0].ds_len; sc->rl_ldata.rl_jrx_sparemap = map; bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap, BUS_DMASYNC_PREREAD); desc = &sc->rl_ldata.rl_rx_list[idx]; desc->rl_vlanctl = 0; desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr)); desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr)); cmdstat = segs[0].ds_len; if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1) cmdstat |= RL_RDESC_CMD_EOR; desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN); return (0); } #ifdef RE_FIXUP_RX static __inline void re_fixup_rx(struct mbuf *m) { int i; uint16_t *src, *dst; src = mtod(m, uint16_t *); dst = src - (RE_ETHER_ALIGN - ETHER_ALIGN) / sizeof *src; for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) *dst++ = *src++; m->m_data -= RE_ETHER_ALIGN - ETHER_ALIGN; } #endif static int re_tx_list_init(struct rl_softc *sc) { struct rl_desc *desc; int i; RL_LOCK_ASSERT(sc); bzero(sc->rl_ldata.rl_tx_list, sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc)); for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) sc->rl_ldata.rl_tx_desc[i].tx_m = NULL; #ifdef DEV_NETMAP re_netmap_tx_init(sc); #endif /* DEV_NETMAP */ /* Set EOR. 
*/ desc = &sc->rl_ldata.rl_tx_list[sc->rl_ldata.rl_tx_desc_cnt - 1]; desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOR); bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); sc->rl_ldata.rl_tx_prodidx = 0; sc->rl_ldata.rl_tx_considx = 0; sc->rl_ldata.rl_tx_free = sc->rl_ldata.rl_tx_desc_cnt; return (0); } static int re_rx_list_init(struct rl_softc *sc) { int error, i; bzero(sc->rl_ldata.rl_rx_list, sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc)); for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { sc->rl_ldata.rl_rx_desc[i].rx_m = NULL; if ((error = re_newbuf(sc, i)) != 0) return (error); } #ifdef DEV_NETMAP re_netmap_rx_init(sc); #endif /* DEV_NETMAP */ /* Flush the RX descriptors */ bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); sc->rl_ldata.rl_rx_prodidx = 0; sc->rl_head = sc->rl_tail = NULL; sc->rl_int_rx_act = 0; return (0); } static int re_jrx_list_init(struct rl_softc *sc) { int error, i; bzero(sc->rl_ldata.rl_rx_list, sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc)); for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { sc->rl_ldata.rl_jrx_desc[i].rx_m = NULL; if ((error = re_jumbo_newbuf(sc, i)) != 0) return (error); } bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); sc->rl_ldata.rl_rx_prodidx = 0; sc->rl_head = sc->rl_tail = NULL; sc->rl_int_rx_act = 0; return (0); } /* * RX handler for C+ and 8169. For the gigE chips, we support * the reception of jumbo frames that have been fragmented * across multiple 2K mbuf cluster buffers. */ static int re_rxeof(struct rl_softc *sc, int *rx_npktsp) { struct mbuf *m; struct ifnet *ifp; int i, rxerr, total_len; struct rl_desc *cur_rx; u_int32_t rxstat, rxvlan; int jumbo, maxpkt = 16, rx_npkts = 0; RL_LOCK_ASSERT(sc); ifp = sc->rl_ifp; #ifdef DEV_NETMAP if (netmap_rx_irq(ifp, 0, &rx_npkts)) return 0; #endif /* DEV_NETMAP */ if (ifp->if_mtu > RL_MTU && (sc->rl_flags & RL_FLAG_JUMBOV2) != 0) jumbo = 1; else jumbo = 0; /* Invalidate the descriptor memory */ bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); for (i = sc->rl_ldata.rl_rx_prodidx; maxpkt > 0; i = RL_RX_DESC_NXT(sc, i)) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) break; cur_rx = &sc->rl_ldata.rl_rx_list[i]; rxstat = le32toh(cur_rx->rl_cmdstat); if ((rxstat & RL_RDESC_STAT_OWN) != 0) break; total_len = rxstat & sc->rl_rxlenmask; rxvlan = le32toh(cur_rx->rl_vlanctl); if (jumbo != 0) m = sc->rl_ldata.rl_jrx_desc[i].rx_m; else m = sc->rl_ldata.rl_rx_desc[i].rx_m; if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 && (rxstat & (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) != (RL_RDESC_STAT_SOF | RL_RDESC_STAT_EOF)) { /* * RTL8168C or later controllers do not * support multi-fragment packet. */ re_discard_rxbuf(sc, i); continue; } else if ((rxstat & RL_RDESC_STAT_EOF) == 0) { if (re_newbuf(sc, i) != 0) { /* * If this is part of a multi-fragment packet, * discard all the pieces. 
*/ if (sc->rl_head != NULL) { m_freem(sc->rl_head); sc->rl_head = sc->rl_tail = NULL; } re_discard_rxbuf(sc, i); continue; } m->m_len = RE_RX_DESC_BUFLEN; if (sc->rl_head == NULL) sc->rl_head = sc->rl_tail = m; else { m->m_flags &= ~M_PKTHDR; sc->rl_tail->m_next = m; sc->rl_tail = m; } continue; } /* * NOTE: for the 8139C+, the frame length field * is always 12 bits in size, but for the gigE chips, * it is 13 bits (since the max RX frame length is 16K). * Unfortunately, all 32 bits in the status word * were already used, so to make room for the extra * length bit, RealTek took out the 'frame alignment * error' bit and shifted the other status bits * over one slot. The OWN, EOR, FS and LS bits are * still in the same places. We have already extracted * the frame length and checked the OWN bit, so rather * than using an alternate bit mapping, we shift the * status bits one space to the right so we can evaluate * them using the 8169 status as though it was in the * same format as that of the 8139C+. */ if (sc->rl_type == RL_8169) rxstat >>= 1; /* * if total_len > 2^13-1, both _RXERRSUM and _GIANT will be * set, but if CRC is clear, it will still be a valid frame. */ if ((rxstat & RL_RDESC_STAT_RXERRSUM) != 0) { rxerr = 1; if ((sc->rl_flags & RL_FLAG_JUMBOV2) == 0 && total_len > 8191 && (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT) rxerr = 0; if (rxerr != 0) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); /* * If this is part of a multi-fragment packet, * discard all the pieces. */ if (sc->rl_head != NULL) { m_freem(sc->rl_head); sc->rl_head = sc->rl_tail = NULL; } re_discard_rxbuf(sc, i); continue; } } /* * If allocating a replacement mbuf fails, * reload the current one. */ if (jumbo != 0) rxerr = re_jumbo_newbuf(sc, i); else rxerr = re_newbuf(sc, i); if (rxerr != 0) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); if (sc->rl_head != NULL) { m_freem(sc->rl_head); sc->rl_head = sc->rl_tail = NULL; } re_discard_rxbuf(sc, i); continue; } if (sc->rl_head != NULL) { if (jumbo != 0) m->m_len = total_len; else { m->m_len = total_len % RE_RX_DESC_BUFLEN; if (m->m_len == 0) m->m_len = RE_RX_DESC_BUFLEN; } /* * Special case: if there's 4 bytes or less * in this buffer, the mbuf can be discarded: * the last 4 bytes is the CRC, which we don't * care about anyway. 
*/ if (m->m_len <= ETHER_CRC_LEN) { sc->rl_tail->m_len -= (ETHER_CRC_LEN - m->m_len); m_freem(m); } else { m->m_len -= ETHER_CRC_LEN; m->m_flags &= ~M_PKTHDR; sc->rl_tail->m_next = m; } m = sc->rl_head; sc->rl_head = sc->rl_tail = NULL; m->m_pkthdr.len = total_len - ETHER_CRC_LEN; } else m->m_pkthdr.len = m->m_len = (total_len - ETHER_CRC_LEN); #ifdef RE_FIXUP_RX re_fixup_rx(m); #endif if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); m->m_pkthdr.rcvif = ifp; /* Do RX checksumming if enabled */ if (ifp->if_capenable & IFCAP_RXCSUM) { if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) { /* Check IP header checksum */ if (rxstat & RL_RDESC_STAT_PROTOID) m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if (!(rxstat & RL_RDESC_STAT_IPSUMBAD)) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; /* Check TCP/UDP checksum */ if ((RL_TCPPKT(rxstat) && !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) || (RL_UDPPKT(rxstat) && !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID|CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } } else { /* * RTL8168C/RTL816CP/RTL8111C/RTL8111CP */ if ((rxstat & RL_RDESC_STAT_PROTOID) && (rxvlan & RL_RDESC_IPV4)) m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if (!(rxstat & RL_RDESC_STAT_IPSUMBAD) && (rxvlan & RL_RDESC_IPV4)) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; if (((rxstat & RL_RDESC_STAT_TCP) && !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) || ((rxstat & RL_RDESC_STAT_UDP) && !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID|CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } } } maxpkt--; if (rxvlan & RL_RDESC_VLANCTL_TAG) { m->m_pkthdr.ether_vtag = bswap16((rxvlan & RL_RDESC_VLANCTL_DATA)); m->m_flags |= M_VLANTAG; } RL_UNLOCK(sc); (*ifp->if_input)(ifp, m); RL_LOCK(sc); rx_npkts++; } /* Flush the RX DMA ring */ bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); sc->rl_ldata.rl_rx_prodidx = i; if (rx_npktsp != NULL) *rx_npktsp = rx_npkts; if (maxpkt) return (EAGAIN); return (0); } static void re_txeof(struct rl_softc *sc) { struct ifnet *ifp; struct rl_txdesc *txd; u_int32_t txstat; int cons; cons = sc->rl_ldata.rl_tx_considx; if (cons == sc->rl_ldata.rl_tx_prodidx) return; ifp = sc->rl_ifp; #ifdef DEV_NETMAP if (netmap_tx_irq(ifp, 0)) return; #endif /* DEV_NETMAP */ /* Invalidate the TX descriptor list */ bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); for (; cons != sc->rl_ldata.rl_tx_prodidx; cons = RL_TX_DESC_NXT(sc, cons)) { txstat = le32toh(sc->rl_ldata.rl_tx_list[cons].rl_cmdstat); if (txstat & RL_TDESC_STAT_OWN) break; /* * We only stash mbufs in the last descriptor * in a fragment chain, which also happens to * be the only place where the TX status bits * are valid. 
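 * That is why the code below only unloads the DMA map, frees the mbuf
 * and updates the counters when RL_TDESC_CMD_EOF is set.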
*/ if (txstat & RL_TDESC_CMD_EOF) { txd = &sc->rl_ldata.rl_tx_desc[cons]; bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap); KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbufs!", __func__)); m_freem(txd->tx_m); txd->tx_m = NULL; if (txstat & (RL_TDESC_STAT_EXCESSCOL| RL_TDESC_STAT_COLCNT)) if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); if (txstat & RL_TDESC_STAT_TXERRSUM) if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); else if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); } sc->rl_ldata.rl_tx_free++; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } sc->rl_ldata.rl_tx_considx = cons; /* No changes made to the TX ring, so no flush needed */ if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) { #ifdef RE_TX_MODERATION /* * If not all descriptors have been reaped yet, reload * the timer so that we will eventually get another * interrupt that will cause us to re-enter this routine. * This is done in case the transmitter has gone idle. */ CSR_WRITE_4(sc, RL_TIMERCNT, 1); #endif } else sc->rl_watchdog_timer = 0; } static void re_tick(void *xsc) { struct rl_softc *sc; struct mii_data *mii; sc = xsc; RL_LOCK_ASSERT(sc); mii = device_get_softc(sc->rl_miibus); mii_tick(mii); if ((sc->rl_flags & RL_FLAG_LINK) == 0) re_miibus_statchg(sc->rl_dev); /* * Reclaim transmitted frames here. Technically it is not * necessary to do here but it ensures periodic reclamation * regardless of Tx completion interrupt which seems to be * lost on PCIe based controllers under certain situations. */ re_txeof(sc); re_watchdog(sc); callout_reset(&sc->rl_stat_callout, hz, re_tick, sc); } #ifdef DEVICE_POLLING static int re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct rl_softc *sc = ifp->if_softc; int rx_npkts = 0; RL_LOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) rx_npkts = re_poll_locked(ifp, cmd, count); RL_UNLOCK(sc); return (rx_npkts); } static int re_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct rl_softc *sc = ifp->if_softc; int rx_npkts; RL_LOCK_ASSERT(sc); sc->rxcycles = count; re_rxeof(sc, &rx_npkts); re_txeof(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) re_start_locked(ifp); if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */ u_int16_t status; status = CSR_READ_2(sc, RL_ISR); if (status == 0xffff) return (rx_npkts); if (status) CSR_WRITE_2(sc, RL_ISR, status); if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) && (sc->rl_flags & RL_FLAG_PCIE)) CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); /* * XXX check behaviour on receiver stalls. 
*/ if (status & RL_ISR_SYSTEM_ERR) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; re_init_locked(sc); } } return (rx_npkts); } #endif /* DEVICE_POLLING */ static int re_intr(void *arg) { struct rl_softc *sc; uint16_t status; sc = arg; status = CSR_READ_2(sc, RL_ISR); if (status == 0xFFFF || (status & RL_INTRS_CPLUS) == 0) return (FILTER_STRAY); CSR_WRITE_2(sc, RL_IMR, 0); taskqueue_enqueue(taskqueue_fast, &sc->rl_inttask); return (FILTER_HANDLED); } static void re_int_task(void *arg, int npending) { - struct epoch_tracker et; struct rl_softc *sc; struct ifnet *ifp; u_int16_t status; int rval = 0; sc = arg; ifp = sc->rl_ifp; RL_LOCK(sc); status = CSR_READ_2(sc, RL_ISR); CSR_WRITE_2(sc, RL_ISR, status); if (sc->suspended || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { RL_UNLOCK(sc); return; } #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { RL_UNLOCK(sc); return; } #endif - if (status & (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_FIFO_OFLOW)) { - NET_EPOCH_ENTER(et); + if (status & (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_FIFO_OFLOW)) rval = re_rxeof(sc, NULL); - NET_EPOCH_EXIT(et); - } /* * Some chips will ignore a second TX request issued * while an existing transmission is in progress. If * the transmitter goes idle but there are still * packets waiting to be sent, we need to restart the * channel here to flush them out. This only seems to * be required with the PCIe devices. */ if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) && (sc->rl_flags & RL_FLAG_PCIE)) CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); if (status & ( #ifdef RE_TX_MODERATION RL_ISR_TIMEOUT_EXPIRED| #else RL_ISR_TX_OK| #endif RL_ISR_TX_ERR|RL_ISR_TX_DESC_UNAVAIL)) re_txeof(sc); if (status & RL_ISR_SYSTEM_ERR) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; re_init_locked(sc); } if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) re_start_locked(ifp); RL_UNLOCK(sc); if ((CSR_READ_2(sc, RL_ISR) & RL_INTRS_CPLUS) || rval) { taskqueue_enqueue(taskqueue_fast, &sc->rl_inttask); return; } CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); } static void re_intr_msi(void *xsc) { struct rl_softc *sc; struct ifnet *ifp; uint16_t intrs, status; sc = xsc; RL_LOCK(sc); ifp = sc->rl_ifp; #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { RL_UNLOCK(sc); return; } #endif /* Disable interrupts. */ CSR_WRITE_2(sc, RL_IMR, 0); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { RL_UNLOCK(sc); return; } intrs = RL_INTRS_CPLUS; status = CSR_READ_2(sc, RL_ISR); CSR_WRITE_2(sc, RL_ISR, status); if (sc->rl_int_rx_act > 0) { intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN); status &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN); } if (status & (RL_ISR_TIMEOUT_EXPIRED | RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) { re_rxeof(sc, NULL); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { if (sc->rl_int_rx_mod != 0 && (status & (RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN)) != 0) { /* Rearm one-shot timer. */ CSR_WRITE_4(sc, RL_TIMERCNT, 1); intrs &= ~(RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN); sc->rl_int_rx_act = 1; } else { intrs |= RL_ISR_RX_OK | RL_ISR_RX_ERR | RL_ISR_FIFO_OFLOW | RL_ISR_RX_OVERRUN; sc->rl_int_rx_act = 0; } } } /* * Some chips will ignore a second TX request issued * while an existing transmission is in progress. If * the transmitter goes idle but there are still * packets waiting to be sent, we need to restart the * channel here to flush them out. This only seems to * be required with the PCIe devices. 
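 * The same workaround is applied in re_int_task() above.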
*/ if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) && (sc->rl_flags & RL_FLAG_PCIE)) CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); if (status & (RL_ISR_TX_OK | RL_ISR_TX_ERR | RL_ISR_TX_DESC_UNAVAIL)) re_txeof(sc); if (status & RL_ISR_SYSTEM_ERR) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; re_init_locked(sc); } if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) re_start_locked(ifp); CSR_WRITE_2(sc, RL_IMR, intrs); } RL_UNLOCK(sc); } static int re_encap(struct rl_softc *sc, struct mbuf **m_head) { struct rl_txdesc *txd, *txd_last; bus_dma_segment_t segs[RL_NTXSEGS]; bus_dmamap_t map; struct mbuf *m_new; struct rl_desc *desc; int nsegs, prod; int i, error, ei, si; int padlen; uint32_t cmdstat, csum_flags, vlanctl; RL_LOCK_ASSERT(sc); M_ASSERTPKTHDR((*m_head)); /* * With some of the RealTek chips, using the checksum offload * support in conjunction with the autopadding feature results * in the transmission of corrupt frames. For example, if we * need to send a really small IP fragment that's less than 60 * bytes in size, and IP header checksumming is enabled, the * resulting ethernet frame that appears on the wire will * have garbled payload. To work around this, if TX IP checksum * offload is enabled, we always manually pad short frames out * to the minimum ethernet frame size. */ if ((sc->rl_flags & RL_FLAG_AUTOPAD) == 0 && (*m_head)->m_pkthdr.len < RL_IP4CSUMTX_PADLEN && ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) != 0) { padlen = RL_MIN_FRAMELEN - (*m_head)->m_pkthdr.len; if (M_WRITABLE(*m_head) == 0) { /* Get a writable copy. */ m_new = m_dup(*m_head, M_NOWAIT); m_freem(*m_head); if (m_new == NULL) { *m_head = NULL; return (ENOBUFS); } *m_head = m_new; } if ((*m_head)->m_next != NULL || M_TRAILINGSPACE(*m_head) < padlen) { m_new = m_defrag(*m_head, M_NOWAIT); if (m_new == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } } else m_new = *m_head; /* * Manually pad short frames, and zero the pad space * to avoid leaking data. */ bzero(mtod(m_new, char *) + m_new->m_pkthdr.len, padlen); m_new->m_pkthdr.len += padlen; m_new->m_len = m_new->m_pkthdr.len; *m_head = m_new; } prod = sc->rl_ldata.rl_tx_prodidx; txd = &sc->rl_ldata.rl_tx_desc[prod]; error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT); if (error == EFBIG) { m_new = m_collapse(*m_head, M_NOWAIT, RL_NTXSEGS); if (m_new == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } *m_head = m_new; error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { m_freem(*m_head); *m_head = NULL; return (error); } } else if (error != 0) return (error); if (nsegs == 0) { m_freem(*m_head); *m_head = NULL; return (EIO); } /* Check for number of available descriptors. */ if (sc->rl_ldata.rl_tx_free - nsegs <= 1) { bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap); return (ENOBUFS); } bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap, BUS_DMASYNC_PREWRITE); /* * Set up checksum offload. Note: checksum offload bits must * appear in all descriptors of a multi-descriptor transmit * attempt. This is according to testing done with an 8169 * chip. This is a requirement. 
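 * csum_flags and vlanctl are therefore computed once below and written
 * into every descriptor of the chain, not just the first one.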
*/ vlanctl = 0; csum_flags = 0; if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) { if ((sc->rl_flags & RL_FLAG_DESCV2) != 0) { csum_flags |= RL_TDESC_CMD_LGSEND; vlanctl |= ((uint32_t)(*m_head)->m_pkthdr.tso_segsz << RL_TDESC_CMD_MSSVALV2_SHIFT); } else { csum_flags |= RL_TDESC_CMD_LGSEND | ((uint32_t)(*m_head)->m_pkthdr.tso_segsz << RL_TDESC_CMD_MSSVAL_SHIFT); } } else { /* * Unconditionally enable IP checksum if TCP or UDP * checksum is required. Otherwise, TCP/UDP checksum * doesn't make effects. */ if (((*m_head)->m_pkthdr.csum_flags & RE_CSUM_FEATURES) != 0) { if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) { csum_flags |= RL_TDESC_CMD_IPCSUM; if (((*m_head)->m_pkthdr.csum_flags & CSUM_TCP) != 0) csum_flags |= RL_TDESC_CMD_TCPCSUM; if (((*m_head)->m_pkthdr.csum_flags & CSUM_UDP) != 0) csum_flags |= RL_TDESC_CMD_UDPCSUM; } else { vlanctl |= RL_TDESC_CMD_IPCSUMV2; if (((*m_head)->m_pkthdr.csum_flags & CSUM_TCP) != 0) vlanctl |= RL_TDESC_CMD_TCPCSUMV2; if (((*m_head)->m_pkthdr.csum_flags & CSUM_UDP) != 0) vlanctl |= RL_TDESC_CMD_UDPCSUMV2; } } } /* * Set up hardware VLAN tagging. Note: vlan tag info must * appear in all descriptors of a multi-descriptor * transmission attempt. */ if ((*m_head)->m_flags & M_VLANTAG) vlanctl |= bswap16((*m_head)->m_pkthdr.ether_vtag) | RL_TDESC_VLANCTL_TAG; si = prod; for (i = 0; i < nsegs; i++, prod = RL_TX_DESC_NXT(sc, prod)) { desc = &sc->rl_ldata.rl_tx_list[prod]; desc->rl_vlanctl = htole32(vlanctl); desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[i].ds_addr)); desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[i].ds_addr)); cmdstat = segs[i].ds_len; if (i != 0) cmdstat |= RL_TDESC_CMD_OWN; if (prod == sc->rl_ldata.rl_tx_desc_cnt - 1) cmdstat |= RL_TDESC_CMD_EOR; desc->rl_cmdstat = htole32(cmdstat | csum_flags); sc->rl_ldata.rl_tx_free--; } /* Update producer index. */ sc->rl_ldata.rl_tx_prodidx = prod; /* Set EOF on the last descriptor. */ ei = RL_TX_DESC_PRV(sc, prod); desc = &sc->rl_ldata.rl_tx_list[ei]; desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF); desc = &sc->rl_ldata.rl_tx_list[si]; /* Set SOF and transfer ownership of packet to the chip. */ desc->rl_cmdstat |= htole32(RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF); /* * Insure that the map for this transmission * is placed at the array index of the last descriptor * in this chain. (Swap last and first dmamaps.) */ txd_last = &sc->rl_ldata.rl_tx_desc[ei]; map = txd->tx_dmamap; txd->tx_dmamap = txd_last->tx_dmamap; txd_last->tx_dmamap = map; txd_last->tx_m = *m_head; return (0); } static void re_start(struct ifnet *ifp) { struct rl_softc *sc; sc = ifp->if_softc; RL_LOCK(sc); re_start_locked(ifp); RL_UNLOCK(sc); } /* * Main transmit routine for C+ and gigE NICs. */ static void re_start_locked(struct ifnet *ifp) { struct rl_softc *sc; struct mbuf *m_head; int queued; sc = ifp->if_softc; #ifdef DEV_NETMAP /* XXX is this necessary ? 
*/ if (ifp->if_capenable & IFCAP_NETMAP) { struct netmap_kring *kring = NA(ifp)->tx_rings[0]; if (sc->rl_ldata.rl_tx_prodidx != kring->nr_hwcur) { /* kick the tx unit */ CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); #ifdef RE_TX_MODERATION CSR_WRITE_4(sc, RL_TIMERCNT, 1); #endif sc->rl_watchdog_timer = 5; } return; } #endif /* DEV_NETMAP */ if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0) return; for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->rl_ldata.rl_tx_free > 1;) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; if (re_encap(sc, &m_head) != 0) { if (m_head == NULL) break; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } /* * If there's a BPF listener, bounce a copy of this frame * to him. */ ETHER_BPF_MTAP(ifp, m_head); queued++; } if (queued == 0) { #ifdef RE_TX_MODERATION if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) CSR_WRITE_4(sc, RL_TIMERCNT, 1); #endif return; } re_start_tx(sc); } static void re_start_tx(struct rl_softc *sc) { /* Flush the TX descriptors */ bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list_map, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); #ifdef RE_TX_MODERATION /* * Use the countdown timer for interrupt moderation. * 'TX done' interrupts are disabled. Instead, we reset the * countdown timer, which will begin counting until it hits * the value in the TIMERINT register, and then trigger an * interrupt. Each time we write to the TIMERCNT register, * the timer count is reset to 0. */ CSR_WRITE_4(sc, RL_TIMERCNT, 1); #endif /* * Set a timeout in case the chip goes out to lunch. */ sc->rl_watchdog_timer = 5; } static void re_set_jumbo(struct rl_softc *sc, int jumbo) { if (sc->rl_hwrev->rl_rev == RL_HWREV_8168E_VL) { pci_set_max_read_req(sc->rl_dev, 4096); return; } CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); if (jumbo != 0) { CSR_WRITE_1(sc, sc->rl_cfg3, CSR_READ_1(sc, sc->rl_cfg3) | RL_CFG3_JUMBO_EN0); switch (sc->rl_hwrev->rl_rev) { case RL_HWREV_8168DP: break; case RL_HWREV_8168E: CSR_WRITE_1(sc, sc->rl_cfg4, CSR_READ_1(sc, sc->rl_cfg4) | 0x01); break; default: CSR_WRITE_1(sc, sc->rl_cfg4, CSR_READ_1(sc, sc->rl_cfg4) | RL_CFG4_JUMBO_EN1); } } else { CSR_WRITE_1(sc, sc->rl_cfg3, CSR_READ_1(sc, sc->rl_cfg3) & ~RL_CFG3_JUMBO_EN0); switch (sc->rl_hwrev->rl_rev) { case RL_HWREV_8168DP: break; case RL_HWREV_8168E: CSR_WRITE_1(sc, sc->rl_cfg4, CSR_READ_1(sc, sc->rl_cfg4) & ~0x01); break; default: CSR_WRITE_1(sc, sc->rl_cfg4, CSR_READ_1(sc, sc->rl_cfg4) & ~RL_CFG4_JUMBO_EN1); } } CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); switch (sc->rl_hwrev->rl_rev) { case RL_HWREV_8168DP: pci_set_max_read_req(sc->rl_dev, 4096); break; default: if (jumbo != 0) pci_set_max_read_req(sc->rl_dev, 512); else pci_set_max_read_req(sc->rl_dev, 4096); } } static void re_init(void *xsc) { struct rl_softc *sc = xsc; RL_LOCK(sc); re_init_locked(sc); RL_UNLOCK(sc); } static void re_init_locked(struct rl_softc *sc) { struct ifnet *ifp = sc->rl_ifp; struct mii_data *mii; uint32_t reg; uint16_t cfg; union { uint32_t align_dummy; u_char eaddr[ETHER_ADDR_LEN]; } eaddr; RL_LOCK_ASSERT(sc); mii = device_get_softc(sc->rl_miibus); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) return; /* * Cancel pending I/O and free all RX/TX buffers. */ re_stop(sc); /* Put controller into known state. */ re_reset(sc); /* * For C+ mode, initialize the RX descriptors and mbufs. 
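 * On JUMBOV2 parts a separate jumbo RX list is used once the MTU
 * exceeds RL_MTU, and hardware checksum/TSO offload is switched off
 * for that configuration; other parts always use the standard RX list.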
*/ if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { if (ifp->if_mtu > RL_MTU) { if (re_jrx_list_init(sc) != 0) { device_printf(sc->rl_dev, "no memory for jumbo RX buffers\n"); re_stop(sc); return; } /* Disable checksum offloading for jumbo frames. */ ifp->if_capenable &= ~(IFCAP_HWCSUM | IFCAP_TSO4); ifp->if_hwassist &= ~(RE_CSUM_FEATURES | CSUM_TSO); } else { if (re_rx_list_init(sc) != 0) { device_printf(sc->rl_dev, "no memory for RX buffers\n"); re_stop(sc); return; } } re_set_jumbo(sc, ifp->if_mtu > RL_MTU); } else { if (re_rx_list_init(sc) != 0) { device_printf(sc->rl_dev, "no memory for RX buffers\n"); re_stop(sc); return; } if ((sc->rl_flags & RL_FLAG_PCIE) != 0 && pci_get_device(sc->rl_dev) != RT_DEVICEID_8101E) { if (ifp->if_mtu > RL_MTU) pci_set_max_read_req(sc->rl_dev, 512); else pci_set_max_read_req(sc->rl_dev, 4096); } } re_tx_list_init(sc); /* * Enable C+ RX and TX mode, as well as VLAN stripping and * RX checksum offload. We must configure the C+ register * before all others. */ cfg = RL_CPLUSCMD_PCI_MRW; if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) cfg |= RL_CPLUSCMD_RXCSUM_ENB; if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) cfg |= RL_CPLUSCMD_VLANSTRIP; if ((sc->rl_flags & RL_FLAG_MACSTAT) != 0) { cfg |= RL_CPLUSCMD_MACSTAT_DIS; /* XXX magic. */ cfg |= 0x0001; } else cfg |= RL_CPLUSCMD_RXENB | RL_CPLUSCMD_TXENB; CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg); if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SC || sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE) { reg = 0x000fff00; if ((CSR_READ_1(sc, sc->rl_cfg2) & RL_CFG2_PCI66MHZ) != 0) reg |= 0x000000ff; if (sc->rl_hwrev->rl_rev == RL_HWREV_8169_8110SCE) reg |= 0x00f00000; CSR_WRITE_4(sc, 0x7c, reg); /* Disable interrupt mitigation. */ CSR_WRITE_2(sc, 0xe2, 0); } /* * Disable TSO if interface MTU size is greater than MSS * allowed in controller. */ if (ifp->if_mtu > RL_TSO_MTU && (ifp->if_capenable & IFCAP_TSO4) != 0) { ifp->if_capenable &= ~IFCAP_TSO4; ifp->if_hwassist &= ~CSUM_TSO; } /* * Init our MAC address. Even though the chipset * documentation doesn't mention it, we need to enter "Config * register write enable" mode to modify the ID registers. */ /* Copy MAC address on stack to align. */ bcopy(IF_LLADDR(ifp), eaddr.eaddr, ETHER_ADDR_LEN); CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); CSR_WRITE_4(sc, RL_IDR0, htole32(*(u_int32_t *)(&eaddr.eaddr[0]))); CSR_WRITE_4(sc, RL_IDR4, htole32(*(u_int32_t *)(&eaddr.eaddr[4]))); CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); /* * Load the addresses of the RX and TX lists into the chip. */ CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI, RL_ADDR_HI(sc->rl_ldata.rl_rx_list_addr)); CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO, RL_ADDR_LO(sc->rl_ldata.rl_rx_list_addr)); CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI, RL_ADDR_HI(sc->rl_ldata.rl_tx_list_addr)); CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO, RL_ADDR_LO(sc->rl_ldata.rl_tx_list_addr)); if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) { /* Disable RXDV gate. */ CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) & ~0x00080000); } /* * Enable transmit and receive for pre-RTL8168G controllers. * RX/TX MACs should be enabled before RX/TX configuration. */ if ((sc->rl_flags & RL_FLAG_8168G_PLUS) == 0) CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB | RL_CMD_RX_ENB); /* * Set the initial TX configuration. 
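 * In test mode the transmitter is additionally placed in loopback
 * (RL_LOOPTEST_ON on 8169-class parts, RL_LOOPTEST_ON_CPLUS otherwise).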
*/ if (sc->rl_testmode) { if (sc->rl_type == RL_8169) CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG|RL_LOOPTEST_ON); else CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG|RL_LOOPTEST_ON_CPLUS); } else CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG); CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16); /* * Set the initial RX configuration. */ re_set_rxmode(sc); /* Configure interrupt moderation. */ if (sc->rl_type == RL_8169) { /* Magic from vendor. */ CSR_WRITE_2(sc, RL_INTRMOD, 0x5100); } /* * Enable transmit and receive for RTL8168G and later controllers. * RX/TX MACs should be enabled after RX/TX configuration. */ if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB | RL_CMD_RX_ENB); #ifdef DEVICE_POLLING /* * Disable interrupts if we are polling. */ if (ifp->if_capenable & IFCAP_POLLING) CSR_WRITE_2(sc, RL_IMR, 0); else /* otherwise ... */ #endif /* * Enable interrupts. */ if (sc->rl_testmode) CSR_WRITE_2(sc, RL_IMR, 0); else CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); CSR_WRITE_2(sc, RL_ISR, RL_INTRS_CPLUS); /* Set initial TX threshold */ sc->rl_txthresh = RL_TX_THRESH_INIT; /* Start RX/TX process. */ CSR_WRITE_4(sc, RL_MISSEDPKT, 0); /* * Initialize the timer interrupt register so that * a timer interrupt will be generated once the timer * reaches a certain number of ticks. The timer is * reloaded on each transmit. */ #ifdef RE_TX_MODERATION /* * Use timer interrupt register to moderate TX interrupt * moderation, which dramatically improves TX frame rate. */ if (sc->rl_type == RL_8169) CSR_WRITE_4(sc, RL_TIMERINT_8169, 0x800); else CSR_WRITE_4(sc, RL_TIMERINT, 0x400); #else /* * Use timer interrupt register to moderate RX interrupt * moderation. */ if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) != 0 && intr_filter == 0) { if (sc->rl_type == RL_8169) CSR_WRITE_4(sc, RL_TIMERINT_8169, RL_USECS(sc->rl_int_rx_mod)); } else { if (sc->rl_type == RL_8169) CSR_WRITE_4(sc, RL_TIMERINT_8169, RL_USECS(0)); } #endif /* * For 8169 gigE NICs, set the max allowed RX packet * size so we can receive jumbo frames. */ if (sc->rl_type == RL_8169) { if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { /* * For controllers that use new jumbo frame scheme, * set maximum size of jumbo frame depending on * controller revisions. */ if (ifp->if_mtu > RL_MTU) CSR_WRITE_2(sc, RL_MAXRXPKTLEN, sc->rl_hwrev->rl_max_mtu + ETHER_VLAN_ENCAP_LEN + ETHER_HDR_LEN + ETHER_CRC_LEN); else CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RE_RX_DESC_BUFLEN); } else if ((sc->rl_flags & RL_FLAG_PCIE) != 0 && sc->rl_hwrev->rl_max_mtu == RL_MTU) { /* RTL810x has no jumbo frame support. */ CSR_WRITE_2(sc, RL_MAXRXPKTLEN, RE_RX_DESC_BUFLEN); } else CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383); } if (sc->rl_testmode) return; CSR_WRITE_1(sc, sc->rl_cfg1, CSR_READ_1(sc, sc->rl_cfg1) | RL_CFG1_DRVLOAD); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->rl_flags &= ~RL_FLAG_LINK; mii_mediachg(mii); sc->rl_watchdog_timer = 0; callout_reset(&sc->rl_stat_callout, hz, re_tick, sc); } /* * Set media options. */ static int re_ifmedia_upd(struct ifnet *ifp) { struct rl_softc *sc; struct mii_data *mii; int error; sc = ifp->if_softc; mii = device_get_softc(sc->rl_miibus); RL_LOCK(sc); error = mii_mediachg(mii); RL_UNLOCK(sc); return (error); } /* * Report current media status. 
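 * (Polls the PHY via mii_pollstat() and copies the active media and
 * link status back to the caller under the driver lock.)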
*/ static void re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct rl_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = device_get_softc(sc->rl_miibus); RL_LOCK(sc); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; RL_UNLOCK(sc); } static int re_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct rl_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; struct mii_data *mii; int error = 0; switch (command) { case SIOCSIFMTU: if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > sc->rl_hwrev->rl_max_mtu || ((sc->rl_flags & RL_FLAG_FASTETHER) != 0 && ifr->ifr_mtu > RL_MTU)) { error = EINVAL; break; } RL_LOCK(sc); if (ifp->if_mtu != ifr->ifr_mtu) { ifp->if_mtu = ifr->ifr_mtu; if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; re_init_locked(sc); } if (ifp->if_mtu > RL_TSO_MTU && (ifp->if_capenable & IFCAP_TSO4) != 0) { ifp->if_capenable &= ~(IFCAP_TSO4 | IFCAP_VLAN_HWTSO); ifp->if_hwassist &= ~CSUM_TSO; } VLAN_CAPABILITIES(ifp); } RL_UNLOCK(sc); break; case SIOCSIFFLAGS: RL_LOCK(sc); if ((ifp->if_flags & IFF_UP) != 0) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { if (((ifp->if_flags ^ sc->rl_if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) re_set_rxmode(sc); } else re_init_locked(sc); } else { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) re_stop(sc); } sc->rl_if_flags = ifp->if_flags; RL_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: RL_LOCK(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) re_set_rxmode(sc); RL_UNLOCK(sc); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->rl_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; case SIOCSIFCAP: { int mask, reinit; mask = ifr->ifr_reqcap ^ ifp->if_capenable; reinit = 0; #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { if (ifr->ifr_reqcap & IFCAP_POLLING) { error = ether_poll_register(re_poll, ifp); if (error) return (error); RL_LOCK(sc); /* Disable interrupts */ CSR_WRITE_2(sc, RL_IMR, 0x0000); ifp->if_capenable |= IFCAP_POLLING; RL_UNLOCK(sc); } else { error = ether_poll_deregister(ifp); /* Enable interrupts. */ RL_LOCK(sc); CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); ifp->if_capenable &= ~IFCAP_POLLING; RL_UNLOCK(sc); } } #endif /* DEVICE_POLLING */ RL_LOCK(sc); if ((mask & IFCAP_TXCSUM) != 0 && (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { ifp->if_capenable ^= IFCAP_TXCSUM; if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) ifp->if_hwassist |= RE_CSUM_FEATURES; else ifp->if_hwassist &= ~RE_CSUM_FEATURES; reinit = 1; } if ((mask & IFCAP_RXCSUM) != 0 && (ifp->if_capabilities & IFCAP_RXCSUM) != 0) { ifp->if_capenable ^= IFCAP_RXCSUM; reinit = 1; } if ((mask & IFCAP_TSO4) != 0 && (ifp->if_capabilities & IFCAP_TSO4) != 0) { ifp->if_capenable ^= IFCAP_TSO4; if ((IFCAP_TSO4 & ifp->if_capenable) != 0) ifp->if_hwassist |= CSUM_TSO; else ifp->if_hwassist &= ~CSUM_TSO; if (ifp->if_mtu > RL_TSO_MTU && (ifp->if_capenable & IFCAP_TSO4) != 0) { ifp->if_capenable &= ~IFCAP_TSO4; ifp->if_hwassist &= ~CSUM_TSO; } } if ((mask & IFCAP_VLAN_HWTSO) != 0 && (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0) ifp->if_capenable ^= IFCAP_VLAN_HWTSO; if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) { ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; /* TSO over VLAN requires VLAN hardware tagging. 
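 * Clearing IFCAP_VLAN_HWTAGGING below therefore also clears
 * IFCAP_VLAN_HWTSO.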
*/ if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) ifp->if_capenable &= ~IFCAP_VLAN_HWTSO; reinit = 1; } if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0 && (mask & (IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWTSO)) != 0) reinit = 1; if ((mask & IFCAP_WOL) != 0 && (ifp->if_capabilities & IFCAP_WOL) != 0) { if ((mask & IFCAP_WOL_UCAST) != 0) ifp->if_capenable ^= IFCAP_WOL_UCAST; if ((mask & IFCAP_WOL_MCAST) != 0) ifp->if_capenable ^= IFCAP_WOL_MCAST; if ((mask & IFCAP_WOL_MAGIC) != 0) ifp->if_capenable ^= IFCAP_WOL_MAGIC; } if (reinit && ifp->if_drv_flags & IFF_DRV_RUNNING) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; re_init_locked(sc); } RL_UNLOCK(sc); VLAN_CAPABILITIES(ifp); } break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static void re_watchdog(struct rl_softc *sc) { struct ifnet *ifp; RL_LOCK_ASSERT(sc); if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer != 0) return; ifp = sc->rl_ifp; re_txeof(sc); if (sc->rl_ldata.rl_tx_free == sc->rl_ldata.rl_tx_desc_cnt) { if_printf(ifp, "watchdog timeout (missed Tx interrupts) " "-- recovering\n"); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) re_start_locked(ifp); return; } if_printf(ifp, "watchdog timeout\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); re_rxeof(sc, NULL); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; re_init_locked(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) re_start_locked(ifp); } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void re_stop(struct rl_softc *sc) { int i; struct ifnet *ifp; struct rl_txdesc *txd; struct rl_rxdesc *rxd; RL_LOCK_ASSERT(sc); ifp = sc->rl_ifp; sc->rl_watchdog_timer = 0; callout_stop(&sc->rl_stat_callout); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); /* * Disable accepting frames to put RX MAC into idle state. * Otherwise it's possible to get frames while stop command * execution is in progress and controller can DMA the frame * to already freed RX buffer during that period. */ CSR_WRITE_4(sc, RL_RXCFG, CSR_READ_4(sc, RL_RXCFG) & ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_MULTI | RL_RXCFG_RX_BROAD)); if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) { /* Enable RXDV gate. */ CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) | 0x00080000); } if ((sc->rl_flags & RL_FLAG_WAIT_TXPOLL) != 0) { for (i = RL_TIMEOUT; i > 0; i--) { if ((CSR_READ_1(sc, sc->rl_txstart) & RL_TXSTART_START) == 0) break; DELAY(20); } if (i == 0) device_printf(sc->rl_dev, "stopping TX poll timed out!\n"); CSR_WRITE_1(sc, RL_COMMAND, 0x00); } else if ((sc->rl_flags & RL_FLAG_CMDSTOP) != 0) { CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_STOPREQ | RL_CMD_TX_ENB | RL_CMD_RX_ENB); if ((sc->rl_flags & RL_FLAG_CMDSTOP_WAIT_TXQ) != 0) { for (i = RL_TIMEOUT; i > 0; i--) { if ((CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_QUEUE_EMPTY) != 0) break; DELAY(100); } if (i == 0) device_printf(sc->rl_dev, "stopping TXQ timed out!\n"); } } else CSR_WRITE_1(sc, RL_COMMAND, 0x00); DELAY(1000); CSR_WRITE_2(sc, RL_IMR, 0x0000); CSR_WRITE_2(sc, RL_ISR, 0xFFFF); if (sc->rl_head != NULL) { m_freem(sc->rl_head); sc->rl_head = sc->rl_tail = NULL; } /* Free the TX list buffers. */ for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { txd = &sc->rl_ldata.rl_tx_desc[i]; if (txd->tx_m != NULL) { bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap); m_freem(txd->tx_m); txd->tx_m = NULL; } } /* Free the RX list buffers. 
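 * (Both the standard RX ring and, on JUMBOV2 chips, the jumbo RX ring
 * are walked; each loaded DMA map is synced, unloaded and its mbuf
 * freed.)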
*/ for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { rxd = &sc->rl_ldata.rl_rx_desc[i]; if (rxd->rx_m != NULL) { bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap); m_freem(rxd->rx_m); rxd->rx_m = NULL; } } if ((sc->rl_flags & RL_FLAG_JUMBOV2) != 0) { for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { rxd = &sc->rl_ldata.rl_jrx_desc[i]; if (rxd->rx_m != NULL) { bus_dmamap_sync(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rl_ldata.rl_jrx_mtag, rxd->rx_dmamap); m_freem(rxd->rx_m); rxd->rx_m = NULL; } } } } /* * Device suspend routine. Stop the interface and save some PCI * settings in case the BIOS doesn't restore them properly on * resume. */ static int re_suspend(device_t dev) { struct rl_softc *sc; sc = device_get_softc(dev); RL_LOCK(sc); re_stop(sc); re_setwol(sc); sc->suspended = 1; RL_UNLOCK(sc); return (0); } /* * Device resume routine. Restore some PCI settings in case the BIOS * doesn't, re-enable busmastering, and restart the interface if * appropriate. */ static int re_resume(device_t dev) { struct rl_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); RL_LOCK(sc); ifp = sc->rl_ifp; /* Take controller out of sleep mode. */ if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) { if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80) CSR_WRITE_1(sc, RL_GPIO, CSR_READ_1(sc, RL_GPIO) | 0x01); } /* * Clear WOL matching such that normal Rx filtering * wouldn't interfere with WOL patterns. */ re_clrwol(sc); /* reinitialize interface if necessary */ if (ifp->if_flags & IFF_UP) re_init_locked(sc); sc->suspended = 0; RL_UNLOCK(sc); return (0); } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static int re_shutdown(device_t dev) { struct rl_softc *sc; sc = device_get_softc(dev); RL_LOCK(sc); re_stop(sc); /* * Mark interface as down since otherwise we will panic if * interrupt comes in later on, which can happen in some * cases. */ sc->rl_ifp->if_flags &= ~IFF_UP; re_setwol(sc); RL_UNLOCK(sc); return (0); } static void re_set_linkspeed(struct rl_softc *sc) { struct mii_softc *miisc; struct mii_data *mii; int aneg, i, phyno; RL_LOCK_ASSERT(sc); mii = device_get_softc(sc->rl_miibus); mii_pollstat(mii); aneg = 0; if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch IFM_SUBTYPE(mii->mii_media_active) { case IFM_10_T: case IFM_100_TX: return; case IFM_1000_T: aneg++; break; default: break; } } miisc = LIST_FIRST(&mii->mii_phys); phyno = miisc->mii_phy; LIST_FOREACH(miisc, &mii->mii_phys, mii_list) PHY_RESET(miisc); re_miibus_writereg(sc->rl_dev, phyno, MII_100T2CR, 0); re_miibus_writereg(sc->rl_dev, phyno, MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA); re_miibus_writereg(sc->rl_dev, phyno, MII_BMCR, BMCR_AUTOEN | BMCR_STARTNEG); DELAY(1000); if (aneg != 0) { /* * Poll link state until re(4) get a 10/100Mbps link. */ for (i = 0; i < MII_ANEGTICKS_GIGE; i++) { mii_pollstat(mii); if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: return; default: break; } } RL_UNLOCK(sc); pause("relnk", hz); RL_LOCK(sc); } if (i == MII_ANEGTICKS_GIGE) device_printf(sc->rl_dev, "establishing a link failed, WOL may not work!"); } /* * No link, force MAC to have 100Mbps, full-duplex link. 
* MAC does not require reprogramming on resolved speed/duplex, * so this is just for completeness. */ mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; } static void re_setwol(struct rl_softc *sc) { struct ifnet *ifp; int pmc; uint16_t pmstat; uint8_t v; RL_LOCK_ASSERT(sc); if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0) return; ifp = sc->rl_ifp; /* Put controller into sleep mode. */ if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) { if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80) CSR_WRITE_1(sc, RL_GPIO, CSR_READ_1(sc, RL_GPIO) & ~0x01); } if ((ifp->if_capenable & IFCAP_WOL) != 0) { if ((sc->rl_flags & RL_FLAG_8168G_PLUS) != 0) { /* Disable RXDV gate. */ CSR_WRITE_4(sc, RL_MISC, CSR_READ_4(sc, RL_MISC) & ~0x00080000); } re_set_rxmode(sc); if ((sc->rl_flags & RL_FLAG_WOL_MANLINK) != 0) re_set_linkspeed(sc); if ((sc->rl_flags & RL_FLAG_WOLRXENB) != 0) CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RX_ENB); } /* Enable config register write. */ CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); /* Enable PME. */ v = CSR_READ_1(sc, sc->rl_cfg1); v &= ~RL_CFG1_PME; if ((ifp->if_capenable & IFCAP_WOL) != 0) v |= RL_CFG1_PME; CSR_WRITE_1(sc, sc->rl_cfg1, v); v = CSR_READ_1(sc, sc->rl_cfg3); v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC); if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) v |= RL_CFG3_WOL_MAGIC; CSR_WRITE_1(sc, sc->rl_cfg3, v); v = CSR_READ_1(sc, sc->rl_cfg5); v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST | RL_CFG5_WOL_LANWAKE); if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0) v |= RL_CFG5_WOL_UCAST; if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST; if ((ifp->if_capenable & IFCAP_WOL) != 0) v |= RL_CFG5_WOL_LANWAKE; CSR_WRITE_1(sc, sc->rl_cfg5, v); /* Config register write done. */ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); if ((ifp->if_capenable & IFCAP_WOL) == 0 && (sc->rl_flags & RL_FLAG_PHYWAKE_PM) != 0) CSR_WRITE_1(sc, RL_PMCH, CSR_READ_1(sc, RL_PMCH) & ~0x80); /* * It seems that hardware resets its link speed to 100Mbps in * power down mode so switching to 100Mbps in driver is not * needed. */ /* Request PME if WOL is requested. */ pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2); pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); if ((ifp->if_capenable & IFCAP_WOL) != 0) pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); } static void re_clrwol(struct rl_softc *sc) { int pmc; uint8_t v; RL_LOCK_ASSERT(sc); if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0) return; /* Enable config register write. */ CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); v = CSR_READ_1(sc, sc->rl_cfg3); v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC); CSR_WRITE_1(sc, sc->rl_cfg3, v); /* Config register write done. 
*/ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); v = CSR_READ_1(sc, sc->rl_cfg5); v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST); v &= ~RL_CFG5_WOL_LANWAKE; CSR_WRITE_1(sc, sc->rl_cfg5, v); } static void re_add_sysctls(struct rl_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; int error; ctx = device_get_sysctl_ctx(sc->rl_dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->rl_dev)); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0, re_sysctl_stats, "I", "Statistics Information"); if ((sc->rl_flags & (RL_FLAG_MSI | RL_FLAG_MSIX)) == 0) return; SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "int_rx_mod", CTLTYPE_INT | CTLFLAG_RW, &sc->rl_int_rx_mod, 0, sysctl_hw_re_int_mod, "I", "re RX interrupt moderation"); /* Pull in device tunables. */ sc->rl_int_rx_mod = RL_TIMER_DEFAULT; error = resource_int_value(device_get_name(sc->rl_dev), device_get_unit(sc->rl_dev), "int_rx_mod", &sc->rl_int_rx_mod); if (error == 0) { if (sc->rl_int_rx_mod < RL_TIMER_MIN || sc->rl_int_rx_mod > RL_TIMER_MAX) { device_printf(sc->rl_dev, "int_rx_mod value out of " "range; using default: %d\n", RL_TIMER_DEFAULT); sc->rl_int_rx_mod = RL_TIMER_DEFAULT; } } } static int re_sysctl_stats(SYSCTL_HANDLER_ARGS) { struct rl_softc *sc; struct rl_stats *stats; int error, i, result; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || req->newptr == NULL) return (error); if (result == 1) { sc = (struct rl_softc *)arg1; RL_LOCK(sc); if ((sc->rl_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { RL_UNLOCK(sc); goto done; } bus_dmamap_sync(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_smap, BUS_DMASYNC_PREREAD); CSR_WRITE_4(sc, RL_DUMPSTATS_HI, RL_ADDR_HI(sc->rl_ldata.rl_stats_addr)); CSR_WRITE_4(sc, RL_DUMPSTATS_LO, RL_ADDR_LO(sc->rl_ldata.rl_stats_addr)); CSR_WRITE_4(sc, RL_DUMPSTATS_LO, RL_ADDR_LO(sc->rl_ldata.rl_stats_addr | RL_DUMPSTATS_START)); for (i = RL_TIMEOUT; i > 0; i--) { if ((CSR_READ_4(sc, RL_DUMPSTATS_LO) & RL_DUMPSTATS_START) == 0) break; DELAY(1000); } bus_dmamap_sync(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_smap, BUS_DMASYNC_POSTREAD); RL_UNLOCK(sc); if (i == 0) { device_printf(sc->rl_dev, "DUMP statistics request timed out\n"); return (ETIMEDOUT); } done: stats = sc->rl_ldata.rl_stats; printf("%s statistics:\n", device_get_nameunit(sc->rl_dev)); printf("Tx frames : %ju\n", (uintmax_t)le64toh(stats->rl_tx_pkts)); printf("Rx frames : %ju\n", (uintmax_t)le64toh(stats->rl_rx_pkts)); printf("Tx errors : %ju\n", (uintmax_t)le64toh(stats->rl_tx_errs)); printf("Rx errors : %u\n", le32toh(stats->rl_rx_errs)); printf("Rx missed frames : %u\n", (uint32_t)le16toh(stats->rl_missed_pkts)); printf("Rx frame alignment errs : %u\n", (uint32_t)le16toh(stats->rl_rx_framealign_errs)); printf("Tx single collisions : %u\n", le32toh(stats->rl_tx_onecoll)); printf("Tx multiple collisions : %u\n", le32toh(stats->rl_tx_multicolls)); printf("Rx unicast frames : %ju\n", (uintmax_t)le64toh(stats->rl_rx_ucasts)); printf("Rx broadcast frames : %ju\n", (uintmax_t)le64toh(stats->rl_rx_bcasts)); printf("Rx multicast frames : %u\n", le32toh(stats->rl_rx_mcasts)); printf("Tx aborts : %u\n", (uint32_t)le16toh(stats->rl_tx_aborts)); printf("Tx underruns : %u\n", (uint32_t)le16toh(stats->rl_rx_underruns)); } return (error); } static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) { int error, value; if (arg1 == NULL) return (EINVAL); value = *(int *)arg1; error = sysctl_handle_int(oidp, &value, 0, req); if (error || req->newptr == NULL) return (error); if 
(value < low || value > high) return (EINVAL); *(int *)arg1 = value; return (0); } static int sysctl_hw_re_int_mod(SYSCTL_HANDLER_ARGS) { return (sysctl_int_range(oidp, arg1, arg2, req, RL_TIMER_MIN, RL_TIMER_MAX)); } #ifdef DEBUGNET static void re_debugnet_init(struct ifnet *ifp, int *nrxr, int *ncl, int *clsize) { struct rl_softc *sc; sc = if_getsoftc(ifp); RL_LOCK(sc); *nrxr = sc->rl_ldata.rl_rx_desc_cnt; *ncl = DEBUGNET_MAX_IN_FLIGHT; *clsize = (ifp->if_mtu > RL_MTU && (sc->rl_flags & RL_FLAG_JUMBOV2) != 0) ? MJUM9BYTES : MCLBYTES; RL_UNLOCK(sc); } static void re_debugnet_event(struct ifnet *ifp __unused, enum debugnet_ev event __unused) { } static int re_debugnet_transmit(struct ifnet *ifp, struct mbuf *m) { struct rl_softc *sc; int error; sc = if_getsoftc(ifp); if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0) return (EBUSY); error = re_encap(sc, &m); if (error == 0) re_start_tx(sc); return (error); } static int re_debugnet_poll(struct ifnet *ifp, int count) { struct rl_softc *sc; int error; sc = if_getsoftc(ifp); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 || (sc->rl_flags & RL_FLAG_LINK) == 0) return (EBUSY); re_txeof(sc); error = re_rxeof(sc, NULL); if (error != 0 && error != EAGAIN) return (error); return (0); } #endif /* DEBUGNET */ diff --git a/sys/dev/rt/if_rt.c b/sys/dev/rt/if_rt.c index e3305997ea88..cd47d051f36e 100644 --- a/sys/dev/rt/if_rt.c +++ b/sys/dev/rt/if_rt.c @@ -1,2950 +1,2950 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2015-2016, Stanislav Galabov * Copyright (c) 2014, Aleksandr A. Mityaev * Copyright (c) 2011, Aleksandr Rybalko * based on hard work * by Alexander Egorenkov * and by Damien Bergamini * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "if_rtvar.h" #include "if_rtreg.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_platform.h" #include "opt_rt305x.h" #ifdef FDT #include #include #include #endif #include #include #ifdef RT_MDIO #include #include #include "mdio_if.h" #endif #if 0 #include #include #endif #ifdef IF_RT_PHY_SUPPORT #include "miibus_if.h" #endif /* * Defines and macros */ #define RT_MAX_AGG_SIZE 3840 #define RT_TX_DATA_SEG0_SIZE MJUMPAGESIZE #define RT_MS(_v, _f) (((_v) & _f) >> _f##_S) #define RT_SM(_v, _f) (((_v) << _f##_S) & _f) #define RT_TX_WATCHDOG_TIMEOUT 5 #define RT_CHIPID_RT2880 0x2880 #define RT_CHIPID_RT3050 0x3050 #define RT_CHIPID_RT5350 0x5350 #define RT_CHIPID_MT7620 0x7620 #define RT_CHIPID_MT7621 0x7621 #ifdef FDT /* more specific and new models should go first */ static const struct ofw_compat_data rt_compat_data[] = { { "ralink,rt2880-eth", RT_CHIPID_RT2880 }, { "ralink,rt3050-eth", RT_CHIPID_RT3050 }, { "ralink,rt3352-eth", RT_CHIPID_RT3050 }, { "ralink,rt3883-eth", RT_CHIPID_RT3050 }, { "ralink,rt5350-eth", RT_CHIPID_RT5350 }, { "ralink,mt7620a-eth", RT_CHIPID_MT7620 }, { "mediatek,mt7620-eth", RT_CHIPID_MT7620 }, { "ralink,mt7621-eth", RT_CHIPID_MT7621 }, { "mediatek,mt7621-eth", RT_CHIPID_MT7621 }, { NULL, 0 } }; #endif /* * Static function prototypes */ static int rt_probe(device_t dev); static int rt_attach(device_t dev); static int rt_detach(device_t dev); static int rt_shutdown(device_t dev); static int rt_suspend(device_t dev); static int rt_resume(device_t dev); static void rt_init_locked(void *priv); static void rt_init(void *priv); static void rt_stop_locked(void *priv); static void rt_stop(void *priv); static void rt_start(struct ifnet *ifp); static int rt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data); static void rt_periodic(void *arg); static void rt_tx_watchdog(void *arg); static void rt_intr(void *arg); static void rt_rt5350_intr(void *arg); static void rt_tx_coherent_intr(struct rt_softc *sc); static void rt_rx_coherent_intr(struct rt_softc *sc); static void rt_rx_delay_intr(struct rt_softc *sc); static void rt_tx_delay_intr(struct rt_softc *sc); static void rt_rx_intr(struct rt_softc *sc, int qid); static void rt_tx_intr(struct rt_softc *sc, int qid); static void rt_rx_done_task(void *context, int pending); static void rt_tx_done_task(void *context, int pending); static void rt_periodic_task(void *context, int pending); static int rt_rx_eof(struct rt_softc *sc, struct rt_softc_rx_ring *ring, int limit); static void rt_tx_eof(struct rt_softc *sc, struct rt_softc_tx_ring *ring); static void rt_update_stats(struct rt_softc *sc); static void rt_watchdog(struct rt_softc *sc); static void rt_update_raw_counters(struct rt_softc *sc); static void rt_intr_enable(struct rt_softc *sc, uint32_t intr_mask); static void rt_intr_disable(struct rt_softc *sc, uint32_t intr_mask); static int rt_txrx_enable(struct rt_softc *sc); static int rt_alloc_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring, int qid); static void rt_reset_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring); static void rt_free_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring); static int rt_alloc_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring, int qid); static void rt_reset_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring); static void rt_free_tx_ring(struct rt_softc *sc, struct 
rt_softc_tx_ring *ring); static void rt_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error); static void rt_sysctl_attach(struct rt_softc *sc); #ifdef IF_RT_PHY_SUPPORT void rt_miibus_statchg(device_t); #endif #if defined(IF_RT_PHY_SUPPORT) || defined(RT_MDIO) static int rt_miibus_readreg(device_t, int, int); static int rt_miibus_writereg(device_t, int, int, int); #endif static int rt_ifmedia_upd(struct ifnet *); static void rt_ifmedia_sts(struct ifnet *, struct ifmediareq *); static SYSCTL_NODE(_hw, OID_AUTO, rt, CTLFLAG_RD, 0, "RT driver parameters"); #ifdef IF_RT_DEBUG static int rt_debug = 0; SYSCTL_INT(_hw_rt, OID_AUTO, debug, CTLFLAG_RWTUN, &rt_debug, 0, "RT debug level"); #endif static int rt_probe(device_t dev) { struct rt_softc *sc = device_get_softc(dev); char buf[80]; #ifdef FDT const struct ofw_compat_data * cd; cd = ofw_bus_search_compatible(dev, rt_compat_data); if (cd->ocd_data == 0) return (ENXIO); sc->rt_chipid = (unsigned int)(cd->ocd_data); #else #if defined(MT7620) sc->rt_chipid = RT_CHIPID_MT7620; #elif defined(MT7621) sc->rt_chipid = RT_CHIPID_MT7621; #elif defined(RT5350) sc->rt_chipid = RT_CHIPID_RT5350; #else sc->rt_chipid = RT_CHIPID_RT3050; #endif #endif snprintf(buf, sizeof(buf), "Ralink %cT%x onChip Ethernet driver", sc->rt_chipid >= 0x7600 ? 'M' : 'R', sc->rt_chipid); device_set_desc_copy(dev, buf); return (BUS_PROBE_GENERIC); } /* * macaddr_atoi - translate string MAC address to uint8_t array */ static int macaddr_atoi(const char *str, uint8_t *mac) { int count, i; unsigned int amac[ETHER_ADDR_LEN]; /* Aligned version */ count = sscanf(str, "%x%*c%x%*c%x%*c%x%*c%x%*c%x", &amac[0], &amac[1], &amac[2], &amac[3], &amac[4], &amac[5]); if (count < ETHER_ADDR_LEN) { memset(mac, 0, ETHER_ADDR_LEN); return (1); } /* Copy aligned to result */ for (i = 0; i < ETHER_ADDR_LEN; i ++) mac[i] = (amac[i] & 0xff); return (0); } #ifdef USE_GENERATED_MAC_ADDRESS /* * generate_mac(uin8_t *mac) * This is MAC address generator for cases when real device MAC address * unknown or not yet accessible. * Use 'b','s','d' signature and 3 octets from CRC32 on kenv. * MAC = 'b', 's', 'd', CRC[3]^CRC[2], CRC[1], CRC[0] * * Output - MAC address, that do not change between reboots, if hints or * bootloader info unchange. */ static void generate_mac(uint8_t *mac) { unsigned char *cp; int i = 0; uint32_t crc = 0xffffffff; /* Generate CRC32 on kenv */ for (cp = kenvp[0]; cp != NULL; cp = kenvp[++i]) { crc = calculate_crc32c(crc, cp, strlen(cp) + 1); } crc = ~crc; mac[0] = 'b'; mac[1] = 's'; mac[2] = 'd'; mac[3] = (crc >> 24) ^ ((crc >> 16) & 0xff); mac[4] = (crc >> 8) & 0xff; mac[5] = crc & 0xff; } #endif /* * ether_request_mac - try to find usable MAC address. 
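 * Lookup order: the "ethaddr"/"kmac" kernel environment variables
 * (RedBoot/RouterBOOT platforms), then the hint.<dev>.<unit>.macaddr
 * hint, and finally either a generated address or the hardcoded
 * fallback 00:18:e7:d5:83:90.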
*/ static int ether_request_mac(device_t dev, uint8_t *mac) { char *var; /* * "ethaddr" is passed via envp on RedBoot platforms * "kmac" is passed via argv on RouterBOOT platforms */ #if defined(RT305X_UBOOT) || defined(__REDBOOT__) || defined(__ROUTERBOOT__) if ((var = kern_getenv("ethaddr")) != NULL || (var = kern_getenv("kmac")) != NULL ) { if(!macaddr_atoi(var, mac)) { printf("%s: use %s macaddr from KENV\n", device_get_nameunit(dev), var); freeenv(var); return (0); } freeenv(var); } #endif /* * Try from hints * hint.[dev].[unit].macaddr */ if (!resource_string_value(device_get_name(dev), device_get_unit(dev), "macaddr", (const char **)&var)) { if(!macaddr_atoi(var, mac)) { printf("%s: use %s macaddr from hints\n", device_get_nameunit(dev), var); return (0); } } #ifdef USE_GENERATED_MAC_ADDRESS generate_mac(mac); device_printf(dev, "use generated %02x:%02x:%02x:%02x:%02x:%02x " "macaddr\n", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); #else /* Hardcoded */ mac[0] = 0x00; mac[1] = 0x18; mac[2] = 0xe7; mac[3] = 0xd5; mac[4] = 0x83; mac[5] = 0x90; device_printf(dev, "use hardcoded 00:18:e7:d5:83:90 macaddr\n"); #endif return (0); } /* * Reset hardware */ static void reset_freng(struct rt_softc *sc) { /* XXX hard reset kills everything so skip it ... */ return; } static int rt_attach(device_t dev) { struct rt_softc *sc; struct ifnet *ifp; int error, i; sc = device_get_softc(dev); sc->dev = dev; mtx_init(&sc->lock, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); sc->mem_rid = 0; sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid, RF_ACTIVE | RF_SHAREABLE); if (sc->mem == NULL) { device_printf(dev, "could not allocate memory resource\n"); error = ENXIO; goto fail; } sc->bst = rman_get_bustag(sc->mem); sc->bsh = rman_get_bushandle(sc->mem); sc->irq_rid = 0; sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, RF_ACTIVE); if (sc->irq == NULL) { device_printf(dev, "could not allocate interrupt resource\n"); error = ENXIO; goto fail; } #ifdef IF_RT_DEBUG sc->debug = rt_debug; SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLFLAG_RW, &sc->debug, 0, "rt debug level"); #endif /* Reset hardware */ reset_freng(sc); if (sc->rt_chipid == RT_CHIPID_MT7620) { sc->csum_fail_ip = MT7620_RXD_SRC_IP_CSUM_FAIL; sc->csum_fail_l4 = MT7620_RXD_SRC_L4_CSUM_FAIL; } else if (sc->rt_chipid == RT_CHIPID_MT7621) { sc->csum_fail_ip = MT7621_RXD_SRC_IP_CSUM_FAIL; sc->csum_fail_l4 = MT7621_RXD_SRC_L4_CSUM_FAIL; } else { sc->csum_fail_ip = RT305X_RXD_SRC_IP_CSUM_FAIL; sc->csum_fail_l4 = RT305X_RXD_SRC_L4_CSUM_FAIL; } /* Fill in soc-specific registers map */ switch(sc->rt_chipid) { case RT_CHIPID_MT7620: case RT_CHIPID_MT7621: sc->gdma1_base = MT7620_GDMA1_BASE; /* fallthrough */ case RT_CHIPID_RT5350: device_printf(dev, "%cT%x Ethernet MAC (rev 0x%08x)\n", sc->rt_chipid >= 0x7600 ? 
'M' : 'R', sc->rt_chipid, sc->mac_rev); /* RT5350: No GDMA, PSE, CDMA, PPE */ RT_WRITE(sc, GE_PORT_BASE + 0x0C00, // UDPCS, TCPCS, IPCS=1 RT_READ(sc, GE_PORT_BASE + 0x0C00) | (0x7<<16)); sc->delay_int_cfg=RT5350_PDMA_BASE+RT5350_DELAY_INT_CFG; sc->fe_int_status=RT5350_FE_INT_STATUS; sc->fe_int_enable=RT5350_FE_INT_ENABLE; sc->pdma_glo_cfg=RT5350_PDMA_BASE+RT5350_PDMA_GLO_CFG; sc->pdma_rst_idx=RT5350_PDMA_BASE+RT5350_PDMA_RST_IDX; for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) { sc->tx_base_ptr[i]=RT5350_PDMA_BASE+RT5350_TX_BASE_PTR(i); sc->tx_max_cnt[i]=RT5350_PDMA_BASE+RT5350_TX_MAX_CNT(i); sc->tx_ctx_idx[i]=RT5350_PDMA_BASE+RT5350_TX_CTX_IDX(i); sc->tx_dtx_idx[i]=RT5350_PDMA_BASE+RT5350_TX_DTX_IDX(i); } sc->rx_ring_count=2; sc->rx_base_ptr[0]=RT5350_PDMA_BASE+RT5350_RX_BASE_PTR0; sc->rx_max_cnt[0]=RT5350_PDMA_BASE+RT5350_RX_MAX_CNT0; sc->rx_calc_idx[0]=RT5350_PDMA_BASE+RT5350_RX_CALC_IDX0; sc->rx_drx_idx[0]=RT5350_PDMA_BASE+RT5350_RX_DRX_IDX0; sc->rx_base_ptr[1]=RT5350_PDMA_BASE+RT5350_RX_BASE_PTR1; sc->rx_max_cnt[1]=RT5350_PDMA_BASE+RT5350_RX_MAX_CNT1; sc->rx_calc_idx[1]=RT5350_PDMA_BASE+RT5350_RX_CALC_IDX1; sc->rx_drx_idx[1]=RT5350_PDMA_BASE+RT5350_RX_DRX_IDX1; sc->int_rx_done_mask=RT5350_INT_RXQ0_DONE; sc->int_tx_done_mask=RT5350_INT_TXQ0_DONE; break; default: device_printf(dev, "RT305XF Ethernet MAC (rev 0x%08x)\n", sc->mac_rev); sc->gdma1_base = GDMA1_BASE; sc->delay_int_cfg=PDMA_BASE+DELAY_INT_CFG; sc->fe_int_status=GE_PORT_BASE+FE_INT_STATUS; sc->fe_int_enable=GE_PORT_BASE+FE_INT_ENABLE; sc->pdma_glo_cfg=PDMA_BASE+PDMA_GLO_CFG; sc->pdma_rst_idx=PDMA_BASE+PDMA_RST_IDX; for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) { sc->tx_base_ptr[i]=PDMA_BASE+TX_BASE_PTR(i); sc->tx_max_cnt[i]=PDMA_BASE+TX_MAX_CNT(i); sc->tx_ctx_idx[i]=PDMA_BASE+TX_CTX_IDX(i); sc->tx_dtx_idx[i]=PDMA_BASE+TX_DTX_IDX(i); } sc->rx_ring_count=1; sc->rx_base_ptr[0]=PDMA_BASE+RX_BASE_PTR0; sc->rx_max_cnt[0]=PDMA_BASE+RX_MAX_CNT0; sc->rx_calc_idx[0]=PDMA_BASE+RX_CALC_IDX0; sc->rx_drx_idx[0]=PDMA_BASE+RX_DRX_IDX0; sc->int_rx_done_mask=INT_RX_DONE; sc->int_tx_done_mask=INT_TXQ0_DONE; } if (sc->gdma1_base != 0) RT_WRITE(sc, sc->gdma1_base + GDMA_FWD_CFG, ( GDM_ICS_EN | /* Enable IP Csum */ GDM_TCS_EN | /* Enable TCP Csum */ GDM_UCS_EN | /* Enable UDP Csum */ GDM_STRPCRC | /* Strip CRC from packet */ GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* fwd UCast to CPU */ GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* fwd BCast to CPU */ GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* fwd MCast to CPU */ GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT /* fwd Other to CPU */ )); if (sc->rt_chipid == RT_CHIPID_RT2880) RT_WRITE(sc, MDIO_CFG, MDIO_2880_100T_INIT); /* allocate Tx and Rx rings */ for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) { error = rt_alloc_tx_ring(sc, &sc->tx_ring[i], i); if (error != 0) { device_printf(dev, "could not allocate Tx ring #%d\n", i); goto fail; } } sc->tx_ring_mgtqid = 5; for (i = 0; i < sc->rx_ring_count; i++) { error = rt_alloc_rx_ring(sc, &sc->rx_ring[i], i); if (error != 0) { device_printf(dev, "could not allocate Rx ring\n"); goto fail; } } callout_init(&sc->periodic_ch, 0); callout_init_mtx(&sc->tx_watchdog_ch, &sc->lock, 0); ifp = sc->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "could not if_alloc()\n"); error = ENOMEM; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = rt_init; ifp->if_ioctl = rt_ioctl; ifp->if_start = rt_start; #define RT_TX_QLEN 256 
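/* RT_TX_QLEN sizes the software send queue (ifq maxlen) set up below. */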
IFQ_SET_MAXLEN(&ifp->if_snd, RT_TX_QLEN); ifp->if_snd.ifq_drv_maxlen = RT_TX_QLEN; IFQ_SET_READY(&ifp->if_snd); #ifdef IF_RT_PHY_SUPPORT error = mii_attach(dev, &sc->rt_miibus, ifp, rt_ifmedia_upd, rt_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0); if (error != 0) { device_printf(dev, "attaching PHYs failed\n"); error = ENXIO; goto fail; } #else ifmedia_init(&sc->rt_ifmedia, 0, rt_ifmedia_upd, rt_ifmedia_sts); ifmedia_add(&sc->rt_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL); ifmedia_set(&sc->rt_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX); #endif /* IF_RT_PHY_SUPPORT */ ether_request_mac(dev, sc->mac_addr); ether_ifattach(ifp, sc->mac_addr); /* * Tell the upper layer(s) we support long frames. */ ifp->if_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_VLAN_MTU; ifp->if_capenable |= IFCAP_VLAN_MTU; ifp->if_capabilities |= IFCAP_RXCSUM|IFCAP_TXCSUM; ifp->if_capenable |= IFCAP_RXCSUM|IFCAP_TXCSUM; /* init task queue */ - TASK_INIT(&sc->rx_done_task, 0, rt_rx_done_task, sc); + NET_TASK_INIT(&sc->rx_done_task, 0, rt_rx_done_task, sc); TASK_INIT(&sc->tx_done_task, 0, rt_tx_done_task, sc); TASK_INIT(&sc->periodic_task, 0, rt_periodic_task, sc); sc->rx_process_limit = 100; sc->taskqueue = taskqueue_create("rt_taskq", M_NOWAIT, taskqueue_thread_enqueue, &sc->taskqueue); taskqueue_start_threads(&sc->taskqueue, 1, PI_NET, "%s taskq", device_get_nameunit(sc->dev)); rt_sysctl_attach(sc); /* set up interrupt */ error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, (sc->rt_chipid == RT_CHIPID_RT5350 || sc->rt_chipid == RT_CHIPID_MT7620 || sc->rt_chipid == RT_CHIPID_MT7621) ? rt_rt5350_intr : rt_intr, sc, &sc->irqh); if (error != 0) { printf("%s: could not set up interrupt\n", device_get_nameunit(dev)); goto fail; } #ifdef IF_RT_DEBUG device_printf(dev, "debug var at %#08x\n", (u_int)&(sc->debug)); #endif return (0); fail: /* free Tx and Rx rings */ for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) rt_free_tx_ring(sc, &sc->tx_ring[i]); for (i = 0; i < sc->rx_ring_count; i++) rt_free_rx_ring(sc, &sc->rx_ring[i]); mtx_destroy(&sc->lock); if (sc->mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem); if (sc->irq != NULL) bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq); return (error); } /* * Set media options. */ static int rt_ifmedia_upd(struct ifnet *ifp) { struct rt_softc *sc; #ifdef IF_RT_PHY_SUPPORT struct mii_data *mii; struct mii_softc *miisc; int error = 0; sc = ifp->if_softc; RT_SOFTC_LOCK(sc); mii = device_get_softc(sc->rt_miibus); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) PHY_RESET(miisc); error = mii_mediachg(mii); RT_SOFTC_UNLOCK(sc); return (error); #else /* !IF_RT_PHY_SUPPORT */ struct ifmedia *ifm; struct ifmedia_entry *ife; sc = ifp->if_softc; ifm = &sc->rt_ifmedia; ife = ifm->ifm_cur; if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) { device_printf(sc->dev, "AUTO is not supported for multiphy MAC"); return (EINVAL); } /* * Ignore everything */ return (0); #endif /* IF_RT_PHY_SUPPORT */ } /* * Report current media status. 
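 * Without IF_RT_PHY_SUPPORT the driver unconditionally reports
 * 100baseTX full-duplex with a valid, active link.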
*/ static void rt_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { #ifdef IF_RT_PHY_SUPPORT struct rt_softc *sc; struct mii_data *mii; sc = ifp->if_softc; RT_SOFTC_LOCK(sc); mii = device_get_softc(sc->rt_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX; ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; RT_SOFTC_UNLOCK(sc); #else /* !IF_RT_PHY_SUPPORT */ ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX; #endif /* IF_RT_PHY_SUPPORT */ } static int rt_detach(device_t dev) { struct rt_softc *sc; struct ifnet *ifp; int i; sc = device_get_softc(dev); ifp = sc->ifp; RT_DPRINTF(sc, RT_DEBUG_ANY, "detaching\n"); RT_SOFTC_LOCK(sc); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); callout_stop(&sc->periodic_ch); callout_stop(&sc->tx_watchdog_ch); taskqueue_drain(sc->taskqueue, &sc->rx_done_task); taskqueue_drain(sc->taskqueue, &sc->tx_done_task); taskqueue_drain(sc->taskqueue, &sc->periodic_task); /* free Tx and Rx rings */ for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) rt_free_tx_ring(sc, &sc->tx_ring[i]); for (i = 0; i < sc->rx_ring_count; i++) rt_free_rx_ring(sc, &sc->rx_ring[i]); RT_SOFTC_UNLOCK(sc); #ifdef IF_RT_PHY_SUPPORT if (sc->rt_miibus != NULL) device_delete_child(dev, sc->rt_miibus); #endif ether_ifdetach(ifp); if_free(ifp); taskqueue_free(sc->taskqueue); mtx_destroy(&sc->lock); bus_generic_detach(dev); bus_teardown_intr(dev, sc->irq, sc->irqh); bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq); bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem); return (0); } static int rt_shutdown(device_t dev) { struct rt_softc *sc; sc = device_get_softc(dev); RT_DPRINTF(sc, RT_DEBUG_ANY, "shutting down\n"); rt_stop(sc); return (0); } static int rt_suspend(device_t dev) { struct rt_softc *sc; sc = device_get_softc(dev); RT_DPRINTF(sc, RT_DEBUG_ANY, "suspending\n"); rt_stop(sc); return (0); } static int rt_resume(device_t dev) { struct rt_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->ifp; RT_DPRINTF(sc, RT_DEBUG_ANY, "resuming\n"); if (ifp->if_flags & IFF_UP) rt_init(sc); return (0); } /* * rt_init_locked - Run initialization process having locked mtx. 
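 * Sequence: program GDMA forwarding, quiesce the PDMA engine, reset
 * and reload the TX/RX rings, re-enable DMA and interrupts, then mark
 * the interface running and start the periodic callout.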
*/ static void rt_init_locked(void *priv) { struct rt_softc *sc; struct ifnet *ifp; #ifdef IF_RT_PHY_SUPPORT struct mii_data *mii; #endif int i, ntries; uint32_t tmp; sc = priv; ifp = sc->ifp; #ifdef IF_RT_PHY_SUPPORT mii = device_get_softc(sc->rt_miibus); #endif RT_DPRINTF(sc, RT_DEBUG_ANY, "initializing\n"); RT_SOFTC_ASSERT_LOCKED(sc); /* hardware reset */ //RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET); //rt305x_sysctl_set(SYSCTL_RSTCTRL, SYSCTL_RSTCTRL_FRENG); /* Fwd to CPU (uni|broad|multi)cast and Unknown */ if (sc->gdma1_base != 0) RT_WRITE(sc, sc->gdma1_base + GDMA_FWD_CFG, ( GDM_ICS_EN | /* Enable IP Csum */ GDM_TCS_EN | /* Enable TCP Csum */ GDM_UCS_EN | /* Enable UDP Csum */ GDM_STRPCRC | /* Strip CRC from packet */ GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* fwd UCast to CPU */ GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* fwd BCast to CPU */ GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* fwd MCast to CPU */ GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT /* fwd Other to CPU */ )); /* disable DMA engine */ RT_WRITE(sc, sc->pdma_glo_cfg, 0); RT_WRITE(sc, sc->pdma_rst_idx, 0xffffffff); /* wait while DMA engine is busy */ for (ntries = 0; ntries < 100; ntries++) { tmp = RT_READ(sc, sc->pdma_glo_cfg); if (!(tmp & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY))) break; DELAY(1000); } if (ntries == 100) { device_printf(sc->dev, "timeout waiting for DMA engine\n"); goto fail; } /* reset Rx and Tx rings */ tmp = FE_RST_DRX_IDX0 | FE_RST_DTX_IDX3 | FE_RST_DTX_IDX2 | FE_RST_DTX_IDX1 | FE_RST_DTX_IDX0; RT_WRITE(sc, sc->pdma_rst_idx, tmp); /* XXX switch set mac address */ for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) rt_reset_tx_ring(sc, &sc->tx_ring[i]); for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) { /* update TX_BASE_PTRx */ RT_WRITE(sc, sc->tx_base_ptr[i], sc->tx_ring[i].desc_phys_addr); RT_WRITE(sc, sc->tx_max_cnt[i], RT_SOFTC_TX_RING_DESC_COUNT); RT_WRITE(sc, sc->tx_ctx_idx[i], 0); } /* init Rx ring */ for (i = 0; i < sc->rx_ring_count; i++) rt_reset_rx_ring(sc, &sc->rx_ring[i]); /* update RX_BASE_PTRx */ for (i = 0; i < sc->rx_ring_count; i++) { RT_WRITE(sc, sc->rx_base_ptr[i], sc->rx_ring[i].desc_phys_addr); RT_WRITE(sc, sc->rx_max_cnt[i], RT_SOFTC_RX_RING_DATA_COUNT); RT_WRITE(sc, sc->rx_calc_idx[i], RT_SOFTC_RX_RING_DATA_COUNT - 1); } /* write back DDONE, 16byte burst enable RX/TX DMA */ tmp = FE_TX_WB_DDONE | FE_DMA_BT_SIZE16 | FE_RX_DMA_EN | FE_TX_DMA_EN; if (sc->rt_chipid == RT_CHIPID_MT7620 || sc->rt_chipid == RT_CHIPID_MT7621) tmp |= (1<<31); RT_WRITE(sc, sc->pdma_glo_cfg, tmp); /* disable interrupts mitigation */ RT_WRITE(sc, sc->delay_int_cfg, 0); /* clear pending interrupts */ RT_WRITE(sc, sc->fe_int_status, 0xffffffff); /* enable interrupts */ if (sc->rt_chipid == RT_CHIPID_RT5350 || sc->rt_chipid == RT_CHIPID_MT7620 || sc->rt_chipid == RT_CHIPID_MT7621) tmp = RT5350_INT_TX_COHERENT | RT5350_INT_RX_COHERENT | RT5350_INT_TXQ3_DONE | RT5350_INT_TXQ2_DONE | RT5350_INT_TXQ1_DONE | RT5350_INT_TXQ0_DONE | RT5350_INT_RXQ1_DONE | RT5350_INT_RXQ0_DONE; else tmp = CNT_PPE_AF | CNT_GDM_AF | PSE_P2_FC | GDM_CRC_DROP | PSE_BUF_DROP | GDM_OTHER_DROP | PSE_P1_FC | PSE_P0_FC | PSE_FQ_EMPTY | INT_TX_COHERENT | INT_RX_COHERENT | INT_TXQ3_DONE | INT_TXQ2_DONE | INT_TXQ1_DONE | INT_TXQ0_DONE | INT_RX_DONE; sc->intr_enable_mask = tmp; RT_WRITE(sc, sc->fe_int_enable, tmp); if (rt_txrx_enable(sc) != 0) goto fail; #ifdef IF_RT_PHY_SUPPORT if (mii) mii_mediachg(mii); #endif /* IF_RT_PHY_SUPPORT */ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_drv_flags |= IFF_DRV_RUNNING; sc->periodic_round = 0; 
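	/* Arm the periodic maintenance callout to fire in hz / 10 ticks. */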
callout_reset(&sc->periodic_ch, hz / 10, rt_periodic, sc); return; fail: rt_stop_locked(sc); } /* * rt_init - lock and initialize device. */ static void rt_init(void *priv) { struct rt_softc *sc; sc = priv; RT_SOFTC_LOCK(sc); rt_init_locked(sc); RT_SOFTC_UNLOCK(sc); } /* * rt_stop_locked - stop TX/RX w/ lock */ static void rt_stop_locked(void *priv) { struct rt_softc *sc; struct ifnet *ifp; sc = priv; ifp = sc->ifp; RT_DPRINTF(sc, RT_DEBUG_ANY, "stopping\n"); RT_SOFTC_ASSERT_LOCKED(sc); sc->tx_timer = 0; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); callout_stop(&sc->periodic_ch); callout_stop(&sc->tx_watchdog_ch); RT_SOFTC_UNLOCK(sc); taskqueue_block(sc->taskqueue); /* * Sometime rt_stop_locked called from isr and we get panic * When found, I fix it */ #ifdef notyet taskqueue_drain(sc->taskqueue, &sc->rx_done_task); taskqueue_drain(sc->taskqueue, &sc->tx_done_task); taskqueue_drain(sc->taskqueue, &sc->periodic_task); #endif RT_SOFTC_LOCK(sc); /* disable interrupts */ RT_WRITE(sc, sc->fe_int_enable, 0); if(sc->rt_chipid != RT_CHIPID_RT5350 && sc->rt_chipid != RT_CHIPID_MT7620 && sc->rt_chipid != RT_CHIPID_MT7621) { /* reset adapter */ RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET); } if (sc->gdma1_base != 0) RT_WRITE(sc, sc->gdma1_base + GDMA_FWD_CFG, ( GDM_ICS_EN | /* Enable IP Csum */ GDM_TCS_EN | /* Enable TCP Csum */ GDM_UCS_EN | /* Enable UDP Csum */ GDM_STRPCRC | /* Strip CRC from packet */ GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* fwd UCast to CPU */ GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* fwd BCast to CPU */ GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* fwd MCast to CPU */ GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT /* fwd Other to CPU */ )); } static void rt_stop(void *priv) { struct rt_softc *sc; sc = priv; RT_SOFTC_LOCK(sc); rt_stop_locked(sc); RT_SOFTC_UNLOCK(sc); } /* * rt_tx_data - transmit packet. 
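 * Each TX descriptor carries up to two DMA segments (sdp0/sdl0 and
 * sdp1/sdl1), so the code reserves 1 + ndmasegs / 2 descriptors per
 * packet and bails out with EFBIG if that would leave fewer than two
 * free slots in the ring.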
*/ static int rt_tx_data(struct rt_softc *sc, struct mbuf *m, int qid) { struct ifnet *ifp; struct rt_softc_tx_ring *ring; struct rt_softc_tx_data *data; struct rt_txdesc *desc; struct mbuf *m_d; bus_dma_segment_t dma_seg[RT_SOFTC_MAX_SCATTER]; int error, ndmasegs, ndescs, i; KASSERT(qid >= 0 && qid < RT_SOFTC_TX_RING_COUNT, ("%s: Tx data: invalid qid=%d\n", device_get_nameunit(sc->dev), qid)); RT_SOFTC_TX_RING_ASSERT_LOCKED(&sc->tx_ring[qid]); ifp = sc->ifp; ring = &sc->tx_ring[qid]; desc = &ring->desc[ring->desc_cur]; data = &ring->data[ring->data_cur]; error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag, data->dma_map, m, dma_seg, &ndmasegs, 0); if (error != 0) { /* too many fragments, linearize */ RT_DPRINTF(sc, RT_DEBUG_TX, "could not load mbuf DMA map, trying to linearize " "mbuf: ndmasegs=%d, len=%d, error=%d\n", ndmasegs, m->m_pkthdr.len, error); m_d = m_collapse(m, M_NOWAIT, 16); if (m_d == NULL) { m_freem(m); m = NULL; return (ENOMEM); } m = m_d; sc->tx_defrag_packets++; error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag, data->dma_map, m, dma_seg, &ndmasegs, 0); if (error != 0) { device_printf(sc->dev, "could not load mbuf DMA map: " "ndmasegs=%d, len=%d, error=%d\n", ndmasegs, m->m_pkthdr.len, error); m_freem(m); return (error); } } if (m->m_pkthdr.len == 0) ndmasegs = 0; /* determine how many Tx descs are required */ ndescs = 1 + ndmasegs / 2; if ((ring->desc_queued + ndescs) > (RT_SOFTC_TX_RING_DESC_COUNT - 2)) { RT_DPRINTF(sc, RT_DEBUG_TX, "there are not enough Tx descs\n"); sc->no_tx_desc_avail++; bus_dmamap_unload(ring->data_dma_tag, data->dma_map); m_freem(m); return (EFBIG); } data->m = m; /* set up Tx descs */ for (i = 0; i < ndmasegs; i += 2) { /* TODO: this needs to be refined as MT7620 for example has * a different word3 layout than RT305x and RT5350 (the last * one doesn't use word3 at all). And so does MT7621... 
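 * For now MT7621 gets a minimal setup (dst = 2, qn/pppoe/vid zeroed),
 * while the other chips also request IP/TCP/UDP checksum generation
 * when IFCAP_TXCSUM is enabled.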
*/ if (sc->rt_chipid != RT_CHIPID_MT7621) { /* Set destination */ if (sc->rt_chipid != RT_CHIPID_MT7620) desc->dst = (TXDSCR_DST_PORT_GDMA1); if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) desc->dst |= (TXDSCR_IP_CSUM_GEN | TXDSCR_UDP_CSUM_GEN | TXDSCR_TCP_CSUM_GEN); /* Set queue id */ desc->qn = qid; /* No PPPoE */ desc->pppoe = 0; /* No VLAN */ desc->vid = 0; } else { desc->vid = 0; desc->pppoe = 0; desc->qn = 0; desc->dst = 2; } desc->sdp0 = htole32(dma_seg[i].ds_addr); desc->sdl0 = htole16(dma_seg[i].ds_len | ( ((i+1) == ndmasegs )?RT_TXDESC_SDL0_LASTSEG:0 )); if ((i+1) < ndmasegs) { desc->sdp1 = htole32(dma_seg[i+1].ds_addr); desc->sdl1 = htole16(dma_seg[i+1].ds_len | ( ((i+2) == ndmasegs )?RT_TXDESC_SDL1_LASTSEG:0 )); } else { desc->sdp1 = 0; desc->sdl1 = 0; } if ((i+2) < ndmasegs) { ring->desc_queued++; ring->desc_cur = (ring->desc_cur + 1) % RT_SOFTC_TX_RING_DESC_COUNT; } desc = &ring->desc[ring->desc_cur]; } RT_DPRINTF(sc, RT_DEBUG_TX, "sending data: len=%d, ndmasegs=%d, " "DMA ds_len=%d/%d/%d/%d/%d\n", m->m_pkthdr.len, ndmasegs, (int) dma_seg[0].ds_len, (int) dma_seg[1].ds_len, (int) dma_seg[2].ds_len, (int) dma_seg[3].ds_len, (int) dma_seg[4].ds_len); bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(ring->data_dma_tag, data->dma_map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map, BUS_DMASYNC_PREWRITE); ring->desc_queued++; ring->desc_cur = (ring->desc_cur + 1) % RT_SOFTC_TX_RING_DESC_COUNT; ring->data_queued++; ring->data_cur = (ring->data_cur + 1) % RT_SOFTC_TX_RING_DATA_COUNT; /* kick Tx */ RT_WRITE(sc, sc->tx_ctx_idx[qid], ring->desc_cur); return (0); } /* * rt_start - start Transmit/Receive */ static void rt_start(struct ifnet *ifp) { struct rt_softc *sc; struct mbuf *m; int qid = 0 /* XXX must check QoS priority */; sc = ifp->if_softc; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) return; for (;;) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; m->m_pkthdr.rcvif = NULL; RT_SOFTC_TX_RING_LOCK(&sc->tx_ring[qid]); if (sc->tx_ring[qid].data_queued >= RT_SOFTC_TX_RING_DATA_COUNT) { RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]); RT_DPRINTF(sc, RT_DEBUG_TX, "if_start: Tx ring with qid=%d is full\n", qid); m_freem(m); ifp->if_drv_flags |= IFF_DRV_OACTIVE; if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); sc->tx_data_queue_full[qid]++; break; } if (rt_tx_data(sc, m, qid) != 0) { RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); break; } RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]); sc->tx_timer = RT_TX_WATCHDOG_TIMEOUT; callout_reset(&sc->tx_watchdog_ch, hz, rt_tx_watchdog, sc); } } /* * rt_update_promisc - set/clear promiscuous mode. Unused yet, because * filtering done by attached Ethernet switch. */ static void rt_update_promisc(struct ifnet *ifp) { struct rt_softc *sc; sc = ifp->if_softc; printf("%s: %s promiscuous mode\n", device_get_nameunit(sc->dev), (ifp->if_flags & IFF_PROMISC) ? "entering" : "leaving"); } /* * rt_ioctl - ioctl handler. 
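 * Only SIOCSIFFLAGS and the media ioctls are handled here; everything
 * else is forwarded to ether_ioctl().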
*/ static int rt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct rt_softc *sc; struct ifreq *ifr; #ifdef IF_RT_PHY_SUPPORT struct mii_data *mii; #endif /* IF_RT_PHY_SUPPORT */ int error, startall; sc = ifp->if_softc; ifr = (struct ifreq *) data; error = 0; switch (cmd) { case SIOCSIFFLAGS: startall = 0; RT_SOFTC_LOCK(sc); if (ifp->if_flags & IFF_UP) { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { if ((ifp->if_flags ^ sc->if_flags) & IFF_PROMISC) rt_update_promisc(ifp); } else { rt_init_locked(sc); startall = 1; } } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) rt_stop_locked(sc); } sc->if_flags = ifp->if_flags; RT_SOFTC_UNLOCK(sc); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: #ifdef IF_RT_PHY_SUPPORT mii = device_get_softc(sc->rt_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); #else error = ifmedia_ioctl(ifp, ifr, &sc->rt_ifmedia, cmd); #endif /* IF_RT_PHY_SUPPORT */ break; default: error = ether_ioctl(ifp, cmd, data); break; } return (error); } /* * rt_periodic - Handler of PERIODIC interrupt */ static void rt_periodic(void *arg) { struct rt_softc *sc; sc = arg; RT_DPRINTF(sc, RT_DEBUG_PERIODIC, "periodic\n"); taskqueue_enqueue(sc->taskqueue, &sc->periodic_task); } /* * rt_tx_watchdog - Handler of TX Watchdog */ static void rt_tx_watchdog(void *arg) { struct rt_softc *sc; struct ifnet *ifp; sc = arg; ifp = sc->ifp; if (sc->tx_timer == 0) return; if (--sc->tx_timer == 0) { device_printf(sc->dev, "Tx watchdog timeout: resetting\n"); #ifdef notyet /* * XXX: Commented out, because reset break input. */ rt_stop_locked(sc); rt_init_locked(sc); #endif if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); sc->tx_watchdog_timeouts++; } callout_reset(&sc->tx_watchdog_ch, hz, rt_tx_watchdog, sc); } /* * rt_cnt_ppe_af - Handler of PPE Counter Table Almost Full interrupt */ static void rt_cnt_ppe_af(struct rt_softc *sc) { RT_DPRINTF(sc, RT_DEBUG_INTR, "PPE Counter Table Almost Full\n"); } /* * rt_cnt_gdm_af - Handler of GDMA 1 & 2 Counter Table Almost Full interrupt */ static void rt_cnt_gdm_af(struct rt_softc *sc) { RT_DPRINTF(sc, RT_DEBUG_INTR, "GDMA 1 & 2 Counter Table Almost Full\n"); } /* * rt_pse_p2_fc - Handler of PSE port2 (GDMA 2) flow control interrupt */ static void rt_pse_p2_fc(struct rt_softc *sc) { RT_DPRINTF(sc, RT_DEBUG_INTR, "PSE port2 (GDMA 2) flow control asserted.\n"); } /* * rt_gdm_crc_drop - Handler of GDMA 1/2 discard a packet due to CRC error * interrupt */ static void rt_gdm_crc_drop(struct rt_softc *sc) { RT_DPRINTF(sc, RT_DEBUG_INTR, "GDMA 1 & 2 discard a packet due to CRC error\n"); } /* * rt_pse_buf_drop - Handler of buffer sharing limitation interrupt */ static void rt_pse_buf_drop(struct rt_softc *sc) { RT_DPRINTF(sc, RT_DEBUG_INTR, "PSE discards a packet due to buffer sharing limitation\n"); } /* * rt_gdm_other_drop - Handler of discard on other reason interrupt */ static void rt_gdm_other_drop(struct rt_softc *sc) { RT_DPRINTF(sc, RT_DEBUG_INTR, "GDMA 1 & 2 discard a packet due to other reason\n"); } /* * rt_pse_p1_fc - Handler of PSE port1 (GDMA 1) flow control interrupt */ static void rt_pse_p1_fc(struct rt_softc *sc) { RT_DPRINTF(sc, RT_DEBUG_INTR, "PSE port1 (GDMA 1) flow control asserted.\n"); } /* * rt_pse_p0_fc - Handler of PSE port0 (CDMA) flow control interrupt */ static void rt_pse_p0_fc(struct rt_softc *sc) { RT_DPRINTF(sc, RT_DEBUG_INTR, "PSE port0 (CDMA) flow control asserted.\n"); } /* * rt_pse_fq_empty - Handler of PSE free Q empty threshold reached interrupt */ static void rt_pse_fq_empty(struct rt_softc *sc) { RT_DPRINTF(sc, 
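/*
 * Illustrative sketch (not part of the driver): the flag-change test used
 * in rt_ioctl()'s SIOCSIFFLAGS case above.  XOR-ing the new if_flags with
 * the copy cached in the softc leaves only the bits that changed, so the
 * promiscuous-mode hook runs only on an actual IFF_PROMISC transition.
 * The helper name is hypothetical.
 */
static __inline int
rt_flag_toggled_sketch(uint32_t new_flags, uint32_t old_flags, uint32_t bit)
{

	/* Non-zero only when 'bit' differs between the two flag words. */
	return (((new_flags ^ old_flags) & bit) != 0);
}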
RT_DEBUG_INTR, "PSE free Q empty threshold reached & forced drop " "condition occurred.\n"); } /* * rt_intr - main ISR */ static void rt_intr(void *arg) { struct rt_softc *sc; struct ifnet *ifp; uint32_t status; sc = arg; ifp = sc->ifp; /* acknowledge interrupts */ status = RT_READ(sc, sc->fe_int_status); RT_WRITE(sc, sc->fe_int_status, status); RT_DPRINTF(sc, RT_DEBUG_INTR, "interrupt: status=0x%08x\n", status); if (status == 0xffffffff || /* device likely went away */ status == 0) /* not for us */ return; sc->interrupts++; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) return; if (status & CNT_PPE_AF) rt_cnt_ppe_af(sc); if (status & CNT_GDM_AF) rt_cnt_gdm_af(sc); if (status & PSE_P2_FC) rt_pse_p2_fc(sc); if (status & GDM_CRC_DROP) rt_gdm_crc_drop(sc); if (status & PSE_BUF_DROP) rt_pse_buf_drop(sc); if (status & GDM_OTHER_DROP) rt_gdm_other_drop(sc); if (status & PSE_P1_FC) rt_pse_p1_fc(sc); if (status & PSE_P0_FC) rt_pse_p0_fc(sc); if (status & PSE_FQ_EMPTY) rt_pse_fq_empty(sc); if (status & INT_TX_COHERENT) rt_tx_coherent_intr(sc); if (status & INT_RX_COHERENT) rt_rx_coherent_intr(sc); if (status & RX_DLY_INT) rt_rx_delay_intr(sc); if (status & TX_DLY_INT) rt_tx_delay_intr(sc); if (status & INT_RX_DONE) rt_rx_intr(sc, 0); if (status & INT_TXQ3_DONE) rt_tx_intr(sc, 3); if (status & INT_TXQ2_DONE) rt_tx_intr(sc, 2); if (status & INT_TXQ1_DONE) rt_tx_intr(sc, 1); if (status & INT_TXQ0_DONE) rt_tx_intr(sc, 0); } /* * rt_rt5350_intr - main ISR for Ralink 5350 SoC */ static void rt_rt5350_intr(void *arg) { struct rt_softc *sc; struct ifnet *ifp; uint32_t status; sc = arg; ifp = sc->ifp; /* acknowledge interrupts */ status = RT_READ(sc, sc->fe_int_status); RT_WRITE(sc, sc->fe_int_status, status); RT_DPRINTF(sc, RT_DEBUG_INTR, "interrupt: status=0x%08x\n", status); if (status == 0xffffffff || /* device likely went away */ status == 0) /* not for us */ return; sc->interrupts++; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) return; if (status & RT5350_INT_TX_COHERENT) rt_tx_coherent_intr(sc); if (status & RT5350_INT_RX_COHERENT) rt_rx_coherent_intr(sc); if (status & RT5350_RX_DLY_INT) rt_rx_delay_intr(sc); if (status & RT5350_TX_DLY_INT) rt_tx_delay_intr(sc); if (status & RT5350_INT_RXQ1_DONE) rt_rx_intr(sc, 1); if (status & RT5350_INT_RXQ0_DONE) rt_rx_intr(sc, 0); if (status & RT5350_INT_TXQ3_DONE) rt_tx_intr(sc, 3); if (status & RT5350_INT_TXQ2_DONE) rt_tx_intr(sc, 2); if (status & RT5350_INT_TXQ1_DONE) rt_tx_intr(sc, 1); if (status & RT5350_INT_TXQ0_DONE) rt_tx_intr(sc, 0); } static void rt_tx_coherent_intr(struct rt_softc *sc) { uint32_t tmp; int i; RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx coherent interrupt\n"); sc->tx_coherent_interrupts++; /* restart DMA engine */ tmp = RT_READ(sc, sc->pdma_glo_cfg); tmp &= ~(FE_TX_WB_DDONE | FE_TX_DMA_EN); RT_WRITE(sc, sc->pdma_glo_cfg, tmp); for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) rt_reset_tx_ring(sc, &sc->tx_ring[i]); for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) { RT_WRITE(sc, sc->tx_base_ptr[i], sc->tx_ring[i].desc_phys_addr); RT_WRITE(sc, sc->tx_max_cnt[i], RT_SOFTC_TX_RING_DESC_COUNT); RT_WRITE(sc, sc->tx_ctx_idx[i], 0); } rt_txrx_enable(sc); } /* * rt_rx_coherent_intr */ static void rt_rx_coherent_intr(struct rt_softc *sc) { uint32_t tmp; int i; RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx coherent interrupt\n"); sc->rx_coherent_interrupts++; /* restart DMA engine */ tmp = RT_READ(sc, sc->pdma_glo_cfg); tmp &= ~(FE_RX_DMA_EN); RT_WRITE(sc, sc->pdma_glo_cfg, tmp); /* init Rx ring */ for (i = 0; i < sc->rx_ring_count; i++) rt_reset_rx_ring(sc, &sc->rx_ring[i]); for (i 
= 0; i < sc->rx_ring_count; i++) { RT_WRITE(sc, sc->rx_base_ptr[i], sc->rx_ring[i].desc_phys_addr); RT_WRITE(sc, sc->rx_max_cnt[i], RT_SOFTC_RX_RING_DATA_COUNT); RT_WRITE(sc, sc->rx_calc_idx[i], RT_SOFTC_RX_RING_DATA_COUNT - 1); } rt_txrx_enable(sc); } /* * rt_rx_intr - a packet received */ static void rt_rx_intr(struct rt_softc *sc, int qid) { KASSERT(qid >= 0 && qid < sc->rx_ring_count, ("%s: Rx interrupt: invalid qid=%d\n", device_get_nameunit(sc->dev), qid)); RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx interrupt\n"); sc->rx_interrupts[qid]++; RT_SOFTC_LOCK(sc); if (!(sc->intr_disable_mask & (sc->int_rx_done_mask << qid))) { rt_intr_disable(sc, (sc->int_rx_done_mask << qid)); taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task); } sc->intr_pending_mask |= (sc->int_rx_done_mask << qid); RT_SOFTC_UNLOCK(sc); } static void rt_rx_delay_intr(struct rt_softc *sc) { RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx delay interrupt\n"); sc->rx_delay_interrupts++; } static void rt_tx_delay_intr(struct rt_softc *sc) { RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx delay interrupt\n"); sc->tx_delay_interrupts++; } /* * rt_tx_intr - Transsmition of packet done */ static void rt_tx_intr(struct rt_softc *sc, int qid) { KASSERT(qid >= 0 && qid < RT_SOFTC_TX_RING_COUNT, ("%s: Tx interrupt: invalid qid=%d\n", device_get_nameunit(sc->dev), qid)); RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx interrupt: qid=%d\n", qid); sc->tx_interrupts[qid]++; RT_SOFTC_LOCK(sc); if (!(sc->intr_disable_mask & (sc->int_tx_done_mask << qid))) { rt_intr_disable(sc, (sc->int_tx_done_mask << qid)); taskqueue_enqueue(sc->taskqueue, &sc->tx_done_task); } sc->intr_pending_mask |= (sc->int_tx_done_mask << qid); RT_SOFTC_UNLOCK(sc); } /* * rt_rx_done_task - run RX task */ static void rt_rx_done_task(void *context, int pending) { struct rt_softc *sc; struct ifnet *ifp; int again; sc = context; ifp = sc->ifp; RT_DPRINTF(sc, RT_DEBUG_RX, "Rx done task\n"); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) return; sc->intr_pending_mask &= ~sc->int_rx_done_mask; again = rt_rx_eof(sc, &sc->rx_ring[0], sc->rx_process_limit); RT_SOFTC_LOCK(sc); if ((sc->intr_pending_mask & sc->int_rx_done_mask) || again) { RT_DPRINTF(sc, RT_DEBUG_RX, "Rx done task: scheduling again\n"); taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task); } else { rt_intr_enable(sc, sc->int_rx_done_mask); } RT_SOFTC_UNLOCK(sc); } /* * rt_tx_done_task - check for pending TX task in all queues */ static void rt_tx_done_task(void *context, int pending) { struct rt_softc *sc; struct ifnet *ifp; uint32_t intr_mask; int i; sc = context; ifp = sc->ifp; RT_DPRINTF(sc, RT_DEBUG_TX, "Tx done task\n"); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) return; for (i = RT_SOFTC_TX_RING_COUNT - 1; i >= 0; i--) { if (sc->intr_pending_mask & (sc->int_tx_done_mask << i)) { sc->intr_pending_mask &= ~(sc->int_tx_done_mask << i); rt_tx_eof(sc, &sc->tx_ring[i]); } } sc->tx_timer = 0; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if(sc->rt_chipid == RT_CHIPID_RT5350 || sc->rt_chipid == RT_CHIPID_MT7620 || sc->rt_chipid == RT_CHIPID_MT7621) intr_mask = ( RT5350_INT_TXQ3_DONE | RT5350_INT_TXQ2_DONE | RT5350_INT_TXQ1_DONE | RT5350_INT_TXQ0_DONE); else intr_mask = ( INT_TXQ3_DONE | INT_TXQ2_DONE | INT_TXQ1_DONE | INT_TXQ0_DONE); RT_SOFTC_LOCK(sc); rt_intr_enable(sc, ~sc->intr_pending_mask & (sc->intr_disable_mask & intr_mask)); if (sc->intr_pending_mask & intr_mask) { RT_DPRINTF(sc, RT_DEBUG_TX, "Tx done task: scheduling again\n"); taskqueue_enqueue(sc->taskqueue, &sc->tx_done_task); } RT_SOFTC_UNLOCK(sc); if (!IFQ_IS_EMPTY(&ifp->if_snd)) rt_start(ifp); } /* * 
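/*
 * Illustrative sketch (not part of the driver): the interrupt deferral
 * handshake used by rt_rx_intr()/rt_tx_intr() and the done tasks above.
 * The ISR masks the source, records it in intr_pending_mask and enqueues
 * a task; the task clears the pending bit, does the work and re-enables
 * the source only once nothing is pending.  The helper name is
 * hypothetical and the softc locking is elided.
 */
static void
rt_defer_rx_sketch(struct rt_softc *sc, uint32_t source)
{

	/* Only mask and schedule once; later interrupts just mark pending. */
	if (!(sc->intr_disable_mask & source)) {
		rt_intr_disable(sc, source);
		taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task);
	}
	sc->intr_pending_mask |= source;
}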
rt_periodic_task - run periodic task */ static void rt_periodic_task(void *context, int pending) { struct rt_softc *sc; struct ifnet *ifp; sc = context; ifp = sc->ifp; RT_DPRINTF(sc, RT_DEBUG_PERIODIC, "periodic task: round=%lu\n", sc->periodic_round); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) return; RT_SOFTC_LOCK(sc); sc->periodic_round++; rt_update_stats(sc); if ((sc->periodic_round % 10) == 0) { rt_update_raw_counters(sc); rt_watchdog(sc); } RT_SOFTC_UNLOCK(sc); callout_reset(&sc->periodic_ch, hz / 10, rt_periodic, sc); } /* * rt_rx_eof - check for frames that done by DMA engine and pass it into * network subsystem. */ static int rt_rx_eof(struct rt_softc *sc, struct rt_softc_rx_ring *ring, int limit) { struct ifnet *ifp; /* struct rt_softc_rx_ring *ring; */ struct rt_rxdesc *desc; struct rt_softc_rx_data *data; struct mbuf *m, *mnew; bus_dma_segment_t segs[1]; bus_dmamap_t dma_map; uint32_t index, desc_flags; int error, nsegs, len, nframes; ifp = sc->ifp; /* ring = &sc->rx_ring[0]; */ nframes = 0; while (limit != 0) { index = RT_READ(sc, sc->rx_drx_idx[0]); if (ring->cur == index) break; desc = &ring->desc[ring->cur]; data = &ring->data[ring->cur]; bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); #ifdef IF_RT_DEBUG if ( sc->debug & RT_DEBUG_RX ) { printf("\nRX Descriptor[%#08x] dump:\n", (u_int)desc); hexdump(desc, 16, 0, 0); printf("-----------------------------------\n"); } #endif /* XXX Sometime device don`t set DDONE bit */ #ifdef DDONE_FIXED if (!(desc->sdl0 & htole16(RT_RXDESC_SDL0_DDONE))) { RT_DPRINTF(sc, RT_DEBUG_RX, "DDONE=0, try next\n"); break; } #endif len = le16toh(desc->sdl0) & 0x3fff; RT_DPRINTF(sc, RT_DEBUG_RX, "new frame len=%d\n", len); nframes++; mnew = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); if (mnew == NULL) { sc->rx_mbuf_alloc_errors++; if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); goto skip; } mnew->m_len = mnew->m_pkthdr.len = MJUMPAGESIZE; error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag, ring->spare_dma_map, mnew, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { RT_DPRINTF(sc, RT_DEBUG_RX, "could not load Rx mbuf DMA map: " "error=%d, nsegs=%d\n", error, nsegs); m_freem(mnew); sc->rx_mbuf_dmamap_errors++; if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); goto skip; } KASSERT(nsegs == 1, ("%s: too many DMA segments", device_get_nameunit(sc->dev))); bus_dmamap_sync(ring->data_dma_tag, data->dma_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(ring->data_dma_tag, data->dma_map); dma_map = data->dma_map; data->dma_map = ring->spare_dma_map; ring->spare_dma_map = dma_map; bus_dmamap_sync(ring->data_dma_tag, data->dma_map, BUS_DMASYNC_PREREAD); m = data->m; desc_flags = desc->word3; data->m = mnew; /* Add 2 for proper align of RX IP header */ desc->sdp0 = htole32(segs[0].ds_addr+2); desc->sdl0 = htole32(segs[0].ds_len-2); desc->word3 = 0; RT_DPRINTF(sc, RT_DEBUG_RX, "Rx frame: rxdesc flags=0x%08x\n", desc_flags); m->m_pkthdr.rcvif = ifp; /* Add 2 to fix data align, after sdp0 = addr + 2 */ m->m_data += 2; m->m_pkthdr.len = m->m_len = len; /* check for crc errors */ if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { /*check for valid checksum*/ if (desc_flags & (sc->csum_fail_ip|sc->csum_fail_l4)) { RT_DPRINTF(sc, RT_DEBUG_RX, "rxdesc: crc error\n"); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); if (!(ifp->if_flags & IFF_PROMISC)) { m_freem(m); goto skip; } } if ((desc_flags & sc->csum_fail_ip) == 0) { m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; m->m_pkthdr.csum_flags |= CSUM_IP_VALID; m->m_pkthdr.csum_data = 
0xffff; } m->m_flags &= ~M_HASFCS; } (*ifp->if_input)(ifp, m); skip: desc->sdl0 &= ~htole16(RT_RXDESC_SDL0_DDONE); bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); ring->cur = (ring->cur + 1) % RT_SOFTC_RX_RING_DATA_COUNT; limit--; } if (ring->cur == 0) RT_WRITE(sc, sc->rx_calc_idx[0], RT_SOFTC_RX_RING_DATA_COUNT - 1); else RT_WRITE(sc, sc->rx_calc_idx[0], ring->cur - 1); RT_DPRINTF(sc, RT_DEBUG_RX, "Rx eof: nframes=%d\n", nframes); sc->rx_packets += nframes; return (limit == 0); } /* * rt_tx_eof - check for successful transmitted frames and mark their * descriptor as free. */ static void rt_tx_eof(struct rt_softc *sc, struct rt_softc_tx_ring *ring) { struct ifnet *ifp; struct rt_txdesc *desc; struct rt_softc_tx_data *data; uint32_t index; int ndescs, nframes; ifp = sc->ifp; ndescs = 0; nframes = 0; for (;;) { index = RT_READ(sc, sc->tx_dtx_idx[ring->qid]); if (ring->desc_next == index) break; ndescs++; desc = &ring->desc[ring->desc_next]; bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (desc->sdl0 & htole16(RT_TXDESC_SDL0_LASTSEG) || desc->sdl1 & htole16(RT_TXDESC_SDL1_LASTSEG)) { nframes++; data = &ring->data[ring->data_next]; bus_dmamap_sync(ring->data_dma_tag, data->dma_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dma_tag, data->dma_map); m_freem(data->m); data->m = NULL; if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); RT_SOFTC_TX_RING_LOCK(ring); ring->data_queued--; ring->data_next = (ring->data_next + 1) % RT_SOFTC_TX_RING_DATA_COUNT; RT_SOFTC_TX_RING_UNLOCK(ring); } desc->sdl0 &= ~htole16(RT_TXDESC_SDL0_DDONE); bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); RT_SOFTC_TX_RING_LOCK(ring); ring->desc_queued--; ring->desc_next = (ring->desc_next + 1) % RT_SOFTC_TX_RING_DESC_COUNT; RT_SOFTC_TX_RING_UNLOCK(ring); } RT_DPRINTF(sc, RT_DEBUG_TX, "Tx eof: qid=%d, ndescs=%d, nframes=%d\n", ring->qid, ndescs, nframes); } /* * rt_update_stats - query statistics counters and update related variables. */ static void rt_update_stats(struct rt_softc *sc) { struct ifnet *ifp; ifp = sc->ifp; RT_DPRINTF(sc, RT_DEBUG_STATS, "update statistic: \n"); /* XXX do update stats here */ } /* * rt_watchdog - reinit device on watchdog event. */ static void rt_watchdog(struct rt_softc *sc) { uint32_t tmp; #ifdef notyet int ntries; #endif if(sc->rt_chipid != RT_CHIPID_RT5350 && sc->rt_chipid != RT_CHIPID_MT7620 && sc->rt_chipid != RT_CHIPID_MT7621) { tmp = RT_READ(sc, PSE_BASE + CDMA_OQ_STA); RT_DPRINTF(sc, RT_DEBUG_WATCHDOG, "watchdog: PSE_IQ_STA=0x%08x\n", tmp); } /* XXX: do not reset */ #ifdef notyet if (((tmp >> P0_IQ_PCNT_SHIFT) & 0xff) != 0) { sc->tx_queue_not_empty[0]++; for (ntries = 0; ntries < 10; ntries++) { tmp = RT_READ(sc, PSE_BASE + PSE_IQ_STA); if (((tmp >> P0_IQ_PCNT_SHIFT) & 0xff) == 0) break; DELAY(1); } } if (((tmp >> P1_IQ_PCNT_SHIFT) & 0xff) != 0) { sc->tx_queue_not_empty[1]++; for (ntries = 0; ntries < 10; ntries++) { tmp = RT_READ(sc, PSE_BASE + PSE_IQ_STA); if (((tmp >> P1_IQ_PCNT_SHIFT) & 0xff) == 0) break; DELAY(1); } } #endif } /* * rt_update_raw_counters - update counters. 
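/*
 * Illustrative sketch (not part of the driver): the Rx index update at
 * the end of rt_rx_eof() above.  The hardware owns descriptors up to the
 * DMA index; the CPU reports the last descriptor it has finished with,
 * which is one position behind the current one, wrapping around the ring.
 * The helper name is hypothetical.
 */
static __inline uint32_t
rt_rx_calc_idx_sketch(uint32_t cur, uint32_t ring_size)
{

	return ((cur == 0) ? ring_size - 1 : cur - 1);
}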
*/ static void rt_update_raw_counters(struct rt_softc *sc) { sc->tx_bytes += RT_READ(sc, CNTR_BASE + GDMA_TX_GBCNT0); sc->tx_packets += RT_READ(sc, CNTR_BASE + GDMA_TX_GPCNT0); sc->tx_skip += RT_READ(sc, CNTR_BASE + GDMA_TX_SKIPCNT0); sc->tx_collision+= RT_READ(sc, CNTR_BASE + GDMA_TX_COLCNT0); sc->rx_bytes += RT_READ(sc, CNTR_BASE + GDMA_RX_GBCNT0); sc->rx_packets += RT_READ(sc, CNTR_BASE + GDMA_RX_GPCNT0); sc->rx_crc_err += RT_READ(sc, CNTR_BASE + GDMA_RX_CSUM_ERCNT0); sc->rx_short_err+= RT_READ(sc, CNTR_BASE + GDMA_RX_SHORT_ERCNT0); sc->rx_long_err += RT_READ(sc, CNTR_BASE + GDMA_RX_LONG_ERCNT0); sc->rx_phy_err += RT_READ(sc, CNTR_BASE + GDMA_RX_FERCNT0); sc->rx_fifo_overflows+= RT_READ(sc, CNTR_BASE + GDMA_RX_OERCNT0); } static void rt_intr_enable(struct rt_softc *sc, uint32_t intr_mask) { uint32_t tmp; sc->intr_disable_mask &= ~intr_mask; tmp = sc->intr_enable_mask & ~sc->intr_disable_mask; RT_WRITE(sc, sc->fe_int_enable, tmp); } static void rt_intr_disable(struct rt_softc *sc, uint32_t intr_mask) { uint32_t tmp; sc->intr_disable_mask |= intr_mask; tmp = sc->intr_enable_mask & ~sc->intr_disable_mask; RT_WRITE(sc, sc->fe_int_enable, tmp); } /* * rt_txrx_enable - enable TX/RX DMA */ static int rt_txrx_enable(struct rt_softc *sc) { struct ifnet *ifp; uint32_t tmp; int ntries; ifp = sc->ifp; /* enable Tx/Rx DMA engine */ for (ntries = 0; ntries < 200; ntries++) { tmp = RT_READ(sc, sc->pdma_glo_cfg); if (!(tmp & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY))) break; DELAY(1000); } if (ntries == 200) { device_printf(sc->dev, "timeout waiting for DMA engine\n"); return (-1); } DELAY(50); tmp |= FE_TX_WB_DDONE | FE_RX_DMA_EN | FE_TX_DMA_EN; RT_WRITE(sc, sc->pdma_glo_cfg, tmp); /* XXX set Rx filter */ return (0); } /* * rt_alloc_rx_ring - allocate RX DMA ring buffer */ static int rt_alloc_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring, int qid) { struct rt_rxdesc *desc; struct rt_softc_rx_data *data; bus_dma_segment_t segs[1]; int i, nsegs, error; error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc), 1, RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc), 0, NULL, NULL, &ring->desc_dma_tag); if (error != 0) { device_printf(sc->dev, "could not create Rx desc DMA tag\n"); goto fail; } error = bus_dmamem_alloc(ring->desc_dma_tag, (void **) &ring->desc, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_dma_map); if (error != 0) { device_printf(sc->dev, "could not allocate Rx desc DMA memory\n"); goto fail; } error = bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map, ring->desc, RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc), rt_dma_map_addr, &ring->desc_phys_addr, 0); if (error != 0) { device_printf(sc->dev, "could not load Rx desc DMA map\n"); goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MJUMPAGESIZE, 1, MJUMPAGESIZE, 0, NULL, NULL, &ring->data_dma_tag); if (error != 0) { device_printf(sc->dev, "could not create Rx data DMA tag\n"); goto fail; } for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) { desc = &ring->desc[i]; data = &ring->data[i]; error = bus_dmamap_create(ring->data_dma_tag, 0, &data->dma_map); if (error != 0) { device_printf(sc->dev, "could not create Rx data DMA " "map\n"); goto fail; } data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); if (data->m == NULL) { device_printf(sc->dev, "could not allocate Rx mbuf\n"); error = ENOMEM; goto fail; } 
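/*
 * Illustrative sketch (not part of the driver): the mask bookkeeping in
 * rt_intr_enable()/rt_intr_disable() above.  The value written to the
 * interrupt enable register is always the statically enabled set minus
 * whatever is temporarily disabled, so enable/disable calls are
 * idempotent and never unmask a source another path still wants masked.
 * The helper name is hypothetical.
 */
static __inline uint32_t
rt_effective_int_mask_sketch(uint32_t enable_mask, uint32_t disable_mask)
{

	return (enable_mask & ~disable_mask);
}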
data->m->m_len = data->m->m_pkthdr.len = MJUMPAGESIZE; error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag, data->dma_map, data->m, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->dev, "could not load Rx mbuf DMA map\n"); goto fail; } KASSERT(nsegs == 1, ("%s: too many DMA segments", device_get_nameunit(sc->dev))); /* Add 2 for proper align of RX IP header */ desc->sdp0 = htole32(segs[0].ds_addr+2); desc->sdl0 = htole32(segs[0].ds_len-2); } error = bus_dmamap_create(ring->data_dma_tag, 0, &ring->spare_dma_map); if (error != 0) { device_printf(sc->dev, "could not create Rx spare DMA map\n"); goto fail; } bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); ring->qid = qid; return (0); fail: rt_free_rx_ring(sc, ring); return (error); } /* * rt_reset_rx_ring - reset RX ring buffer */ static void rt_reset_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring) { struct rt_rxdesc *desc; int i; for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) { desc = &ring->desc[i]; desc->sdl0 &= ~htole16(RT_RXDESC_SDL0_DDONE); } bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); ring->cur = 0; } /* * rt_free_rx_ring - free memory used by RX ring buffer */ static void rt_free_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring) { struct rt_softc_rx_data *data; int i; if (ring->desc != NULL) { bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->desc_dma_tag, ring->desc_dma_map); bus_dmamem_free(ring->desc_dma_tag, ring->desc, ring->desc_dma_map); } if (ring->desc_dma_tag != NULL) bus_dma_tag_destroy(ring->desc_dma_tag); for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dma_tag, data->dma_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(ring->data_dma_tag, data->dma_map); m_freem(data->m); } if (data->dma_map != NULL) bus_dmamap_destroy(ring->data_dma_tag, data->dma_map); } if (ring->spare_dma_map != NULL) bus_dmamap_destroy(ring->data_dma_tag, ring->spare_dma_map); if (ring->data_dma_tag != NULL) bus_dma_tag_destroy(ring->data_dma_tag); } /* * rt_alloc_tx_ring - allocate TX ring buffer */ static int rt_alloc_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring, int qid) { struct rt_softc_tx_data *data; int error, i; mtx_init(&ring->lock, device_get_nameunit(sc->dev), NULL, MTX_DEF); error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc), 1, RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc), 0, NULL, NULL, &ring->desc_dma_tag); if (error != 0) { device_printf(sc->dev, "could not create Tx desc DMA tag\n"); goto fail; } error = bus_dmamem_alloc(ring->desc_dma_tag, (void **) &ring->desc, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_dma_map); if (error != 0) { device_printf(sc->dev, "could not allocate Tx desc DMA memory\n"); goto fail; } error = bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map, ring->desc, (RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc)), rt_dma_map_addr, &ring->desc_phys_addr, 0); if (error != 0) { device_printf(sc->dev, "could not load Tx desc DMA map\n"); goto fail; } ring->desc_queued = 0; ring->desc_cur = 0; ring->desc_next = 0; error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE, 1, 
RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE, 0, NULL, NULL, &ring->seg0_dma_tag); if (error != 0) { device_printf(sc->dev, "could not create Tx seg0 DMA tag\n"); goto fail; } error = bus_dmamem_alloc(ring->seg0_dma_tag, (void **) &ring->seg0, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->seg0_dma_map); if (error != 0) { device_printf(sc->dev, "could not allocate Tx seg0 DMA memory\n"); goto fail; } error = bus_dmamap_load(ring->seg0_dma_tag, ring->seg0_dma_map, ring->seg0, RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE, rt_dma_map_addr, &ring->seg0_phys_addr, 0); if (error != 0) { device_printf(sc->dev, "could not load Tx seg0 DMA map\n"); goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MJUMPAGESIZE, RT_SOFTC_MAX_SCATTER, MJUMPAGESIZE, 0, NULL, NULL, &ring->data_dma_tag); if (error != 0) { device_printf(sc->dev, "could not create Tx data DMA tag\n"); goto fail; } for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) { data = &ring->data[i]; error = bus_dmamap_create(ring->data_dma_tag, 0, &data->dma_map); if (error != 0) { device_printf(sc->dev, "could not create Tx data DMA " "map\n"); goto fail; } } ring->data_queued = 0; ring->data_cur = 0; ring->data_next = 0; ring->qid = qid; return (0); fail: rt_free_tx_ring(sc, ring); return (error); } /* * rt_reset_tx_ring - reset TX ring buffer to empty state */ static void rt_reset_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring) { struct rt_softc_tx_data *data; struct rt_txdesc *desc; int i; for (i = 0; i < RT_SOFTC_TX_RING_DESC_COUNT; i++) { desc = &ring->desc[i]; desc->sdl0 = 0; desc->sdl1 = 0; } ring->desc_queued = 0; ring->desc_cur = 0; ring->desc_next = 0; bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map, BUS_DMASYNC_PREWRITE); for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dma_tag, data->dma_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dma_tag, data->dma_map); m_freem(data->m); data->m = NULL; } } ring->data_queued = 0; ring->data_cur = 0; ring->data_next = 0; } /* * rt_free_tx_ring - free RX ring buffer */ static void rt_free_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring) { struct rt_softc_tx_data *data; int i; if (ring->desc != NULL) { bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->desc_dma_tag, ring->desc_dma_map); bus_dmamem_free(ring->desc_dma_tag, ring->desc, ring->desc_dma_map); } if (ring->desc_dma_tag != NULL) bus_dma_tag_destroy(ring->desc_dma_tag); if (ring->seg0 != NULL) { bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->seg0_dma_tag, ring->seg0_dma_map); bus_dmamem_free(ring->seg0_dma_tag, ring->seg0, ring->seg0_dma_map); } if (ring->seg0_dma_tag != NULL) bus_dma_tag_destroy(ring->seg0_dma_tag); for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dma_tag, data->dma_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dma_tag, data->dma_map); m_freem(data->m); } if (data->dma_map != NULL) bus_dmamap_destroy(ring->data_dma_tag, data->dma_map); } if (ring->data_dma_tag != NULL) bus_dma_tag_destroy(ring->data_dma_tag); mtx_destroy(&ring->lock); } /* * rt_dma_map_addr - get address of busdma segment */ static void rt_dma_map_addr(void *arg, bus_dma_segment_t 
*segs, int nseg, int error) { if (error != 0) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); *(bus_addr_t *) arg = segs[0].ds_addr; } /* * rt_sysctl_attach - attach sysctl nodes for NIC counters. */ static void rt_sysctl_attach(struct rt_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; struct sysctl_oid *stats; ctx = device_get_sysctl_ctx(sc->dev); tree = device_get_sysctl_tree(sc->dev); /* statistic counters */ stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "stats", CTLFLAG_RD, 0, "statistic"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "interrupts", CTLFLAG_RD, &sc->interrupts, "all interrupts"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx_coherent_interrupts", CTLFLAG_RD, &sc->tx_coherent_interrupts, "Tx coherent interrupts"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx_coherent_interrupts", CTLFLAG_RD, &sc->rx_coherent_interrupts, "Rx coherent interrupts"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx_interrupts", CTLFLAG_RD, &sc->rx_interrupts[0], "Rx interrupts"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx_delay_interrupts", CTLFLAG_RD, &sc->rx_delay_interrupts, "Rx delay interrupts"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "TXQ3_interrupts", CTLFLAG_RD, &sc->tx_interrupts[3], "Tx AC3 interrupts"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "TXQ2_interrupts", CTLFLAG_RD, &sc->tx_interrupts[2], "Tx AC2 interrupts"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "TXQ1_interrupts", CTLFLAG_RD, &sc->tx_interrupts[1], "Tx AC1 interrupts"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "TXQ0_interrupts", CTLFLAG_RD, &sc->tx_interrupts[0], "Tx AC0 interrupts"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx_delay_interrupts", CTLFLAG_RD, &sc->tx_delay_interrupts, "Tx delay interrupts"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "TXQ3_desc_queued", CTLFLAG_RD, &sc->tx_ring[3].desc_queued, 0, "Tx AC3 descriptors queued"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "TXQ3_data_queued", CTLFLAG_RD, &sc->tx_ring[3].data_queued, 0, "Tx AC3 data queued"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "TXQ2_desc_queued", CTLFLAG_RD, &sc->tx_ring[2].desc_queued, 0, "Tx AC2 descriptors queued"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "TXQ2_data_queued", CTLFLAG_RD, &sc->tx_ring[2].data_queued, 0, "Tx AC2 data queued"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "TXQ1_desc_queued", CTLFLAG_RD, &sc->tx_ring[1].desc_queued, 0, "Tx AC1 descriptors queued"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "TXQ1_data_queued", CTLFLAG_RD, &sc->tx_ring[1].data_queued, 0, "Tx AC1 data queued"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "TXQ0_desc_queued", CTLFLAG_RD, &sc->tx_ring[0].desc_queued, 0, "Tx AC0 descriptors queued"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "TXQ0_data_queued", CTLFLAG_RD, &sc->tx_ring[0].data_queued, 0, "Tx AC0 data queued"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "TXQ3_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[3], "Tx AC3 data queue full"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "TXQ2_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[2], "Tx AC2 data queue full"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "TXQ1_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[1], "Tx AC1 data queue full"); SYSCTL_ADD_ULONG(ctx, 
SYSCTL_CHILDREN(stats), OID_AUTO, "TXQ0_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[0], "Tx AC0 data queue full"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx_watchdog_timeouts", CTLFLAG_RD, &sc->tx_watchdog_timeouts, "Tx watchdog timeouts"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx_defrag_packets", CTLFLAG_RD, &sc->tx_defrag_packets, "Tx defragmented packets"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "no_tx_desc_avail", CTLFLAG_RD, &sc->no_tx_desc_avail, "no Tx descriptors available"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx_mbuf_alloc_errors", CTLFLAG_RD, &sc->rx_mbuf_alloc_errors, "Rx mbuf allocation errors"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx_mbuf_dmamap_errors", CTLFLAG_RD, &sc->rx_mbuf_dmamap_errors, "Rx mbuf DMA mapping errors"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx_queue_0_not_empty", CTLFLAG_RD, &sc->tx_queue_not_empty[0], "Tx queue 0 not empty"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx_queue_1_not_empty", CTLFLAG_RD, &sc->tx_queue_not_empty[1], "Tx queue 1 not empty"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx_packets", CTLFLAG_RD, &sc->rx_packets, "Rx packets"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx_crc_errors", CTLFLAG_RD, &sc->rx_crc_err, "Rx CRC errors"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx_phy_errors", CTLFLAG_RD, &sc->rx_phy_err, "Rx PHY errors"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx_dup_packets", CTLFLAG_RD, &sc->rx_dup_packets, "Rx duplicate packets"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx_fifo_overflows", CTLFLAG_RD, &sc->rx_fifo_overflows, "Rx FIFO overflows"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx_bytes", CTLFLAG_RD, &sc->rx_bytes, "Rx bytes"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx_long_err", CTLFLAG_RD, &sc->rx_long_err, "Rx too long frame errors"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx_short_err", CTLFLAG_RD, &sc->rx_short_err, "Rx too short frame errors"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx_bytes", CTLFLAG_RD, &sc->tx_bytes, "Tx bytes"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx_packets", CTLFLAG_RD, &sc->tx_packets, "Tx packets"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx_skip", CTLFLAG_RD, &sc->tx_skip, "Tx skip count for GDMA ports"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx_collision", CTLFLAG_RD, &sc->tx_collision, "Tx collision count for GDMA ports"); } #if defined(IF_RT_PHY_SUPPORT) || defined(RT_MDIO) /* This code is only work RT2880 and same chip. */ /* TODO: make RT3052 and later support code. But nobody need it? */ static int rt_miibus_readreg(device_t dev, int phy, int reg) { struct rt_softc *sc = device_get_softc(dev); int dat; /* * PSEUDO_PHYAD is a special value for indicate switch attached. * No one PHY use PSEUDO_PHYAD (0x1e) address. 
*/ #ifndef RT_MDIO if (phy == 31) { /* Fake PHY ID for bfeswitch attach */ switch (reg) { case MII_BMSR: return (BMSR_EXTSTAT|BMSR_MEDIAMASK); case MII_PHYIDR1: return (0x40); /* As result of faking */ case MII_PHYIDR2: /* PHY will detect as */ return (0x6250); /* bfeswitch */ } } #endif /* Wait prev command done if any */ while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO); dat = ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) | ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK); RT_WRITE(sc, MDIO_ACCESS, dat); RT_WRITE(sc, MDIO_ACCESS, dat | MDIO_CMD_ONGO); while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO); return (RT_READ(sc, MDIO_ACCESS) & MDIO_PHY_DATA_MASK); } static int rt_miibus_writereg(device_t dev, int phy, int reg, int val) { struct rt_softc *sc = device_get_softc(dev); int dat; /* Wait prev command done if any */ while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO); dat = MDIO_CMD_WR | ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) | ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK) | (val & MDIO_PHY_DATA_MASK); RT_WRITE(sc, MDIO_ACCESS, dat); RT_WRITE(sc, MDIO_ACCESS, dat | MDIO_CMD_ONGO); while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO); return (0); } #endif #ifdef IF_RT_PHY_SUPPORT void rt_miibus_statchg(device_t dev) { struct rt_softc *sc = device_get_softc(dev); struct mii_data *mii; mii = device_get_softc(sc->rt_miibus); if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: /* XXX check link here */ sc->flags |= 1; break; default: break; } } } #endif /* IF_RT_PHY_SUPPORT */ static device_method_t rt_dev_methods[] = { DEVMETHOD(device_probe, rt_probe), DEVMETHOD(device_attach, rt_attach), DEVMETHOD(device_detach, rt_detach), DEVMETHOD(device_shutdown, rt_shutdown), DEVMETHOD(device_suspend, rt_suspend), DEVMETHOD(device_resume, rt_resume), #ifdef IF_RT_PHY_SUPPORT /* MII interface */ DEVMETHOD(miibus_readreg, rt_miibus_readreg), DEVMETHOD(miibus_writereg, rt_miibus_writereg), DEVMETHOD(miibus_statchg, rt_miibus_statchg), #endif DEVMETHOD_END }; static driver_t rt_driver = { "rt", rt_dev_methods, sizeof(struct rt_softc) }; static devclass_t rt_dev_class; DRIVER_MODULE(rt, nexus, rt_driver, rt_dev_class, 0, 0); #ifdef FDT DRIVER_MODULE(rt, simplebus, rt_driver, rt_dev_class, 0, 0); #endif MODULE_DEPEND(rt, ether, 1, 1, 1); MODULE_DEPEND(rt, miibus, 1, 1, 1); #ifdef RT_MDIO MODULE_DEPEND(rt, mdio, 1, 1, 1); static int rtmdio_probe(device_t); static int rtmdio_attach(device_t); static int rtmdio_detach(device_t); static struct mtx miibus_mtx; MTX_SYSINIT(miibus_mtx, &miibus_mtx, "rt mii lock", MTX_DEF); /* * Declare an additional, separate driver for accessing the MDIO bus. 
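/*
 * Illustrative sketch (not part of the driver): the MDIO access cycle in
 * rt_miibus_readreg()/rt_miibus_writereg() above.  A command word is
 * composed from the PHY and register addresses, latched, kicked off by
 * setting MDIO_CMD_ONGO, and the result is read back once ONGO clears.
 * Unlike the driver, the sketch bounds the busy-wait; the helper name and
 * the retry limit are hypothetical.
 */
static int
rt_mdio_read_sketch(struct rt_softc *sc, int phy, int reg, int *val)
{
	uint32_t dat;
	int ntries;

	dat = ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) |
	    ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK);
	RT_WRITE(sc, MDIO_ACCESS, dat);
	RT_WRITE(sc, MDIO_ACCESS, dat | MDIO_CMD_ONGO);
	for (ntries = 0; ntries < 1000; ntries++) {
		if (!(RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO)) {
			*val = RT_READ(sc, MDIO_ACCESS) & MDIO_PHY_DATA_MASK;
			return (0);
		}
		DELAY(10);
	}
	return (ETIMEDOUT);
}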
*/ static device_method_t rtmdio_methods[] = { /* Device interface */ DEVMETHOD(device_probe, rtmdio_probe), DEVMETHOD(device_attach, rtmdio_attach), DEVMETHOD(device_detach, rtmdio_detach), /* bus interface */ DEVMETHOD(bus_add_child, device_add_child_ordered), /* MDIO access */ DEVMETHOD(mdio_readreg, rt_miibus_readreg), DEVMETHOD(mdio_writereg, rt_miibus_writereg), }; DEFINE_CLASS_0(rtmdio, rtmdio_driver, rtmdio_methods, sizeof(struct rt_softc)); static devclass_t rtmdio_devclass; DRIVER_MODULE(miiproxy, rt, miiproxy_driver, miiproxy_devclass, 0, 0); DRIVER_MODULE(rtmdio, simplebus, rtmdio_driver, rtmdio_devclass, 0, 0); DRIVER_MODULE(mdio, rtmdio, mdio_driver, mdio_devclass, 0, 0); static int rtmdio_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "ralink,rt2880-mdio")) return (ENXIO); device_set_desc(dev, "FV built-in ethernet interface, MDIO controller"); return(0); } static int rtmdio_attach(device_t dev) { struct rt_softc *sc; int error; sc = device_get_softc(dev); sc->dev = dev; sc->mem_rid = 0; sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid, RF_ACTIVE | RF_SHAREABLE); if (sc->mem == NULL) { device_printf(dev, "couldn't map memory\n"); error = ENXIO; goto fail; } sc->bst = rman_get_bustag(sc->mem); sc->bsh = rman_get_bushandle(sc->mem); bus_generic_probe(dev); bus_enumerate_hinted_children(dev); error = bus_generic_attach(dev); fail: return(error); } static int rtmdio_detach(device_t dev) { return(0); } #endif diff --git a/sys/dev/smc/if_smc.c b/sys/dev/smc/if_smc.c index 6b69deb74d99..d3f911d8327c 100644 --- a/sys/dev/smc/if_smc.c +++ b/sys/dev/smc/if_smc.c @@ -1,1329 +1,1329 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2008 Benno Rice. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Driver for SMSC LAN91C111, may work for older variants. 
*/ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #include #include #endif #include #include #include #include #include #include #include #define SMC_LOCK(sc) mtx_lock(&(sc)->smc_mtx) #define SMC_UNLOCK(sc) mtx_unlock(&(sc)->smc_mtx) #define SMC_ASSERT_LOCKED(sc) mtx_assert(&(sc)->smc_mtx, MA_OWNED) #define SMC_INTR_PRIORITY 0 #define SMC_RX_PRIORITY 5 #define SMC_TX_PRIORITY 10 devclass_t smc_devclass; static const char *smc_chip_ids[16] = { NULL, NULL, NULL, /* 3 */ "SMSC LAN91C90 or LAN91C92", /* 4 */ "SMSC LAN91C94", /* 5 */ "SMSC LAN91C95", /* 6 */ "SMSC LAN91C96", /* 7 */ "SMSC LAN91C100", /* 8 */ "SMSC LAN91C100FD", /* 9 */ "SMSC LAN91C110FD or LAN91C111FD", NULL, NULL, NULL, NULL, NULL, NULL }; static void smc_init(void *); static void smc_start(struct ifnet *); static void smc_stop(struct smc_softc *); static int smc_ioctl(struct ifnet *, u_long, caddr_t); static void smc_init_locked(struct smc_softc *); static void smc_start_locked(struct ifnet *); static void smc_reset(struct smc_softc *); static int smc_mii_ifmedia_upd(struct ifnet *); static void smc_mii_ifmedia_sts(struct ifnet *, struct ifmediareq *); static void smc_mii_tick(void *); static void smc_mii_mediachg(struct smc_softc *); static int smc_mii_mediaioctl(struct smc_softc *, struct ifreq *, u_long); static void smc_task_intr(void *, int); static void smc_task_rx(void *, int); static void smc_task_tx(void *, int); static driver_filter_t smc_intr; static callout_func_t smc_watchdog; #ifdef DEVICE_POLLING static poll_handler_t smc_poll; #endif /* * MII bit-bang glue */ static uint32_t smc_mii_bitbang_read(device_t); static void smc_mii_bitbang_write(device_t, uint32_t); static const struct mii_bitbang_ops smc_mii_bitbang_ops = { smc_mii_bitbang_read, smc_mii_bitbang_write, { MGMT_MDO, /* MII_BIT_MDO */ MGMT_MDI, /* MII_BIT_MDI */ MGMT_MCLK, /* MII_BIT_MDC */ MGMT_MDOE, /* MII_BIT_DIR_HOST_PHY */ 0, /* MII_BIT_DIR_PHY_HOST */ } }; static __inline void smc_select_bank(struct smc_softc *sc, uint16_t bank) { bus_barrier(sc->smc_reg, BSR, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); bus_write_2(sc->smc_reg, BSR, bank & BSR_BANK_MASK); bus_barrier(sc->smc_reg, BSR, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); } /* Never call this when not in bank 2. 
*/ static __inline void smc_mmu_wait(struct smc_softc *sc) { KASSERT((bus_read_2(sc->smc_reg, BSR) & BSR_BANK_MASK) == 2, ("%s: smc_mmu_wait called when not in bank 2", device_get_nameunit(sc->smc_dev))); while (bus_read_2(sc->smc_reg, MMUCR) & MMUCR_BUSY) ; } static __inline uint8_t smc_read_1(struct smc_softc *sc, bus_size_t offset) { return (bus_read_1(sc->smc_reg, offset)); } static __inline void smc_write_1(struct smc_softc *sc, bus_size_t offset, uint8_t val) { bus_write_1(sc->smc_reg, offset, val); } static __inline uint16_t smc_read_2(struct smc_softc *sc, bus_size_t offset) { return (bus_read_2(sc->smc_reg, offset)); } static __inline void smc_write_2(struct smc_softc *sc, bus_size_t offset, uint16_t val) { bus_write_2(sc->smc_reg, offset, val); } static __inline void smc_read_multi_2(struct smc_softc *sc, bus_size_t offset, uint16_t *datap, bus_size_t count) { bus_read_multi_2(sc->smc_reg, offset, datap, count); } static __inline void smc_write_multi_2(struct smc_softc *sc, bus_size_t offset, uint16_t *datap, bus_size_t count) { bus_write_multi_2(sc->smc_reg, offset, datap, count); } static __inline void smc_barrier(struct smc_softc *sc, bus_size_t offset, bus_size_t length, int flags) { bus_barrier(sc->smc_reg, offset, length, flags); } int smc_probe(device_t dev) { int rid, type, error; uint16_t val; struct smc_softc *sc; struct resource *reg; sc = device_get_softc(dev); rid = 0; type = SYS_RES_IOPORT; error = 0; if (sc->smc_usemem) type = SYS_RES_MEMORY; reg = bus_alloc_resource_anywhere(dev, type, &rid, 16, RF_ACTIVE); if (reg == NULL) { if (bootverbose) device_printf(dev, "could not allocate I/O resource for probe\n"); return (ENXIO); } /* Check for the identification value in the BSR. */ val = bus_read_2(reg, BSR); if ((val & BSR_IDENTIFY_MASK) != BSR_IDENTIFY) { if (bootverbose) device_printf(dev, "identification value not in BSR\n"); error = ENXIO; goto done; } /* * Try switching banks and make sure we still get the identification * value. */ bus_write_2(reg, BSR, 0); val = bus_read_2(reg, BSR); if ((val & BSR_IDENTIFY_MASK) != BSR_IDENTIFY) { if (bootverbose) device_printf(dev, "identification value not in BSR after write\n"); error = ENXIO; goto done; } #if 0 /* Check the BAR. */ bus_write_2(reg, BSR, 1); val = bus_read_2(reg, BAR); val = BAR_ADDRESS(val); if (rman_get_start(reg) != val) { if (bootverbose) device_printf(dev, "BAR address %x does not match " "I/O resource address %lx\n", val, rman_get_start(reg)); error = ENXIO; goto done; } #endif /* Compare REV against known chip revisions. */ bus_write_2(reg, BSR, 3); val = bus_read_2(reg, REV); val = (val & REV_CHIP_MASK) >> REV_CHIP_SHIFT; if (smc_chip_ids[val] == NULL) { if (bootverbose) device_printf(dev, "Unknown chip revision: %d\n", val); error = ENXIO; goto done; } device_set_desc(dev, smc_chip_ids[val]); done: bus_release_resource(dev, type, rid, reg); return (error); } int smc_attach(device_t dev) { int type, error; uint16_t val; u_char eaddr[ETHER_ADDR_LEN]; struct smc_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); error = 0; sc->smc_dev = dev; ifp = sc->smc_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { error = ENOSPC; goto done; } mtx_init(&sc->smc_mtx, device_get_nameunit(dev), NULL, MTX_DEF); /* Set up watchdog callout. 
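/*
 * Illustrative sketch (not part of the driver): the banked register
 * access pattern used throughout this file.  The LAN91C111 exposes its
 * registers in banks (0-3 in this driver) behind one window; BSR selects
 * the bank and every other offset is only meaningful for the bank
 * currently selected, which is why helpers such as smc_mmu_wait() assert
 * the bank first.  The helper name is hypothetical.
 */
static __inline uint16_t
smc_read_banked_sketch(struct smc_softc *sc, uint16_t bank, bus_size_t off)
{

	/* Caller must hold the softc lock so the bank cannot change. */
	smc_select_bank(sc, bank);
	return (smc_read_2(sc, off));
}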
*/ callout_init_mtx(&sc->smc_watchdog, &sc->smc_mtx, 0); type = SYS_RES_IOPORT; if (sc->smc_usemem) type = SYS_RES_MEMORY; sc->smc_reg_rid = 0; sc->smc_reg = bus_alloc_resource_anywhere(dev, type, &sc->smc_reg_rid, 16, RF_ACTIVE); if (sc->smc_reg == NULL) { error = ENXIO; goto done; } sc->smc_irq = bus_alloc_resource_anywhere(dev, SYS_RES_IRQ, &sc->smc_irq_rid, 1, RF_ACTIVE | RF_SHAREABLE); if (sc->smc_irq == NULL) { error = ENXIO; goto done; } SMC_LOCK(sc); smc_reset(sc); SMC_UNLOCK(sc); smc_select_bank(sc, 3); val = smc_read_2(sc, REV); sc->smc_chip = (val & REV_CHIP_MASK) >> REV_CHIP_SHIFT; sc->smc_rev = (val * REV_REV_MASK) >> REV_REV_SHIFT; if (bootverbose) device_printf(dev, "revision %x\n", sc->smc_rev); callout_init_mtx(&sc->smc_mii_tick_ch, &sc->smc_mtx, CALLOUT_RETURNUNLOCKED); if (sc->smc_chip >= REV_CHIP_91110FD) { (void)mii_attach(dev, &sc->smc_miibus, ifp, smc_mii_ifmedia_upd, smc_mii_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0); if (sc->smc_miibus != NULL) { sc->smc_mii_tick = smc_mii_tick; sc->smc_mii_mediachg = smc_mii_mediachg; sc->smc_mii_mediaioctl = smc_mii_mediaioctl; } } smc_select_bank(sc, 1); eaddr[0] = smc_read_1(sc, IAR0); eaddr[1] = smc_read_1(sc, IAR1); eaddr[2] = smc_read_1(sc, IAR2); eaddr[3] = smc_read_1(sc, IAR3); eaddr[4] = smc_read_1(sc, IAR4); eaddr[5] = smc_read_1(sc, IAR5); if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = smc_init; ifp->if_ioctl = smc_ioctl; ifp->if_start = smc_start; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); IFQ_SET_READY(&ifp->if_snd); ifp->if_capabilities = ifp->if_capenable = 0; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif ether_ifattach(ifp, eaddr); /* Set up taskqueue */ TASK_INIT(&sc->smc_intr, SMC_INTR_PRIORITY, smc_task_intr, ifp); - TASK_INIT(&sc->smc_rx, SMC_RX_PRIORITY, smc_task_rx, ifp); + NET_TASK_INIT(&sc->smc_rx, SMC_RX_PRIORITY, smc_task_rx, ifp); TASK_INIT(&sc->smc_tx, SMC_TX_PRIORITY, smc_task_tx, ifp); sc->smc_tq = taskqueue_create_fast("smc_taskq", M_NOWAIT, taskqueue_thread_enqueue, &sc->smc_tq); taskqueue_start_threads(&sc->smc_tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->smc_dev)); /* Mask all interrupts. 
*/ sc->smc_mask = 0; smc_write_1(sc, MSK, 0); /* Wire up interrupt */ error = bus_setup_intr(dev, sc->smc_irq, INTR_TYPE_NET|INTR_MPSAFE, smc_intr, NULL, sc, &sc->smc_ih); if (error != 0) goto done; done: if (error != 0) smc_detach(dev); return (error); } int smc_detach(device_t dev) { int type; struct smc_softc *sc; sc = device_get_softc(dev); SMC_LOCK(sc); smc_stop(sc); SMC_UNLOCK(sc); if (sc->smc_ifp != NULL) { ether_ifdetach(sc->smc_ifp); } callout_drain(&sc->smc_watchdog); callout_drain(&sc->smc_mii_tick_ch); #ifdef DEVICE_POLLING if (sc->smc_ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(sc->smc_ifp); #endif if (sc->smc_ih != NULL) bus_teardown_intr(sc->smc_dev, sc->smc_irq, sc->smc_ih); if (sc->smc_tq != NULL) { taskqueue_drain(sc->smc_tq, &sc->smc_intr); taskqueue_drain(sc->smc_tq, &sc->smc_rx); taskqueue_drain(sc->smc_tq, &sc->smc_tx); taskqueue_free(sc->smc_tq); sc->smc_tq = NULL; } if (sc->smc_ifp != NULL) { if_free(sc->smc_ifp); } if (sc->smc_miibus != NULL) { device_delete_child(sc->smc_dev, sc->smc_miibus); bus_generic_detach(sc->smc_dev); } if (sc->smc_reg != NULL) { type = SYS_RES_IOPORT; if (sc->smc_usemem) type = SYS_RES_MEMORY; bus_release_resource(sc->smc_dev, type, sc->smc_reg_rid, sc->smc_reg); } if (sc->smc_irq != NULL) bus_release_resource(sc->smc_dev, SYS_RES_IRQ, sc->smc_irq_rid, sc->smc_irq); if (mtx_initialized(&sc->smc_mtx)) mtx_destroy(&sc->smc_mtx); return (0); } static void smc_start(struct ifnet *ifp) { struct smc_softc *sc; sc = ifp->if_softc; SMC_LOCK(sc); smc_start_locked(ifp); SMC_UNLOCK(sc); } static void smc_start_locked(struct ifnet *ifp) { struct smc_softc *sc; struct mbuf *m; u_int len, npages, spin_count; sc = ifp->if_softc; SMC_ASSERT_LOCKED(sc); if (ifp->if_drv_flags & IFF_DRV_OACTIVE) return; if (IFQ_IS_EMPTY(&ifp->if_snd)) return; /* * Grab the next packet. If it's too big, drop it. */ IFQ_DRV_DEQUEUE(&ifp->if_snd, m); len = m_length(m, NULL); len += (len & 1); if (len > ETHER_MAX_LEN - ETHER_CRC_LEN) { if_printf(ifp, "large packet discarded\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); m_freem(m); return; /* XXX readcheck? */ } /* * Flag that we're busy. */ ifp->if_drv_flags |= IFF_DRV_OACTIVE; sc->smc_pending = m; /* * Work out how many 256 byte "pages" we need. We have to include the * control data for the packet in this calculation. */ npages = (len + PKT_CTRL_DATA_LEN) >> 8; if (npages == 0) npages = 1; /* * Request memory. */ smc_select_bank(sc, 2); smc_mmu_wait(sc); smc_write_2(sc, MMUCR, MMUCR_CMD_TX_ALLOC | npages); /* * Spin briefly to see if the allocation succeeds. */ spin_count = TX_ALLOC_WAIT_TIME; do { if (smc_read_1(sc, IST) & ALLOC_INT) { smc_write_1(sc, ACK, ALLOC_INT); break; } } while (--spin_count); /* * If the allocation is taking too long, unmask the alloc interrupt * and wait. */ if (spin_count == 0) { sc->smc_mask |= ALLOC_INT; if ((ifp->if_capenable & IFCAP_POLLING) == 0) smc_write_1(sc, MSK, sc->smc_mask); return; } taskqueue_enqueue(sc->smc_tq, &sc->smc_tx); } static void smc_task_tx(void *context, int pending) { struct ifnet *ifp; struct smc_softc *sc; struct mbuf *m, *m0; u_int packet, len; int last_len; uint8_t *data; (void)pending; ifp = (struct ifnet *)context; sc = ifp->if_softc; SMC_LOCK(sc); if (sc->smc_pending == NULL) { SMC_UNLOCK(sc); goto next_packet; } m = m0 = sc->smc_pending; sc->smc_pending = NULL; smc_select_bank(sc, 2); /* * Check the allocation result. */ packet = smc_read_1(sc, ARR); /* * If the allocation failed, requeue the packet and retry. 
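/*
 * Illustrative sketch (not part of the driver): the buffer-size request
 * made in smc_start_locked() above.  The chip allocates transmit memory
 * in 256-byte pages and the request must also cover the per-packet
 * control data, hence the "+ PKT_CTRL_DATA_LEN" before the shift and the
 * minimum of one page.  The helper name is hypothetical.
 */
static __inline u_int
smc_tx_pages_sketch(u_int len)
{
	u_int npages;

	npages = (len + PKT_CTRL_DATA_LEN) >> 8;
	return (npages == 0 ? 1 : npages);
}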
*/ if (packet & ARR_FAILED) { IFQ_DRV_PREPEND(&ifp->if_snd, m); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; smc_start_locked(ifp); SMC_UNLOCK(sc); return; } /* * Tell the device to write to our packet number. */ smc_write_1(sc, PNR, packet); smc_write_2(sc, PTR, 0 | PTR_AUTO_INCR); /* * Tell the device how long the packet is (including control data). */ len = m_length(m, 0); len += PKT_CTRL_DATA_LEN; smc_write_2(sc, DATA0, 0); smc_write_2(sc, DATA0, len); /* * Push the data out to the device. */ data = NULL; last_len = 0; for (; m != NULL; m = m->m_next) { data = mtod(m, uint8_t *); smc_write_multi_2(sc, DATA0, (uint16_t *)data, m->m_len / 2); last_len = m->m_len; } /* * Push out the control byte and and the odd byte if needed. */ if ((len & 1) != 0 && data != NULL) smc_write_2(sc, DATA0, (CTRL_ODD << 8) | data[last_len - 1]); else smc_write_2(sc, DATA0, 0); /* * Unmask the TX empty interrupt. */ sc->smc_mask |= TX_EMPTY_INT; if ((ifp->if_capenable & IFCAP_POLLING) == 0) smc_write_1(sc, MSK, sc->smc_mask); /* * Enqueue the packet. */ smc_mmu_wait(sc); smc_write_2(sc, MMUCR, MMUCR_CMD_ENQUEUE); callout_reset(&sc->smc_watchdog, hz * 2, smc_watchdog, sc); /* * Finish up. */ if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; SMC_UNLOCK(sc); BPF_MTAP(ifp, m0); m_freem(m0); next_packet: /* * See if there's anything else to do. */ smc_start(ifp); } static void smc_task_rx(void *context, int pending) { u_int packet, status, len; uint8_t *data; struct ifnet *ifp; struct smc_softc *sc; struct mbuf *m, *mhead, *mtail; (void)pending; ifp = (struct ifnet *)context; sc = ifp->if_softc; mhead = mtail = NULL; SMC_LOCK(sc); packet = smc_read_1(sc, FIFO_RX); while ((packet & FIFO_EMPTY) == 0) { /* * Grab an mbuf and attach a cluster. */ MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) { break; } if (!(MCLGET(m, M_NOWAIT))) { m_freem(m); break; } /* * Point to the start of the packet. */ smc_select_bank(sc, 2); smc_write_1(sc, PNR, packet); smc_write_2(sc, PTR, 0 | PTR_READ | PTR_RCV | PTR_AUTO_INCR); /* * Grab status and packet length. */ status = smc_read_2(sc, DATA0); len = smc_read_2(sc, DATA0) & RX_LEN_MASK; len -= 6; if (status & RX_ODDFRM) len += 1; /* * Check for errors. */ if (status & (RX_TOOSHORT | RX_TOOLNG | RX_BADCRC | RX_ALGNERR)) { smc_mmu_wait(sc); smc_write_2(sc, MMUCR, MMUCR_CMD_RELEASE); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); m_freem(m); break; } /* * Set the mbuf up the way we want it. */ m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = len + 2; /* XXX: Is this right? */ m_adj(m, ETHER_ALIGN); /* * Pull the packet out of the device. Make sure we're in the * right bank first as things may have changed while we were * allocating our mbuf. */ smc_select_bank(sc, 2); smc_write_1(sc, PNR, packet); smc_write_2(sc, PTR, 4 | PTR_READ | PTR_RCV | PTR_AUTO_INCR); data = mtod(m, uint8_t *); smc_read_multi_2(sc, DATA0, (uint16_t *)data, len >> 1); if (len & 1) { data += len & ~1; *data = smc_read_1(sc, DATA0); } /* * Tell the device we're done. 
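/*
 * Illustrative sketch (not part of the driver): the trailing control word
 * written by smc_task_tx() above.  The packet buffer is filled 16 bits at
 * a time, so an odd-length frame carries its last data byte in the low
 * half of the final word together with the CTRL_ODD flag in the high
 * half; even-length frames just get a zero control word.  The helper name
 * is hypothetical.
 */
static __inline uint16_t
smc_tx_ctrl_word_sketch(u_int len, uint8_t last_byte)
{

	return ((len & 1) ? ((CTRL_ODD << 8) | last_byte) : 0);
}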
*/ smc_mmu_wait(sc); smc_write_2(sc, MMUCR, MMUCR_CMD_RELEASE); if (m == NULL) { break; } if (mhead == NULL) { mhead = mtail = m; m->m_next = NULL; } else { mtail->m_next = m; mtail = m; } packet = smc_read_1(sc, FIFO_RX); } sc->smc_mask |= RCV_INT; if ((ifp->if_capenable & IFCAP_POLLING) == 0) smc_write_1(sc, MSK, sc->smc_mask); SMC_UNLOCK(sc); while (mhead != NULL) { m = mhead; mhead = mhead->m_next; m->m_next = NULL; if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); (*ifp->if_input)(ifp, m); } } #ifdef DEVICE_POLLING static int smc_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct smc_softc *sc; sc = ifp->if_softc; SMC_LOCK(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { SMC_UNLOCK(sc); return (0); } SMC_UNLOCK(sc); if (cmd == POLL_AND_CHECK_STATUS) taskqueue_enqueue(sc->smc_tq, &sc->smc_intr); return (0); } #endif static int smc_intr(void *context) { struct smc_softc *sc; uint32_t curbank; sc = (struct smc_softc *)context; /* * Save current bank and restore later in this function */ curbank = (smc_read_2(sc, BSR) & BSR_BANK_MASK); /* * Block interrupts in order to let smc_task_intr to kick in */ smc_select_bank(sc, 2); smc_write_1(sc, MSK, 0); /* Restore bank */ smc_select_bank(sc, curbank); taskqueue_enqueue(sc->smc_tq, &sc->smc_intr); return (FILTER_HANDLED); } static void smc_task_intr(void *context, int pending) { struct smc_softc *sc; struct ifnet *ifp; u_int status, packet, counter, tcr; (void)pending; ifp = (struct ifnet *)context; sc = ifp->if_softc; SMC_LOCK(sc); smc_select_bank(sc, 2); /* * Find out what interrupts are flagged. */ status = smc_read_1(sc, IST) & sc->smc_mask; /* * Transmit error */ if (status & TX_INT) { /* * Kill off the packet if there is one and re-enable transmit. */ packet = smc_read_1(sc, FIFO_TX); if ((packet & FIFO_EMPTY) == 0) { callout_stop(&sc->smc_watchdog); smc_select_bank(sc, 2); smc_write_1(sc, PNR, packet); smc_write_2(sc, PTR, 0 | PTR_READ | PTR_AUTO_INCR); smc_select_bank(sc, 0); tcr = smc_read_2(sc, EPHSR); #if 0 if ((tcr & EPHSR_TX_SUC) == 0) device_printf(sc->smc_dev, "bad packet\n"); #endif smc_select_bank(sc, 2); smc_mmu_wait(sc); smc_write_2(sc, MMUCR, MMUCR_CMD_RELEASE_PKT); smc_select_bank(sc, 0); tcr = smc_read_2(sc, TCR); tcr |= TCR_TXENA | TCR_PAD_EN; smc_write_2(sc, TCR, tcr); smc_select_bank(sc, 2); taskqueue_enqueue(sc->smc_tq, &sc->smc_tx); } /* * Ack the interrupt. */ smc_write_1(sc, ACK, TX_INT); } /* * Receive */ if (status & RCV_INT) { smc_write_1(sc, ACK, RCV_INT); sc->smc_mask &= ~RCV_INT; taskqueue_enqueue(sc->smc_tq, &sc->smc_rx); } /* * Allocation */ if (status & ALLOC_INT) { smc_write_1(sc, ACK, ALLOC_INT); sc->smc_mask &= ~ALLOC_INT; taskqueue_enqueue(sc->smc_tq, &sc->smc_tx); } /* * Receive overrun */ if (status & RX_OVRN_INT) { smc_write_1(sc, ACK, RX_OVRN_INT); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); } /* * Transmit empty */ if (status & TX_EMPTY_INT) { smc_write_1(sc, ACK, TX_EMPTY_INT); sc->smc_mask &= ~TX_EMPTY_INT; callout_stop(&sc->smc_watchdog); /* * Update collision stats. */ smc_select_bank(sc, 0); counter = smc_read_2(sc, ECR); smc_select_bank(sc, 2); if_inc_counter(ifp, IFCOUNTER_COLLISIONS, ((counter & ECR_SNGLCOL_MASK) >> ECR_SNGLCOL_SHIFT) + ((counter & ECR_MULCOL_MASK) >> ECR_MULCOL_SHIFT)); /* * See if there are any packets to transmit. */ taskqueue_enqueue(sc->smc_tq, &sc->smc_tx); } /* * Update the interrupt mask. 
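/*
 * Illustrative sketch (not part of the driver): the collision accounting
 * done in smc_task_intr()'s TX_EMPTY path above.  The ECR counter packs a
 * single-collision count and a multiple-collision count into one 16-bit
 * register, and the driver simply adds the two fields to the interface's
 * collision statistic.  The helper name is hypothetical.
 */
static __inline u_int
smc_collisions_from_ecr_sketch(uint16_t ecr)
{

	return (((ecr & ECR_SNGLCOL_MASK) >> ECR_SNGLCOL_SHIFT) +
	    ((ecr & ECR_MULCOL_MASK) >> ECR_MULCOL_SHIFT));
}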
*/ smc_select_bank(sc, 2); if ((ifp->if_capenable & IFCAP_POLLING) == 0) smc_write_1(sc, MSK, sc->smc_mask); SMC_UNLOCK(sc); } static uint32_t smc_mii_bitbang_read(device_t dev) { struct smc_softc *sc; uint32_t val; sc = device_get_softc(dev); SMC_ASSERT_LOCKED(sc); KASSERT((smc_read_2(sc, BSR) & BSR_BANK_MASK) == 3, ("%s: smc_mii_bitbang_read called with bank %d (!= 3)", device_get_nameunit(sc->smc_dev), smc_read_2(sc, BSR) & BSR_BANK_MASK)); val = smc_read_2(sc, MGMT); smc_barrier(sc, MGMT, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); return (val); } static void smc_mii_bitbang_write(device_t dev, uint32_t val) { struct smc_softc *sc; sc = device_get_softc(dev); SMC_ASSERT_LOCKED(sc); KASSERT((smc_read_2(sc, BSR) & BSR_BANK_MASK) == 3, ("%s: smc_mii_bitbang_write called with bank %d (!= 3)", device_get_nameunit(sc->smc_dev), smc_read_2(sc, BSR) & BSR_BANK_MASK)); smc_write_2(sc, MGMT, val); smc_barrier(sc, MGMT, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); } int smc_miibus_readreg(device_t dev, int phy, int reg) { struct smc_softc *sc; int val; sc = device_get_softc(dev); SMC_LOCK(sc); smc_select_bank(sc, 3); val = mii_bitbang_readreg(dev, &smc_mii_bitbang_ops, phy, reg); SMC_UNLOCK(sc); return (val); } int smc_miibus_writereg(device_t dev, int phy, int reg, int data) { struct smc_softc *sc; sc = device_get_softc(dev); SMC_LOCK(sc); smc_select_bank(sc, 3); mii_bitbang_writereg(dev, &smc_mii_bitbang_ops, phy, reg, data); SMC_UNLOCK(sc); return (0); } void smc_miibus_statchg(device_t dev) { struct smc_softc *sc; struct mii_data *mii; uint16_t tcr; sc = device_get_softc(dev); mii = device_get_softc(sc->smc_miibus); SMC_LOCK(sc); smc_select_bank(sc, 0); tcr = smc_read_2(sc, TCR); if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) tcr |= TCR_SWFDUP; else tcr &= ~TCR_SWFDUP; smc_write_2(sc, TCR, tcr); SMC_UNLOCK(sc); } static int smc_mii_ifmedia_upd(struct ifnet *ifp) { struct smc_softc *sc; struct mii_data *mii; sc = ifp->if_softc; if (sc->smc_miibus == NULL) return (ENXIO); mii = device_get_softc(sc->smc_miibus); return (mii_mediachg(mii)); } static void smc_mii_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct smc_softc *sc; struct mii_data *mii; sc = ifp->if_softc; if (sc->smc_miibus == NULL) return; mii = device_get_softc(sc->smc_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } static void smc_mii_tick(void *context) { struct smc_softc *sc; sc = (struct smc_softc *)context; if (sc->smc_miibus == NULL) return; SMC_UNLOCK(sc); mii_tick(device_get_softc(sc->smc_miibus)); callout_reset(&sc->smc_mii_tick_ch, hz, smc_mii_tick, sc); } static void smc_mii_mediachg(struct smc_softc *sc) { if (sc->smc_miibus == NULL) return; mii_mediachg(device_get_softc(sc->smc_miibus)); } static int smc_mii_mediaioctl(struct smc_softc *sc, struct ifreq *ifr, u_long command) { struct mii_data *mii; if (sc->smc_miibus == NULL) return (EINVAL); mii = device_get_softc(sc->smc_miibus); return (ifmedia_ioctl(sc->smc_ifp, ifr, &mii->mii_media, command)); } static void smc_reset(struct smc_softc *sc) { u_int ctr; SMC_ASSERT_LOCKED(sc); smc_select_bank(sc, 2); /* * Mask all interrupts. */ smc_write_1(sc, MSK, 0); /* * Tell the device to reset. */ smc_select_bank(sc, 0); smc_write_2(sc, RCR, RCR_SOFT_RST); /* * Set up the configuration register. */ smc_select_bank(sc, 1); smc_write_2(sc, CR, CR_EPH_POWER_EN); DELAY(1); /* * Turn off transmit and receive. 
*/ smc_select_bank(sc, 0); smc_write_2(sc, TCR, 0); smc_write_2(sc, RCR, 0); /* * Set up the control register. */ smc_select_bank(sc, 1); ctr = smc_read_2(sc, CTR); ctr |= CTR_LE_ENABLE | CTR_AUTO_RELEASE; smc_write_2(sc, CTR, ctr); /* * Reset the MMU. */ smc_select_bank(sc, 2); smc_mmu_wait(sc); smc_write_2(sc, MMUCR, MMUCR_CMD_MMU_RESET); } static void smc_enable(struct smc_softc *sc) { struct ifnet *ifp; SMC_ASSERT_LOCKED(sc); ifp = sc->smc_ifp; /* * Set up the receive/PHY control register. */ smc_select_bank(sc, 0); smc_write_2(sc, RPCR, RPCR_ANEG | (RPCR_LED_LINK_ANY << RPCR_LSA_SHIFT) | (RPCR_LED_ACT_ANY << RPCR_LSB_SHIFT)); /* * Set up the transmit and receive control registers. */ smc_write_2(sc, TCR, TCR_TXENA | TCR_PAD_EN); smc_write_2(sc, RCR, RCR_RXEN | RCR_STRIP_CRC); /* * Set up the interrupt mask. */ smc_select_bank(sc, 2); sc->smc_mask = EPH_INT | RX_OVRN_INT | RCV_INT | TX_INT; if ((ifp->if_capenable & IFCAP_POLLING) != 0) smc_write_1(sc, MSK, sc->smc_mask); } static void smc_stop(struct smc_softc *sc) { SMC_ASSERT_LOCKED(sc); /* * Turn off callouts. */ callout_stop(&sc->smc_watchdog); callout_stop(&sc->smc_mii_tick_ch); /* * Mask all interrupts. */ smc_select_bank(sc, 2); sc->smc_mask = 0; smc_write_1(sc, MSK, 0); #ifdef DEVICE_POLLING ether_poll_deregister(sc->smc_ifp); sc->smc_ifp->if_capenable &= ~IFCAP_POLLING; #endif /* * Disable transmit and receive. */ smc_select_bank(sc, 0); smc_write_2(sc, TCR, 0); smc_write_2(sc, RCR, 0); sc->smc_ifp->if_drv_flags &= ~IFF_DRV_RUNNING; } static void smc_watchdog(void *arg) { struct smc_softc *sc; sc = (struct smc_softc *)arg; device_printf(sc->smc_dev, "watchdog timeout\n"); taskqueue_enqueue(sc->smc_tq, &sc->smc_intr); } static void smc_init(void *context) { struct smc_softc *sc; sc = (struct smc_softc *)context; SMC_LOCK(sc); smc_init_locked(sc); SMC_UNLOCK(sc); } static void smc_init_locked(struct smc_softc *sc) { struct ifnet *ifp; SMC_ASSERT_LOCKED(sc); ifp = sc->smc_ifp; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) return; smc_reset(sc); smc_enable(sc); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; smc_start_locked(ifp); if (sc->smc_mii_tick != NULL) callout_reset(&sc->smc_mii_tick_ch, hz, sc->smc_mii_tick, sc); #ifdef DEVICE_POLLING SMC_UNLOCK(sc); ether_poll_register(smc_poll, ifp); SMC_LOCK(sc); ifp->if_capenable |= IFCAP_POLLING; #endif } static int smc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct smc_softc *sc; int error; sc = ifp->if_softc; error = 0; switch (cmd) { case SIOCSIFFLAGS: if ((ifp->if_flags & IFF_UP) == 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { SMC_LOCK(sc); smc_stop(sc); SMC_UNLOCK(sc); } else { smc_init(sc); if (sc->smc_mii_mediachg != NULL) sc->smc_mii_mediachg(sc); } break; case SIOCADDMULTI: case SIOCDELMULTI: /* XXX SMC_LOCK(sc); smc_setmcast(sc); SMC_UNLOCK(sc); */ error = EINVAL; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: if (sc->smc_mii_mediaioctl == NULL) { error = EINVAL; break; } sc->smc_mii_mediaioctl(sc, (struct ifreq *)data, cmd); break; default: error = ether_ioctl(ifp, cmd, data); break; } return (error); } diff --git a/sys/dev/virtio/network/if_vtnet.c b/sys/dev/virtio/network/if_vtnet.c index 697e8de8fe32..ceb3ffaaf2b4 100644 --- a/sys/dev/virtio/network/if_vtnet.c +++ b/sys/dev/virtio/network/if_vtnet.c @@ -1,4090 +1,4090 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2011, Bryan Venteicher * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Driver for VirtIO network devices. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "virtio_if.h" #include "opt_inet.h" #include "opt_inet6.h" static int vtnet_modevent(module_t, int, void *); static int vtnet_probe(device_t); static int vtnet_attach(device_t); static int vtnet_detach(device_t); static int vtnet_suspend(device_t); static int vtnet_resume(device_t); static int vtnet_shutdown(device_t); static int vtnet_attach_completed(device_t); static int vtnet_config_change(device_t); static void vtnet_negotiate_features(struct vtnet_softc *); static void vtnet_setup_features(struct vtnet_softc *); static int vtnet_init_rxq(struct vtnet_softc *, int); static int vtnet_init_txq(struct vtnet_softc *, int); static int vtnet_alloc_rxtx_queues(struct vtnet_softc *); static void vtnet_free_rxtx_queues(struct vtnet_softc *); static int vtnet_alloc_rx_filters(struct vtnet_softc *); static void vtnet_free_rx_filters(struct vtnet_softc *); static int vtnet_alloc_virtqueues(struct vtnet_softc *); static int vtnet_setup_interface(struct vtnet_softc *); static int vtnet_change_mtu(struct vtnet_softc *, int); static int vtnet_ioctl(struct ifnet *, u_long, caddr_t); static uint64_t vtnet_get_counter(struct ifnet *, ift_counter); static int vtnet_rxq_populate(struct vtnet_rxq *); static void vtnet_rxq_free_mbufs(struct vtnet_rxq *); static struct mbuf * vtnet_rx_alloc_buf(struct vtnet_softc *, int , struct mbuf **); static int vtnet_rxq_replace_lro_nomgr_buf(struct vtnet_rxq *, struct mbuf *, int); static int vtnet_rxq_replace_buf(struct vtnet_rxq *, struct mbuf *, int); static int vtnet_rxq_enqueue_buf(struct vtnet_rxq *, struct mbuf *); static int vtnet_rxq_new_buf(struct vtnet_rxq *); static int vtnet_rxq_csum(struct vtnet_rxq *, struct mbuf *, struct virtio_net_hdr *); static void vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *, int); static void vtnet_rxq_discard_buf(struct vtnet_rxq *, struct 
mbuf *); static int vtnet_rxq_merged_eof(struct vtnet_rxq *, struct mbuf *, int); static void vtnet_rxq_input(struct vtnet_rxq *, struct mbuf *, struct virtio_net_hdr *); static int vtnet_rxq_eof(struct vtnet_rxq *); static void vtnet_rx_vq_intr(void *); static void vtnet_rxq_tq_intr(void *, int); static int vtnet_txq_below_threshold(struct vtnet_txq *); static int vtnet_txq_notify(struct vtnet_txq *); static void vtnet_txq_free_mbufs(struct vtnet_txq *); static int vtnet_txq_offload_ctx(struct vtnet_txq *, struct mbuf *, int *, int *, int *); static int vtnet_txq_offload_tso(struct vtnet_txq *, struct mbuf *, int, int, struct virtio_net_hdr *); static struct mbuf * vtnet_txq_offload(struct vtnet_txq *, struct mbuf *, struct virtio_net_hdr *); static int vtnet_txq_enqueue_buf(struct vtnet_txq *, struct mbuf **, struct vtnet_tx_header *); static int vtnet_txq_encap(struct vtnet_txq *, struct mbuf **, int); #ifdef VTNET_LEGACY_TX static void vtnet_start_locked(struct vtnet_txq *, struct ifnet *); static void vtnet_start(struct ifnet *); #else static int vtnet_txq_mq_start_locked(struct vtnet_txq *, struct mbuf *); static int vtnet_txq_mq_start(struct ifnet *, struct mbuf *); static void vtnet_txq_tq_deferred(void *, int); #endif static void vtnet_txq_start(struct vtnet_txq *); static void vtnet_txq_tq_intr(void *, int); static int vtnet_txq_eof(struct vtnet_txq *); static void vtnet_tx_vq_intr(void *); static void vtnet_tx_start_all(struct vtnet_softc *); #ifndef VTNET_LEGACY_TX static void vtnet_qflush(struct ifnet *); #endif static int vtnet_watchdog(struct vtnet_txq *); static void vtnet_accum_stats(struct vtnet_softc *, struct vtnet_rxq_stats *, struct vtnet_txq_stats *); static void vtnet_tick(void *); static void vtnet_start_taskqueues(struct vtnet_softc *); static void vtnet_free_taskqueues(struct vtnet_softc *); static void vtnet_drain_taskqueues(struct vtnet_softc *); static void vtnet_drain_rxtx_queues(struct vtnet_softc *); static void vtnet_stop_rendezvous(struct vtnet_softc *); static void vtnet_stop(struct vtnet_softc *); static int vtnet_virtio_reinit(struct vtnet_softc *); static void vtnet_init_rx_filters(struct vtnet_softc *); static int vtnet_init_rx_queues(struct vtnet_softc *); static int vtnet_init_tx_queues(struct vtnet_softc *); static int vtnet_init_rxtx_queues(struct vtnet_softc *); static void vtnet_set_active_vq_pairs(struct vtnet_softc *); static int vtnet_reinit(struct vtnet_softc *); static void vtnet_init_locked(struct vtnet_softc *); static void vtnet_init(void *); static void vtnet_free_ctrl_vq(struct vtnet_softc *); static void vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *, struct sglist *, int, int); static int vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *); static int vtnet_ctrl_mq_cmd(struct vtnet_softc *, uint16_t); static int vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int); static int vtnet_set_promisc(struct vtnet_softc *, int); static int vtnet_set_allmulti(struct vtnet_softc *, int); static void vtnet_attach_disable_promisc(struct vtnet_softc *); static void vtnet_rx_filter(struct vtnet_softc *); static void vtnet_rx_filter_mac(struct vtnet_softc *); static int vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t); static void vtnet_rx_filter_vlan(struct vtnet_softc *); static void vtnet_update_vlan_filter(struct vtnet_softc *, int, uint16_t); static void vtnet_register_vlan(void *, struct ifnet *, uint16_t); static void vtnet_unregister_vlan(void *, struct ifnet *, uint16_t); static int vtnet_is_link_up(struct vtnet_softc *); static 
void vtnet_update_link_status(struct vtnet_softc *); static int vtnet_ifmedia_upd(struct ifnet *); static void vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *); static void vtnet_get_hwaddr(struct vtnet_softc *); static void vtnet_set_hwaddr(struct vtnet_softc *); static void vtnet_vlan_tag_remove(struct mbuf *); static void vtnet_set_rx_process_limit(struct vtnet_softc *); static void vtnet_set_tx_intr_threshold(struct vtnet_softc *); static void vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *, struct sysctl_oid_list *, struct vtnet_rxq *); static void vtnet_setup_txq_sysctl(struct sysctl_ctx_list *, struct sysctl_oid_list *, struct vtnet_txq *); static void vtnet_setup_queue_sysctl(struct vtnet_softc *); static void vtnet_setup_sysctl(struct vtnet_softc *); static int vtnet_rxq_enable_intr(struct vtnet_rxq *); static void vtnet_rxq_disable_intr(struct vtnet_rxq *); static int vtnet_txq_enable_intr(struct vtnet_txq *); static void vtnet_txq_disable_intr(struct vtnet_txq *); static void vtnet_enable_rx_interrupts(struct vtnet_softc *); static void vtnet_enable_tx_interrupts(struct vtnet_softc *); static void vtnet_enable_interrupts(struct vtnet_softc *); static void vtnet_disable_rx_interrupts(struct vtnet_softc *); static void vtnet_disable_tx_interrupts(struct vtnet_softc *); static void vtnet_disable_interrupts(struct vtnet_softc *); static int vtnet_tunable_int(struct vtnet_softc *, const char *, int); DEBUGNET_DEFINE(vtnet); /* Tunables. */ static SYSCTL_NODE(_hw, OID_AUTO, vtnet, CTLFLAG_RD, 0, "VNET driver parameters"); static int vtnet_csum_disable = 0; TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable); SYSCTL_INT(_hw_vtnet, OID_AUTO, csum_disable, CTLFLAG_RDTUN, &vtnet_csum_disable, 0, "Disables receive and send checksum offload"); static int vtnet_tso_disable = 0; TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable); SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_disable, CTLFLAG_RDTUN, &vtnet_tso_disable, 0, "Disables TCP Segmentation Offload"); static int vtnet_lro_disable = 0; TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable); SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_disable, CTLFLAG_RDTUN, &vtnet_lro_disable, 0, "Disables TCP Large Receive Offload"); static int vtnet_mq_disable = 0; TUNABLE_INT("hw.vtnet.mq_disable", &vtnet_mq_disable); SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_disable, CTLFLAG_RDTUN, &vtnet_mq_disable, 0, "Disables Multi Queue support"); static int vtnet_mq_max_pairs = VTNET_MAX_QUEUE_PAIRS; TUNABLE_INT("hw.vtnet.mq_max_pairs", &vtnet_mq_max_pairs); SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_max_pairs, CTLFLAG_RDTUN, &vtnet_mq_max_pairs, 0, "Sets the maximum number of Multi Queue pairs"); static int vtnet_rx_process_limit = 512; TUNABLE_INT("hw.vtnet.rx_process_limit", &vtnet_rx_process_limit); SYSCTL_INT(_hw_vtnet, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN, &vtnet_rx_process_limit, 0, "Limits the number RX segments processed in a single pass"); static uma_zone_t vtnet_tx_header_zone; static struct virtio_feature_desc vtnet_feature_desc[] = { { VIRTIO_NET_F_CSUM, "TxChecksum" }, { VIRTIO_NET_F_GUEST_CSUM, "RxChecksum" }, { VIRTIO_NET_F_MAC, "MacAddress" }, { VIRTIO_NET_F_GSO, "TxAllGSO" }, { VIRTIO_NET_F_GUEST_TSO4, "RxTSOv4" }, { VIRTIO_NET_F_GUEST_TSO6, "RxTSOv6" }, { VIRTIO_NET_F_GUEST_ECN, "RxECN" }, { VIRTIO_NET_F_GUEST_UFO, "RxUFO" }, { VIRTIO_NET_F_HOST_TSO4, "TxTSOv4" }, { VIRTIO_NET_F_HOST_TSO6, "TxTSOv6" }, { VIRTIO_NET_F_HOST_ECN, "TxTSOECN" }, { VIRTIO_NET_F_HOST_UFO, "TxUFO" }, { VIRTIO_NET_F_MRG_RXBUF, "MrgRxBuf" }, { VIRTIO_NET_F_STATUS, "Status" }, { 
VIRTIO_NET_F_CTRL_VQ, "ControlVq" }, { VIRTIO_NET_F_CTRL_RX, "RxMode" }, { VIRTIO_NET_F_CTRL_VLAN, "VLanFilter" }, { VIRTIO_NET_F_CTRL_RX_EXTRA, "RxModeExtra" }, { VIRTIO_NET_F_GUEST_ANNOUNCE, "GuestAnnounce" }, { VIRTIO_NET_F_MQ, "Multiqueue" }, { VIRTIO_NET_F_CTRL_MAC_ADDR, "SetMacAddress" }, { 0, NULL } }; static device_method_t vtnet_methods[] = { /* Device methods. */ DEVMETHOD(device_probe, vtnet_probe), DEVMETHOD(device_attach, vtnet_attach), DEVMETHOD(device_detach, vtnet_detach), DEVMETHOD(device_suspend, vtnet_suspend), DEVMETHOD(device_resume, vtnet_resume), DEVMETHOD(device_shutdown, vtnet_shutdown), /* VirtIO methods. */ DEVMETHOD(virtio_attach_completed, vtnet_attach_completed), DEVMETHOD(virtio_config_change, vtnet_config_change), DEVMETHOD_END }; #ifdef DEV_NETMAP #include #endif /* DEV_NETMAP */ static driver_t vtnet_driver = { "vtnet", vtnet_methods, sizeof(struct vtnet_softc) }; static devclass_t vtnet_devclass; DRIVER_MODULE(vtnet, virtio_mmio, vtnet_driver, vtnet_devclass, vtnet_modevent, 0); DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass, vtnet_modevent, 0); MODULE_VERSION(vtnet, 1); MODULE_DEPEND(vtnet, virtio, 1, 1, 1); #ifdef DEV_NETMAP MODULE_DEPEND(vtnet, netmap, 1, 1, 1); #endif /* DEV_NETMAP */ VIRTIO_SIMPLE_PNPTABLE(vtnet, VIRTIO_ID_NETWORK, "VirtIO Networking Adapter"); VIRTIO_SIMPLE_PNPINFO(virtio_mmio, vtnet); VIRTIO_SIMPLE_PNPINFO(virtio_pci, vtnet); static int vtnet_modevent(module_t mod, int type, void *unused) { int error = 0; static int loaded = 0; switch (type) { case MOD_LOAD: if (loaded++ == 0) { vtnet_tx_header_zone = uma_zcreate("vtnet_tx_hdr", sizeof(struct vtnet_tx_header), NULL, NULL, NULL, NULL, 0, 0); #ifdef DEBUGNET /* * We need to allocate from this zone in the transmit path, so ensure * that we have at least one item per header available. * XXX add a separate zone like we do for mbufs? otherwise we may alloc * buckets */ uma_zone_reserve(vtnet_tx_header_zone, DEBUGNET_MAX_IN_FLIGHT * 2); uma_prealloc(vtnet_tx_header_zone, DEBUGNET_MAX_IN_FLIGHT * 2); #endif } break; case MOD_QUIESCE: if (uma_zone_get_cur(vtnet_tx_header_zone) > 0) error = EBUSY; break; case MOD_UNLOAD: if (--loaded == 0) { uma_zdestroy(vtnet_tx_header_zone); vtnet_tx_header_zone = NULL; } break; case MOD_SHUTDOWN: break; default: error = EOPNOTSUPP; break; } return (error); } static int vtnet_probe(device_t dev) { return (VIRTIO_SIMPLE_PROBE(dev, vtnet)); } static int vtnet_attach(device_t dev) { struct vtnet_softc *sc; int error; sc = device_get_softc(dev); sc->vtnet_dev = dev; /* Register our feature descriptions. */ virtio_set_feature_desc(dev, vtnet_feature_desc); VTNET_CORE_LOCK_INIT(sc); callout_init_mtx(&sc->vtnet_tick_ch, VTNET_CORE_MTX(sc), 0); vtnet_setup_sysctl(sc); vtnet_setup_features(sc); error = vtnet_alloc_rx_filters(sc); if (error) { device_printf(dev, "cannot allocate Rx filters\n"); goto fail; } error = vtnet_alloc_rxtx_queues(sc); if (error) { device_printf(dev, "cannot allocate queues\n"); goto fail; } error = vtnet_alloc_virtqueues(sc); if (error) { device_printf(dev, "cannot allocate virtqueues\n"); goto fail; } error = vtnet_setup_interface(sc); if (error) { device_printf(dev, "cannot setup interface\n"); goto fail; } error = virtio_setup_intr(dev, INTR_TYPE_NET); if (error) { device_printf(dev, "cannot setup virtqueue interrupts\n"); /* BMV: This will crash if during boot! 
*/ ether_ifdetach(sc->vtnet_ifp); goto fail; } #ifdef DEV_NETMAP vtnet_netmap_attach(sc); #endif /* DEV_NETMAP */ vtnet_start_taskqueues(sc); fail: if (error) vtnet_detach(dev); return (error); } static int vtnet_detach(device_t dev) { struct vtnet_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->vtnet_ifp; if (device_is_attached(dev)) { VTNET_CORE_LOCK(sc); vtnet_stop(sc); VTNET_CORE_UNLOCK(sc); callout_drain(&sc->vtnet_tick_ch); vtnet_drain_taskqueues(sc); ether_ifdetach(ifp); } #ifdef DEV_NETMAP netmap_detach(ifp); #endif /* DEV_NETMAP */ vtnet_free_taskqueues(sc); if (sc->vtnet_vlan_attach != NULL) { EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach); sc->vtnet_vlan_attach = NULL; } if (sc->vtnet_vlan_detach != NULL) { EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vtnet_vlan_detach); sc->vtnet_vlan_detach = NULL; } ifmedia_removeall(&sc->vtnet_media); if (ifp != NULL) { if_free(ifp); sc->vtnet_ifp = NULL; } vtnet_free_rxtx_queues(sc); vtnet_free_rx_filters(sc); if (sc->vtnet_ctrl_vq != NULL) vtnet_free_ctrl_vq(sc); VTNET_CORE_LOCK_DESTROY(sc); return (0); } static int vtnet_suspend(device_t dev) { struct vtnet_softc *sc; sc = device_get_softc(dev); VTNET_CORE_LOCK(sc); vtnet_stop(sc); sc->vtnet_flags |= VTNET_FLAG_SUSPENDED; VTNET_CORE_UNLOCK(sc); return (0); } static int vtnet_resume(device_t dev) { struct vtnet_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->vtnet_ifp; VTNET_CORE_LOCK(sc); if (ifp->if_flags & IFF_UP) vtnet_init_locked(sc); sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED; VTNET_CORE_UNLOCK(sc); return (0); } static int vtnet_shutdown(device_t dev) { /* * Suspend already does all of what we need to * do here; we just never expect to be resumed. */ return (vtnet_suspend(dev)); } static int vtnet_attach_completed(device_t dev) { vtnet_attach_disable_promisc(device_get_softc(dev)); return (0); } static int vtnet_config_change(device_t dev) { struct vtnet_softc *sc; sc = device_get_softc(dev); VTNET_CORE_LOCK(sc); vtnet_update_link_status(sc); if (sc->vtnet_link_active != 0) vtnet_tx_start_all(sc); VTNET_CORE_UNLOCK(sc); return (0); } static void vtnet_negotiate_features(struct vtnet_softc *sc) { device_t dev; uint64_t mask, features; dev = sc->vtnet_dev; mask = 0; /* * TSO and LRO are only available when their corresponding checksum * offload feature is also negotiated. */ if (vtnet_tunable_int(sc, "csum_disable", vtnet_csum_disable)) { mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM; mask |= VTNET_TSO_FEATURES | VTNET_LRO_FEATURES; } if (vtnet_tunable_int(sc, "tso_disable", vtnet_tso_disable)) mask |= VTNET_TSO_FEATURES; if (vtnet_tunable_int(sc, "lro_disable", vtnet_lro_disable)) mask |= VTNET_LRO_FEATURES; #ifndef VTNET_LEGACY_TX if (vtnet_tunable_int(sc, "mq_disable", vtnet_mq_disable)) mask |= VIRTIO_NET_F_MQ; #else mask |= VIRTIO_NET_F_MQ; #endif features = VTNET_FEATURES & ~mask; sc->vtnet_features = virtio_negotiate_features(dev, features); if (virtio_with_feature(dev, VTNET_LRO_FEATURES) && virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0) { /* * LRO without mergeable buffers requires special care. This * is not ideal because every receive buffer must be large * enough to hold the maximum TCP packet, the Ethernet header, * and the header. This requires up to 34 descriptors with * MCLBYTES clusters. If we do not have indirect descriptors, * LRO is disabled since the virtqueue will not contain very * many receive buffers. 
*/ if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) { device_printf(dev, "LRO disabled due to both mergeable buffers and " "indirect descriptors not negotiated\n"); features &= ~VTNET_LRO_FEATURES; sc->vtnet_features = virtio_negotiate_features(dev, features); } else sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG; } } static void vtnet_setup_features(struct vtnet_softc *sc) { device_t dev; dev = sc->vtnet_dev; vtnet_negotiate_features(sc); if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) sc->vtnet_flags |= VTNET_FLAG_INDIRECT; if (virtio_with_feature(dev, VIRTIO_RING_F_EVENT_IDX)) sc->vtnet_flags |= VTNET_FLAG_EVENT_IDX; if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) { /* This feature should always be negotiated. */ sc->vtnet_flags |= VTNET_FLAG_MAC; } if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) { sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS; sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf); } else sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr); if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) sc->vtnet_rx_nsegs = VTNET_MRG_RX_SEGS; else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) sc->vtnet_rx_nsegs = VTNET_MAX_RX_SEGS; else sc->vtnet_rx_nsegs = VTNET_MIN_RX_SEGS; if (virtio_with_feature(dev, VIRTIO_NET_F_GSO) || virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) || virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6)) sc->vtnet_tx_nsegs = VTNET_MAX_TX_SEGS; else sc->vtnet_tx_nsegs = VTNET_MIN_TX_SEGS; if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) { sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ; if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX)) sc->vtnet_flags |= VTNET_FLAG_CTRL_RX; if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN)) sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER; if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR)) sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC; } if (virtio_with_feature(dev, VIRTIO_NET_F_MQ) && sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) { sc->vtnet_max_vq_pairs = virtio_read_dev_config_2(dev, offsetof(struct virtio_net_config, max_virtqueue_pairs)); } else sc->vtnet_max_vq_pairs = 1; if (sc->vtnet_max_vq_pairs > 1) { /* * Limit the maximum number of queue pairs to the lower of * the number of CPUs and the configured maximum. * The actual number of queues that get used may be less. */ int max; max = vtnet_tunable_int(sc, "mq_max_pairs", vtnet_mq_max_pairs); if (max > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN) { if (max > mp_ncpus) max = mp_ncpus; if (max > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) max = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX; if (max > 1) { sc->vtnet_requested_vq_pairs = max; sc->vtnet_flags |= VTNET_FLAG_MULTIQ; } } } } static int vtnet_init_rxq(struct vtnet_softc *sc, int id) { struct vtnet_rxq *rxq; rxq = &sc->vtnet_rxqs[id]; snprintf(rxq->vtnrx_name, sizeof(rxq->vtnrx_name), "%s-rx%d", device_get_nameunit(sc->vtnet_dev), id); mtx_init(&rxq->vtnrx_mtx, rxq->vtnrx_name, NULL, MTX_DEF); rxq->vtnrx_sc = sc; rxq->vtnrx_id = id; rxq->vtnrx_sg = sglist_alloc(sc->vtnet_rx_nsegs, M_NOWAIT); if (rxq->vtnrx_sg == NULL) return (ENOMEM); - TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq); + NET_TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq); rxq->vtnrx_tq = taskqueue_create(rxq->vtnrx_name, M_NOWAIT, taskqueue_thread_enqueue, &rxq->vtnrx_tq); return (rxq->vtnrx_tq == NULL ? 
ENOMEM : 0); } static int vtnet_init_txq(struct vtnet_softc *sc, int id) { struct vtnet_txq *txq; txq = &sc->vtnet_txqs[id]; snprintf(txq->vtntx_name, sizeof(txq->vtntx_name), "%s-tx%d", device_get_nameunit(sc->vtnet_dev), id); mtx_init(&txq->vtntx_mtx, txq->vtntx_name, NULL, MTX_DEF); txq->vtntx_sc = sc; txq->vtntx_id = id; txq->vtntx_sg = sglist_alloc(sc->vtnet_tx_nsegs, M_NOWAIT); if (txq->vtntx_sg == NULL) return (ENOMEM); #ifndef VTNET_LEGACY_TX txq->vtntx_br = buf_ring_alloc(VTNET_DEFAULT_BUFRING_SIZE, M_DEVBUF, M_NOWAIT, &txq->vtntx_mtx); if (txq->vtntx_br == NULL) return (ENOMEM); TASK_INIT(&txq->vtntx_defrtask, 0, vtnet_txq_tq_deferred, txq); #endif TASK_INIT(&txq->vtntx_intrtask, 0, vtnet_txq_tq_intr, txq); txq->vtntx_tq = taskqueue_create(txq->vtntx_name, M_NOWAIT, taskqueue_thread_enqueue, &txq->vtntx_tq); if (txq->vtntx_tq == NULL) return (ENOMEM); return (0); } static int vtnet_alloc_rxtx_queues(struct vtnet_softc *sc) { int i, npairs, error; npairs = sc->vtnet_max_vq_pairs; sc->vtnet_rxqs = malloc(sizeof(struct vtnet_rxq) * npairs, M_DEVBUF, M_NOWAIT | M_ZERO); sc->vtnet_txqs = malloc(sizeof(struct vtnet_txq) * npairs, M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->vtnet_rxqs == NULL || sc->vtnet_txqs == NULL) return (ENOMEM); for (i = 0; i < npairs; i++) { error = vtnet_init_rxq(sc, i); if (error) return (error); error = vtnet_init_txq(sc, i); if (error) return (error); } vtnet_setup_queue_sysctl(sc); return (0); } static void vtnet_destroy_rxq(struct vtnet_rxq *rxq) { rxq->vtnrx_sc = NULL; rxq->vtnrx_id = -1; if (rxq->vtnrx_sg != NULL) { sglist_free(rxq->vtnrx_sg); rxq->vtnrx_sg = NULL; } if (mtx_initialized(&rxq->vtnrx_mtx) != 0) mtx_destroy(&rxq->vtnrx_mtx); } static void vtnet_destroy_txq(struct vtnet_txq *txq) { txq->vtntx_sc = NULL; txq->vtntx_id = -1; if (txq->vtntx_sg != NULL) { sglist_free(txq->vtntx_sg); txq->vtntx_sg = NULL; } #ifndef VTNET_LEGACY_TX if (txq->vtntx_br != NULL) { buf_ring_free(txq->vtntx_br, M_DEVBUF); txq->vtntx_br = NULL; } #endif if (mtx_initialized(&txq->vtntx_mtx) != 0) mtx_destroy(&txq->vtntx_mtx); } static void vtnet_free_rxtx_queues(struct vtnet_softc *sc) { int i; if (sc->vtnet_rxqs != NULL) { for (i = 0; i < sc->vtnet_max_vq_pairs; i++) vtnet_destroy_rxq(&sc->vtnet_rxqs[i]); free(sc->vtnet_rxqs, M_DEVBUF); sc->vtnet_rxqs = NULL; } if (sc->vtnet_txqs != NULL) { for (i = 0; i < sc->vtnet_max_vq_pairs; i++) vtnet_destroy_txq(&sc->vtnet_txqs[i]); free(sc->vtnet_txqs, M_DEVBUF); sc->vtnet_txqs = NULL; } } static int vtnet_alloc_rx_filters(struct vtnet_softc *sc) { if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) { sc->vtnet_mac_filter = malloc(sizeof(struct vtnet_mac_filter), M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->vtnet_mac_filter == NULL) return (ENOMEM); } if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) { sc->vtnet_vlan_filter = malloc(sizeof(uint32_t) * VTNET_VLAN_FILTER_NWORDS, M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->vtnet_vlan_filter == NULL) return (ENOMEM); } return (0); } static void vtnet_free_rx_filters(struct vtnet_softc *sc) { if (sc->vtnet_mac_filter != NULL) { free(sc->vtnet_mac_filter, M_DEVBUF); sc->vtnet_mac_filter = NULL; } if (sc->vtnet_vlan_filter != NULL) { free(sc->vtnet_vlan_filter, M_DEVBUF); sc->vtnet_vlan_filter = NULL; } } static int vtnet_alloc_virtqueues(struct vtnet_softc *sc) { device_t dev; struct vq_alloc_info *info; struct vtnet_rxq *rxq; struct vtnet_txq *txq; int i, idx, flags, nvqs, error; dev = sc->vtnet_dev; flags = 0; nvqs = sc->vtnet_max_vq_pairs * 2; if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) nvqs++; info = 
malloc(sizeof(struct vq_alloc_info) * nvqs, M_TEMP, M_NOWAIT); if (info == NULL) return (ENOMEM); for (i = 0, idx = 0; i < sc->vtnet_max_vq_pairs; i++, idx+=2) { rxq = &sc->vtnet_rxqs[i]; VQ_ALLOC_INFO_INIT(&info[idx], sc->vtnet_rx_nsegs, vtnet_rx_vq_intr, rxq, &rxq->vtnrx_vq, "%s-%d rx", device_get_nameunit(dev), rxq->vtnrx_id); txq = &sc->vtnet_txqs[i]; VQ_ALLOC_INFO_INIT(&info[idx+1], sc->vtnet_tx_nsegs, vtnet_tx_vq_intr, txq, &txq->vtntx_vq, "%s-%d tx", device_get_nameunit(dev), txq->vtntx_id); } if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) { VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, NULL, &sc->vtnet_ctrl_vq, "%s ctrl", device_get_nameunit(dev)); } /* * Enable interrupt binding if this is multiqueue. This only matters * when per-vq MSIX is available. */ if (sc->vtnet_flags & VTNET_FLAG_MULTIQ) flags |= 0; error = virtio_alloc_virtqueues(dev, flags, nvqs, info); free(info, M_TEMP); return (error); } static int vtnet_setup_interface(struct vtnet_softc *sc) { device_t dev; struct pfil_head_args pa; struct ifnet *ifp; dev = sc->vtnet_dev; ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "cannot allocate ifnet structure\n"); return (ENOSPC); } if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_baudrate = IF_Gbps(10); /* Approx. */ ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = vtnet_init; ifp->if_ioctl = vtnet_ioctl; ifp->if_get_counter = vtnet_get_counter; #ifndef VTNET_LEGACY_TX ifp->if_transmit = vtnet_txq_mq_start; ifp->if_qflush = vtnet_qflush; #else struct virtqueue *vq = sc->vtnet_txqs[0].vtntx_vq; ifp->if_start = vtnet_start; IFQ_SET_MAXLEN(&ifp->if_snd, virtqueue_size(vq) - 1); ifp->if_snd.ifq_drv_maxlen = virtqueue_size(vq) - 1; IFQ_SET_READY(&ifp->if_snd); #endif ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd, vtnet_ifmedia_sts); ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL); ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE); /* Read (or generate) the MAC address for the adapter. */ vtnet_get_hwaddr(sc); ether_ifattach(ifp, sc->vtnet_hwaddr); if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS)) ifp->if_capabilities |= IFCAP_LINKSTATE; /* Tell the upper layer(s) we support long frames. */ ifp->if_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU; if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) { ifp->if_capabilities |= IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6; if (virtio_with_feature(dev, VIRTIO_NET_F_GSO)) { ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6; sc->vtnet_flags |= VTNET_FLAG_TSO_ECN; } else { if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4)) ifp->if_capabilities |= IFCAP_TSO4; if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6)) ifp->if_capabilities |= IFCAP_TSO6; if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN)) sc->vtnet_flags |= VTNET_FLAG_TSO_ECN; } if (ifp->if_capabilities & IFCAP_TSO) ifp->if_capabilities |= IFCAP_VLAN_HWTSO; } if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) { ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6; if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) || virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6)) ifp->if_capabilities |= IFCAP_LRO; } if (ifp->if_capabilities & IFCAP_HWCSUM) { /* * VirtIO does not support VLAN tagging, but we can fake * it by inserting and removing the 802.1Q header during * transmit and receive. We are then able to do checksum * offloading of VLAN frames. 
*/ ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM; } ifp->if_capenable = ifp->if_capabilities; /* * Capabilities after here are not enabled by default. */ if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) { ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config, vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST); sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST); } vtnet_set_rx_process_limit(sc); vtnet_set_tx_intr_threshold(sc); DEBUGNET_SET(ifp, vtnet); pa.pa_version = PFIL_VERSION; pa.pa_flags = PFIL_IN; pa.pa_type = PFIL_TYPE_ETHERNET; pa.pa_headname = ifp->if_xname; sc->vtnet_pfil = pfil_head_register(&pa); return (0); } static int vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu) { struct ifnet *ifp; int frame_size, clsize; ifp = sc->vtnet_ifp; if (new_mtu < ETHERMIN || new_mtu > VTNET_MAX_MTU) return (EINVAL); frame_size = sc->vtnet_hdr_size + sizeof(struct ether_vlan_header) + new_mtu; /* * Based on the new MTU (and hence frame size) determine which * cluster size is most appropriate for the receive queues. */ if (frame_size <= MCLBYTES) { clsize = MCLBYTES; } else if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) { /* Avoid going past 9K jumbos. */ if (frame_size > MJUM9BYTES) return (EINVAL); clsize = MJUM9BYTES; } else clsize = MJUMPAGESIZE; ifp->if_mtu = new_mtu; sc->vtnet_rx_new_clsize = clsize; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; vtnet_init_locked(sc); } return (0); } static int vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct vtnet_softc *sc; struct ifreq *ifr; int reinit, mask, error; sc = ifp->if_softc; ifr = (struct ifreq *) data; error = 0; switch (cmd) { case SIOCSIFMTU: if (ifp->if_mtu != ifr->ifr_mtu) { VTNET_CORE_LOCK(sc); error = vtnet_change_mtu(sc, ifr->ifr_mtu); VTNET_CORE_UNLOCK(sc); } break; case SIOCSIFFLAGS: VTNET_CORE_LOCK(sc); if ((ifp->if_flags & IFF_UP) == 0) { if (ifp->if_drv_flags & IFF_DRV_RUNNING) vtnet_stop(sc); } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) { if ((ifp->if_flags ^ sc->vtnet_if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) { if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) vtnet_rx_filter(sc); else { ifp->if_flags |= IFF_PROMISC; if ((ifp->if_flags ^ sc->vtnet_if_flags) & IFF_ALLMULTI) error = ENOTSUP; } } } else vtnet_init_locked(sc); if (error == 0) sc->vtnet_if_flags = ifp->if_flags; VTNET_CORE_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0) break; VTNET_CORE_LOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) vtnet_rx_filter_mac(sc); VTNET_CORE_UNLOCK(sc); break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd); break; case SIOCSIFCAP: VTNET_CORE_LOCK(sc); mask = ifr->ifr_reqcap ^ ifp->if_capenable; if (mask & IFCAP_TXCSUM) ifp->if_capenable ^= IFCAP_TXCSUM; if (mask & IFCAP_TXCSUM_IPV6) ifp->if_capenable ^= IFCAP_TXCSUM_IPV6; if (mask & IFCAP_TSO4) ifp->if_capenable ^= IFCAP_TSO4; if (mask & IFCAP_TSO6) ifp->if_capenable ^= IFCAP_TSO6; if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO | IFCAP_VLAN_HWFILTER)) { /* These Rx features require us to renegotiate. 
*/ reinit = 1; if (mask & IFCAP_RXCSUM) ifp->if_capenable ^= IFCAP_RXCSUM; if (mask & IFCAP_RXCSUM_IPV6) ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; if (mask & IFCAP_LRO) ifp->if_capenable ^= IFCAP_LRO; if (mask & IFCAP_VLAN_HWFILTER) ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; } else reinit = 0; if (mask & IFCAP_VLAN_HWTSO) ifp->if_capenable ^= IFCAP_VLAN_HWTSO; if (mask & IFCAP_VLAN_HWTAGGING) ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; vtnet_init_locked(sc); } VTNET_CORE_UNLOCK(sc); VLAN_CAPABILITIES(ifp); break; default: error = ether_ioctl(ifp, cmd, data); break; } VTNET_CORE_LOCK_ASSERT_NOTOWNED(sc); return (error); } static int vtnet_rxq_populate(struct vtnet_rxq *rxq) { struct virtqueue *vq; int nbufs, error; #ifdef DEV_NETMAP error = vtnet_netmap_rxq_populate(rxq); if (error >= 0) return (error); #endif /* DEV_NETMAP */ vq = rxq->vtnrx_vq; error = ENOSPC; for (nbufs = 0; !virtqueue_full(vq); nbufs++) { error = vtnet_rxq_new_buf(rxq); if (error) break; } if (nbufs > 0) { virtqueue_notify(vq); /* * EMSGSIZE signifies the virtqueue did not have enough * entries available to hold the last mbuf. This is not * an error. */ if (error == EMSGSIZE) error = 0; } return (error); } static void vtnet_rxq_free_mbufs(struct vtnet_rxq *rxq) { struct virtqueue *vq; struct mbuf *m; int last; #ifdef DEV_NETMAP int netmap_bufs = vtnet_netmap_queue_on(rxq->vtnrx_sc, NR_RX, rxq->vtnrx_id); #else /* !DEV_NETMAP */ int netmap_bufs = 0; #endif /* !DEV_NETMAP */ vq = rxq->vtnrx_vq; last = 0; while ((m = virtqueue_drain(vq, &last)) != NULL) { if (!netmap_bufs) m_freem(m); } KASSERT(virtqueue_empty(vq), ("%s: mbufs remaining in rx queue %p", __func__, rxq)); } static struct mbuf * vtnet_rx_alloc_buf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp) { struct mbuf *m_head, *m_tail, *m; int i, clsize; clsize = sc->vtnet_rx_clsize; KASSERT(nbufs == 1 || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG, ("%s: chained mbuf %d request without LRO_NOMRG", __func__, nbufs)); m_head = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, clsize); if (m_head == NULL) goto fail; m_head->m_len = clsize; m_tail = m_head; /* Allocate the rest of the chain. */ for (i = 1; i < nbufs; i++) { m = m_getjcl(M_NOWAIT, MT_DATA, 0, clsize); if (m == NULL) goto fail; m->m_len = clsize; m_tail->m_next = m; m_tail = m; } if (m_tailp != NULL) *m_tailp = m_tail; return (m_head); fail: sc->vtnet_stats.mbuf_alloc_failed++; m_freem(m_head); return (NULL); } /* * Slow path for when LRO without mergeable buffers is negotiated. */ static int vtnet_rxq_replace_lro_nomgr_buf(struct vtnet_rxq *rxq, struct mbuf *m0, int len0) { struct vtnet_softc *sc; struct mbuf *m, *m_prev; struct mbuf *m_new, *m_tail; int len, clsize, nreplace, error; sc = rxq->vtnrx_sc; clsize = sc->vtnet_rx_clsize; m_prev = NULL; m_tail = NULL; nreplace = 0; m = m0; len = len0; /* * Since these mbuf chains are so large, we avoid allocating an * entire replacement chain if possible. When the received frame * did not consume the entire chain, the unused mbufs are moved * to the replacement chain. */ while (len > 0) { /* * Something is seriously wrong if we received a frame * larger than the chain. Drop it. */ if (m == NULL) { sc->vtnet_stats.rx_frame_too_large++; return (EMSGSIZE); } /* We always allocate the same cluster size. 
*/ KASSERT(m->m_len == clsize, ("%s: mbuf size %d is not the cluster size %d", __func__, m->m_len, clsize)); m->m_len = MIN(m->m_len, len); len -= m->m_len; m_prev = m; m = m->m_next; nreplace++; } KASSERT(nreplace <= sc->vtnet_rx_nmbufs, ("%s: too many replacement mbufs %d max %d", __func__, nreplace, sc->vtnet_rx_nmbufs)); m_new = vtnet_rx_alloc_buf(sc, nreplace, &m_tail); if (m_new == NULL) { m_prev->m_len = clsize; return (ENOBUFS); } /* * Move any unused mbufs from the received chain onto the end * of the new chain. */ if (m_prev->m_next != NULL) { m_tail->m_next = m_prev->m_next; m_prev->m_next = NULL; } error = vtnet_rxq_enqueue_buf(rxq, m_new); if (error) { /* * BAD! We could not enqueue the replacement mbuf chain. We * must restore the m0 chain to the original state if it was * modified so we can subsequently discard it. * * NOTE: The replacement is suppose to be an identical copy * to the one just dequeued so this is an unexpected error. */ sc->vtnet_stats.rx_enq_replacement_failed++; if (m_tail->m_next != NULL) { m_prev->m_next = m_tail->m_next; m_tail->m_next = NULL; } m_prev->m_len = clsize; m_freem(m_new); } return (error); } static int vtnet_rxq_replace_buf(struct vtnet_rxq *rxq, struct mbuf *m, int len) { struct vtnet_softc *sc; struct mbuf *m_new; int error; sc = rxq->vtnrx_sc; KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL, ("%s: chained mbuf without LRO_NOMRG", __func__)); if (m->m_next == NULL) { /* Fast-path for the common case of just one mbuf. */ if (m->m_len < len) return (EINVAL); m_new = vtnet_rx_alloc_buf(sc, 1, NULL); if (m_new == NULL) return (ENOBUFS); error = vtnet_rxq_enqueue_buf(rxq, m_new); if (error) { /* * The new mbuf is suppose to be an identical * copy of the one just dequeued so this is an * unexpected error. */ m_freem(m_new); sc->vtnet_stats.rx_enq_replacement_failed++; } else m->m_len = len; } else error = vtnet_rxq_replace_lro_nomgr_buf(rxq, m, len); return (error); } static int vtnet_rxq_enqueue_buf(struct vtnet_rxq *rxq, struct mbuf *m) { struct vtnet_softc *sc; struct sglist *sg; struct vtnet_rx_header *rxhdr; uint8_t *mdata; int offset, error; sc = rxq->vtnrx_sc; sg = rxq->vtnrx_sg; mdata = mtod(m, uint8_t *); VTNET_RXQ_LOCK_ASSERT(rxq); KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL, ("%s: chained mbuf without LRO_NOMRG", __func__)); KASSERT(m->m_len == sc->vtnet_rx_clsize, ("%s: unexpected cluster size %d/%d", __func__, m->m_len, sc->vtnet_rx_clsize)); sglist_reset(sg); if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) { MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr)); rxhdr = (struct vtnet_rx_header *) mdata; sglist_append(sg, &rxhdr->vrh_hdr, sc->vtnet_hdr_size); offset = sizeof(struct vtnet_rx_header); } else offset = 0; sglist_append(sg, mdata + offset, m->m_len - offset); if (m->m_next != NULL) { error = sglist_append_mbuf(sg, m->m_next); MPASS(error == 0); } error = virtqueue_enqueue(rxq->vtnrx_vq, m, sg, 0, sg->sg_nseg); return (error); } static int vtnet_rxq_new_buf(struct vtnet_rxq *rxq) { struct vtnet_softc *sc; struct mbuf *m; int error; sc = rxq->vtnrx_sc; m = vtnet_rx_alloc_buf(sc, sc->vtnet_rx_nmbufs, NULL); if (m == NULL) return (ENOBUFS); error = vtnet_rxq_enqueue_buf(rxq, m); if (error) m_freem(m); return (error); } /* * Use the checksum offset in the VirtIO header to set the * correct CSUM_* flags. 
*/ static int vtnet_rxq_csum_by_offset(struct vtnet_rxq *rxq, struct mbuf *m, uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr) { struct vtnet_softc *sc; #if defined(INET) || defined(INET6) int offset = hdr->csum_start + hdr->csum_offset; #endif sc = rxq->vtnrx_sc; /* Only do a basic sanity check on the offset. */ switch (eth_type) { #if defined(INET) case ETHERTYPE_IP: if (__predict_false(offset < ip_start + sizeof(struct ip))) return (1); break; #endif #if defined(INET6) case ETHERTYPE_IPV6: if (__predict_false(offset < ip_start + sizeof(struct ip6_hdr))) return (1); break; #endif default: sc->vtnet_stats.rx_csum_bad_ethtype++; return (1); } /* * Use the offset to determine the appropriate CSUM_* flags. This is * a bit dirty, but we can get by with it since the checksum offsets * happen to be different. We assume the host host does not do IPv4 * header checksum offloading. */ switch (hdr->csum_offset) { case offsetof(struct udphdr, uh_sum): case offsetof(struct tcphdr, th_sum): m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xFFFF; break; default: sc->vtnet_stats.rx_csum_bad_offset++; return (1); } return (0); } static int vtnet_rxq_csum_by_parse(struct vtnet_rxq *rxq, struct mbuf *m, uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr) { struct vtnet_softc *sc; int offset, proto; sc = rxq->vtnrx_sc; switch (eth_type) { #if defined(INET) case ETHERTYPE_IP: { struct ip *ip; if (__predict_false(m->m_len < ip_start + sizeof(struct ip))) return (1); ip = (struct ip *)(m->m_data + ip_start); proto = ip->ip_p; offset = ip_start + (ip->ip_hl << 2); break; } #endif #if defined(INET6) case ETHERTYPE_IPV6: if (__predict_false(m->m_len < ip_start + sizeof(struct ip6_hdr))) return (1); offset = ip6_lasthdr(m, ip_start, IPPROTO_IPV6, &proto); if (__predict_false(offset < 0)) return (1); break; #endif default: sc->vtnet_stats.rx_csum_bad_ethtype++; return (1); } switch (proto) { case IPPROTO_TCP: if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) return (1); m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xFFFF; break; case IPPROTO_UDP: if (__predict_false(m->m_len < offset + sizeof(struct udphdr))) return (1); m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xFFFF; break; default: /* * For the remaining protocols, FreeBSD does not support * checksum offloading, so the checksum will be recomputed. */ #if 0 if_printf(sc->vtnet_ifp, "cksum offload of unsupported " "protocol eth_type=%#x proto=%d csum_start=%d " "csum_offset=%d\n", __func__, eth_type, proto, hdr->csum_start, hdr->csum_offset); #endif break; } return (0); } /* * Set the appropriate CSUM_* flags. Unfortunately, the information * provided is not directly useful to us. The VirtIO header gives the * offset of the checksum, which is all Linux needs, but this is not * how FreeBSD does things. We are forced to peek inside the packet * a bit. * * It would be nice if VirtIO gave us the L4 protocol or if FreeBSD * could accept the offsets and let the stack figure it out. */ static int vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m, struct virtio_net_hdr *hdr) { struct ether_header *eh; struct ether_vlan_header *evh; uint16_t eth_type; int offset, error; eh = mtod(m, struct ether_header *); eth_type = ntohs(eh->ether_type); if (eth_type == ETHERTYPE_VLAN) { /* BMV: We should handle nested VLAN tags too. 
*/ evh = mtod(m, struct ether_vlan_header *); eth_type = ntohs(evh->evl_proto); offset = sizeof(struct ether_vlan_header); } else offset = sizeof(struct ether_header); if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) error = vtnet_rxq_csum_by_offset(rxq, m, eth_type, offset, hdr); else error = vtnet_rxq_csum_by_parse(rxq, m, eth_type, offset, hdr); return (error); } static void vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *rxq, int nbufs) { struct mbuf *m; while (--nbufs > 0) { m = virtqueue_dequeue(rxq->vtnrx_vq, NULL); if (m == NULL) break; vtnet_rxq_discard_buf(rxq, m); } } static void vtnet_rxq_discard_buf(struct vtnet_rxq *rxq, struct mbuf *m) { int error; /* * Requeue the discarded mbuf. This should always be successful * since it was just dequeued. */ error = vtnet_rxq_enqueue_buf(rxq, m); KASSERT(error == 0, ("%s: cannot requeue discarded mbuf %d", __func__, error)); } static int vtnet_rxq_merged_eof(struct vtnet_rxq *rxq, struct mbuf *m_head, int nbufs) { struct vtnet_softc *sc; struct virtqueue *vq; struct mbuf *m, *m_tail; int len; sc = rxq->vtnrx_sc; vq = rxq->vtnrx_vq; m_tail = m_head; while (--nbufs > 0) { m = virtqueue_dequeue(vq, &len); if (m == NULL) { rxq->vtnrx_stats.vrxs_ierrors++; goto fail; } if (vtnet_rxq_new_buf(rxq) != 0) { rxq->vtnrx_stats.vrxs_iqdrops++; vtnet_rxq_discard_buf(rxq, m); if (nbufs > 1) vtnet_rxq_discard_merged_bufs(rxq, nbufs); goto fail; } if (m->m_len < len) len = m->m_len; m->m_len = len; m->m_flags &= ~M_PKTHDR; m_head->m_pkthdr.len += len; m_tail->m_next = m; m_tail = m; } return (0); fail: sc->vtnet_stats.rx_mergeable_failed++; m_freem(m_head); return (1); } static void vtnet_rxq_input(struct vtnet_rxq *rxq, struct mbuf *m, struct virtio_net_hdr *hdr) { struct vtnet_softc *sc; struct ifnet *ifp; struct ether_header *eh; sc = rxq->vtnrx_sc; ifp = sc->vtnet_ifp; if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { eh = mtod(m, struct ether_header *); if (eh->ether_type == htons(ETHERTYPE_VLAN)) { vtnet_vlan_tag_remove(m); /* * With the 802.1Q header removed, update the * checksum starting location accordingly. */ if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) hdr->csum_start -= ETHER_VLAN_ENCAP_LEN; } } m->m_pkthdr.flowid = rxq->vtnrx_id; M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); /* * BMV: FreeBSD does not have the UNNECESSARY and PARTIAL checksum * distinction that Linux does. Need to reevaluate if performing * offloading for the NEEDS_CSUM case is really appropriate. 
*/ if (hdr->flags & (VIRTIO_NET_HDR_F_NEEDS_CSUM | VIRTIO_NET_HDR_F_DATA_VALID)) { if (vtnet_rxq_csum(rxq, m, hdr) == 0) rxq->vtnrx_stats.vrxs_csum++; else rxq->vtnrx_stats.vrxs_csum_failed++; } rxq->vtnrx_stats.vrxs_ipackets++; rxq->vtnrx_stats.vrxs_ibytes += m->m_pkthdr.len; VTNET_RXQ_UNLOCK(rxq); (*ifp->if_input)(ifp, m); VTNET_RXQ_LOCK(rxq); } static int vtnet_rxq_eof(struct vtnet_rxq *rxq) { struct virtio_net_hdr lhdr, *hdr; struct vtnet_softc *sc; struct ifnet *ifp; struct virtqueue *vq; struct mbuf *m, *mr; struct virtio_net_hdr_mrg_rxbuf *mhdr; int len, deq, nbufs, adjsz, count; pfil_return_t pfil; bool pfil_done; sc = rxq->vtnrx_sc; vq = rxq->vtnrx_vq; ifp = sc->vtnet_ifp; hdr = &lhdr; deq = 0; count = sc->vtnet_rx_process_limit; VTNET_RXQ_LOCK_ASSERT(rxq); while (count-- > 0) { m = virtqueue_dequeue(vq, &len); if (m == NULL) break; deq++; if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) { rxq->vtnrx_stats.vrxs_ierrors++; vtnet_rxq_discard_buf(rxq, m); continue; } if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) { nbufs = 1; adjsz = sizeof(struct vtnet_rx_header); /* * Account for our pad inserted between the header * and the actual start of the frame. */ len += VTNET_RX_HEADER_PAD; } else { mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *); nbufs = mhdr->num_buffers; adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf); } /* * If we have enough data in first mbuf, run it through * pfil as a memory buffer before dequeueing the rest. */ if (PFIL_HOOKED_IN(sc->vtnet_pfil) && len - adjsz >= ETHER_HDR_LEN + max_protohdr) { pfil = pfil_run_hooks(sc->vtnet_pfil, m->m_data + adjsz, ifp, (len - adjsz) | PFIL_MEMPTR | PFIL_IN, NULL); switch (pfil) { case PFIL_REALLOCED: mr = pfil_mem2mbuf(m->m_data + adjsz); vtnet_rxq_input(rxq, mr, hdr); /* FALLTHROUGH */ case PFIL_DROPPED: case PFIL_CONSUMED: vtnet_rxq_discard_buf(rxq, m); if (nbufs > 1) vtnet_rxq_discard_merged_bufs(rxq, nbufs); continue; default: KASSERT(pfil == PFIL_PASS, ("Filter returned %d!\n", pfil)); }; pfil_done = true; } else pfil_done = false; if (vtnet_rxq_replace_buf(rxq, m, len) != 0) { rxq->vtnrx_stats.vrxs_iqdrops++; vtnet_rxq_discard_buf(rxq, m); if (nbufs > 1) vtnet_rxq_discard_merged_bufs(rxq, nbufs); continue; } m->m_pkthdr.len = len; m->m_pkthdr.rcvif = ifp; m->m_pkthdr.csum_flags = 0; if (nbufs > 1) { /* Dequeue the rest of chain. */ if (vtnet_rxq_merged_eof(rxq, m, nbufs) != 0) continue; } /* * Save copy of header before we strip it. For both mergeable * and non-mergeable, the header is at the beginning of the * mbuf data. We no longer need num_buffers, so always use a * regular header. * * BMV: Is this memcpy() expensive? We know the mbuf data is * still valid even after the m_adj(). */ memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr)); m_adj(m, adjsz); if (PFIL_HOOKED_IN(sc->vtnet_pfil) && pfil_done == false) { pfil = pfil_run_hooks(sc->vtnet_pfil, &m, ifp, PFIL_IN, NULL); switch (pfil) { case PFIL_DROPPED: case PFIL_CONSUMED: continue; default: KASSERT(pfil == PFIL_PASS, ("Filter returned %d!\n", pfil)); } } vtnet_rxq_input(rxq, m, hdr); /* Must recheck after dropping the Rx lock. */ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) break; } if (deq > 0) virtqueue_notify(vq); return (count > 0 ? 0 : EAGAIN); } static void vtnet_rx_vq_intr(void *xrxq) { struct vtnet_softc *sc; struct vtnet_rxq *rxq; struct ifnet *ifp; int tries, more; rxq = xrxq; sc = rxq->vtnrx_sc; ifp = sc->vtnet_ifp; tries = 0; if (__predict_false(rxq->vtnrx_id >= sc->vtnet_act_vq_pairs)) { /* * Ignore this interrupt. 
Either this is a spurious interrupt * or multiqueue without per-VQ MSIX so every queue needs to * be polled (a brain dead configuration we could try harder * to avoid). */ vtnet_rxq_disable_intr(rxq); return; } #ifdef DEV_NETMAP if (netmap_rx_irq(ifp, rxq->vtnrx_id, &more) != NM_IRQ_PASS) return; #endif /* DEV_NETMAP */ VTNET_RXQ_LOCK(rxq); again: if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { VTNET_RXQ_UNLOCK(rxq); return; } more = vtnet_rxq_eof(rxq); if (more || vtnet_rxq_enable_intr(rxq) != 0) { if (!more) vtnet_rxq_disable_intr(rxq); /* * This is an occasional condition or race (when !more), * so retry a few times before scheduling the taskqueue. */ if (tries++ < VTNET_INTR_DISABLE_RETRIES) goto again; VTNET_RXQ_UNLOCK(rxq); rxq->vtnrx_stats.vrxs_rescheduled++; taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask); } else VTNET_RXQ_UNLOCK(rxq); } static void vtnet_rxq_tq_intr(void *xrxq, int pending) { struct vtnet_softc *sc; struct vtnet_rxq *rxq; struct ifnet *ifp; int more; rxq = xrxq; sc = rxq->vtnrx_sc; ifp = sc->vtnet_ifp; VTNET_RXQ_LOCK(rxq); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { VTNET_RXQ_UNLOCK(rxq); return; } more = vtnet_rxq_eof(rxq); if (more || vtnet_rxq_enable_intr(rxq) != 0) { if (!more) vtnet_rxq_disable_intr(rxq); rxq->vtnrx_stats.vrxs_rescheduled++; taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask); } VTNET_RXQ_UNLOCK(rxq); } static int vtnet_txq_below_threshold(struct vtnet_txq *txq) { struct vtnet_softc *sc; struct virtqueue *vq; sc = txq->vtntx_sc; vq = txq->vtntx_vq; return (virtqueue_nfree(vq) <= sc->vtnet_tx_intr_thresh); } static int vtnet_txq_notify(struct vtnet_txq *txq) { struct virtqueue *vq; vq = txq->vtntx_vq; txq->vtntx_watchdog = VTNET_TX_TIMEOUT; virtqueue_notify(vq); if (vtnet_txq_enable_intr(txq) == 0) return (0); /* * Drain frames that were completed since last checked. If this * causes the queue to go above the threshold, the caller should * continue transmitting. */ if (vtnet_txq_eof(txq) != 0 && vtnet_txq_below_threshold(txq) == 0) { virtqueue_disable_intr(vq); return (1); } return (0); } static void vtnet_txq_free_mbufs(struct vtnet_txq *txq) { struct virtqueue *vq; struct vtnet_tx_header *txhdr; int last; #ifdef DEV_NETMAP int netmap_bufs = vtnet_netmap_queue_on(txq->vtntx_sc, NR_TX, txq->vtntx_id); #else /* !DEV_NETMAP */ int netmap_bufs = 0; #endif /* !DEV_NETMAP */ vq = txq->vtntx_vq; last = 0; while ((txhdr = virtqueue_drain(vq, &last)) != NULL) { if (!netmap_bufs) { m_freem(txhdr->vth_mbuf); uma_zfree(vtnet_tx_header_zone, txhdr); } } KASSERT(virtqueue_empty(vq), ("%s: mbufs remaining in tx queue %p", __func__, txq)); } /* * BMV: Much of this can go away once we finally have offsets in * the mbuf packet header. Bug andre@. */ static int vtnet_txq_offload_ctx(struct vtnet_txq *txq, struct mbuf *m, int *etype, int *proto, int *start) { struct vtnet_softc *sc; struct ether_vlan_header *evh; int offset; sc = txq->vtntx_sc; evh = mtod(m, struct ether_vlan_header *); if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { /* BMV: We should handle nested VLAN tags too. 
*/ *etype = ntohs(evh->evl_proto); offset = sizeof(struct ether_vlan_header); } else { *etype = ntohs(evh->evl_encap_proto); offset = sizeof(struct ether_header); } switch (*etype) { #if defined(INET) case ETHERTYPE_IP: { struct ip *ip, iphdr; if (__predict_false(m->m_len < offset + sizeof(struct ip))) { m_copydata(m, offset, sizeof(struct ip), (caddr_t) &iphdr); ip = &iphdr; } else ip = (struct ip *)(m->m_data + offset); *proto = ip->ip_p; *start = offset + (ip->ip_hl << 2); break; } #endif #if defined(INET6) case ETHERTYPE_IPV6: *proto = -1; *start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto); /* Assert the network stack sent us a valid packet. */ KASSERT(*start > offset, ("%s: mbuf %p start %d offset %d proto %d", __func__, m, *start, offset, *proto)); break; #endif default: sc->vtnet_stats.tx_csum_bad_ethtype++; return (EINVAL); } return (0); } static int vtnet_txq_offload_tso(struct vtnet_txq *txq, struct mbuf *m, int eth_type, int offset, struct virtio_net_hdr *hdr) { static struct timeval lastecn; static int curecn; struct vtnet_softc *sc; struct tcphdr *tcp, tcphdr; sc = txq->vtntx_sc; if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) { m_copydata(m, offset, sizeof(struct tcphdr), (caddr_t) &tcphdr); tcp = &tcphdr; } else tcp = (struct tcphdr *)(m->m_data + offset); hdr->hdr_len = offset + (tcp->th_off << 2); hdr->gso_size = m->m_pkthdr.tso_segsz; hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 : VIRTIO_NET_HDR_GSO_TCPV6; if (tcp->th_flags & TH_CWR) { /* * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In FreeBSD, * ECN support is not on a per-interface basis, but globally via * the net.inet.tcp.ecn.enable sysctl knob. The default is off. */ if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) { if (ppsratecheck(&lastecn, &curecn, 1)) if_printf(sc->vtnet_ifp, "TSO with ECN not negotiated with host\n"); return (ENOTSUP); } hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN; } txq->vtntx_stats.vtxs_tso++; return (0); } static struct mbuf * vtnet_txq_offload(struct vtnet_txq *txq, struct mbuf *m, struct virtio_net_hdr *hdr) { struct vtnet_softc *sc; int flags, etype, csum_start, proto, error; sc = txq->vtntx_sc; flags = m->m_pkthdr.csum_flags; error = vtnet_txq_offload_ctx(txq, m, &etype, &proto, &csum_start); if (error) goto drop; if ((etype == ETHERTYPE_IP && flags & VTNET_CSUM_OFFLOAD) || (etype == ETHERTYPE_IPV6 && flags & VTNET_CSUM_OFFLOAD_IPV6)) { /* * We could compare the IP protocol vs the CSUM_ flag too, * but that really should not be necessary. */ hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM; hdr->csum_start = csum_start; hdr->csum_offset = m->m_pkthdr.csum_data; txq->vtntx_stats.vtxs_csum++; } if (flags & CSUM_TSO) { if (__predict_false(proto != IPPROTO_TCP)) { /* Likely failed to correctly parse the mbuf. 
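			 * CSUM_TSO is only valid for TCP, so any other
			 * protocol here means the header walk in
			 * vtnet_txq_offload_ctx() went wrong.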
*/ sc->vtnet_stats.tx_tso_not_tcp++; goto drop; } KASSERT(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM, ("%s: mbuf %p TSO without checksum offload %#x", __func__, m, flags)); error = vtnet_txq_offload_tso(txq, m, etype, csum_start, hdr); if (error) goto drop; } return (m); drop: m_freem(m); return (NULL); } static int vtnet_txq_enqueue_buf(struct vtnet_txq *txq, struct mbuf **m_head, struct vtnet_tx_header *txhdr) { struct vtnet_softc *sc; struct virtqueue *vq; struct sglist *sg; struct mbuf *m; int error; sc = txq->vtntx_sc; vq = txq->vtntx_vq; sg = txq->vtntx_sg; m = *m_head; sglist_reset(sg); error = sglist_append(sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size); KASSERT(error == 0 && sg->sg_nseg == 1, ("%s: error %d adding header to sglist", __func__, error)); error = sglist_append_mbuf(sg, m); if (error) { m = m_defrag(m, M_NOWAIT); if (m == NULL) goto fail; *m_head = m; sc->vtnet_stats.tx_defragged++; error = sglist_append_mbuf(sg, m); if (error) goto fail; } txhdr->vth_mbuf = m; error = virtqueue_enqueue(vq, txhdr, sg, sg->sg_nseg, 0); return (error); fail: sc->vtnet_stats.tx_defrag_failed++; m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } static int vtnet_txq_encap(struct vtnet_txq *txq, struct mbuf **m_head, int flags) { struct vtnet_tx_header *txhdr; struct virtio_net_hdr *hdr; struct mbuf *m; int error; m = *m_head; M_ASSERTPKTHDR(m); txhdr = uma_zalloc(vtnet_tx_header_zone, flags | M_ZERO); if (txhdr == NULL) { m_freem(m); *m_head = NULL; return (ENOMEM); } /* * Always use the non-mergeable header, regardless if the feature * was negotiated. For transmit, num_buffers is always zero. The * vtnet_hdr_size is used to enqueue the correct header size. */ hdr = &txhdr->vth_uhdr.hdr; if (m->m_flags & M_VLANTAG) { m = ether_vlanencap(m, m->m_pkthdr.ether_vtag); if ((*m_head = m) == NULL) { error = ENOBUFS; goto fail; } m->m_flags &= ~M_VLANTAG; } if (m->m_pkthdr.csum_flags & VTNET_CSUM_ALL_OFFLOAD) { m = vtnet_txq_offload(txq, m, hdr); if ((*m_head = m) == NULL) { error = ENOBUFS; goto fail; } } error = vtnet_txq_enqueue_buf(txq, m_head, txhdr); if (error == 0) return (0); fail: uma_zfree(vtnet_tx_header_zone, txhdr); return (error); } #ifdef VTNET_LEGACY_TX static void vtnet_start_locked(struct vtnet_txq *txq, struct ifnet *ifp) { struct vtnet_softc *sc; struct virtqueue *vq; struct mbuf *m0; int tries, enq; sc = txq->vtntx_sc; vq = txq->vtntx_vq; tries = 0; VTNET_TXQ_LOCK_ASSERT(txq); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->vtnet_link_active == 0) return; vtnet_txq_eof(txq); again: enq = 0; while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { if (virtqueue_full(vq)) break; IFQ_DRV_DEQUEUE(&ifp->if_snd, m0); if (m0 == NULL) break; if (vtnet_txq_encap(txq, &m0, M_NOWAIT) != 0) { if (m0 != NULL) IFQ_DRV_PREPEND(&ifp->if_snd, m0); break; } enq++; ETHER_BPF_MTAP(ifp, m0); } if (enq > 0 && vtnet_txq_notify(txq) != 0) { if (tries++ < VTNET_NOTIFY_RETRIES) goto again; txq->vtntx_stats.vtxs_rescheduled++; taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask); } } static void vtnet_start(struct ifnet *ifp) { struct vtnet_softc *sc; struct vtnet_txq *txq; sc = ifp->if_softc; txq = &sc->vtnet_txqs[0]; VTNET_TXQ_LOCK(txq); vtnet_start_locked(txq, ifp); VTNET_TXQ_UNLOCK(txq); } #else /* !VTNET_LEGACY_TX */ static int vtnet_txq_mq_start_locked(struct vtnet_txq *txq, struct mbuf *m) { struct vtnet_softc *sc; struct virtqueue *vq; struct buf_ring *br; struct ifnet *ifp; int enq, tries, error; sc = txq->vtntx_sc; vq = txq->vtntx_vq; br = txq->vtntx_br; ifp = sc->vtnet_ifp; tries = 0; error = 0; 
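	/*
	 * Locked multiqueue transmit path: frames are staged in the per-queue
	 * buf_ring and drained into the virtqueue below.  A frame that does
	 * not fit is put back on the ring and retried later from the deferred
	 * transmit task.
	 */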
VTNET_TXQ_LOCK_ASSERT(txq); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->vtnet_link_active == 0) { if (m != NULL) error = drbr_enqueue(ifp, br, m); return (error); } if (m != NULL) { error = drbr_enqueue(ifp, br, m); if (error) return (error); } vtnet_txq_eof(txq); again: enq = 0; while ((m = drbr_peek(ifp, br)) != NULL) { if (virtqueue_full(vq)) { drbr_putback(ifp, br, m); break; } if (vtnet_txq_encap(txq, &m, M_NOWAIT) != 0) { if (m != NULL) drbr_putback(ifp, br, m); else drbr_advance(ifp, br); break; } drbr_advance(ifp, br); enq++; ETHER_BPF_MTAP(ifp, m); } if (enq > 0 && vtnet_txq_notify(txq) != 0) { if (tries++ < VTNET_NOTIFY_RETRIES) goto again; txq->vtntx_stats.vtxs_rescheduled++; taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask); } return (0); } static int vtnet_txq_mq_start(struct ifnet *ifp, struct mbuf *m) { struct vtnet_softc *sc; struct vtnet_txq *txq; int i, npairs, error; sc = ifp->if_softc; npairs = sc->vtnet_act_vq_pairs; /* check if flowid is set */ if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) i = m->m_pkthdr.flowid % npairs; else i = curcpu % npairs; txq = &sc->vtnet_txqs[i]; if (VTNET_TXQ_TRYLOCK(txq) != 0) { error = vtnet_txq_mq_start_locked(txq, m); VTNET_TXQ_UNLOCK(txq); } else { error = drbr_enqueue(ifp, txq->vtntx_br, m); taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_defrtask); } return (error); } static void vtnet_txq_tq_deferred(void *xtxq, int pending) { struct vtnet_softc *sc; struct vtnet_txq *txq; txq = xtxq; sc = txq->vtntx_sc; VTNET_TXQ_LOCK(txq); if (!drbr_empty(sc->vtnet_ifp, txq->vtntx_br)) vtnet_txq_mq_start_locked(txq, NULL); VTNET_TXQ_UNLOCK(txq); } #endif /* VTNET_LEGACY_TX */ static void vtnet_txq_start(struct vtnet_txq *txq) { struct vtnet_softc *sc; struct ifnet *ifp; sc = txq->vtntx_sc; ifp = sc->vtnet_ifp; #ifdef VTNET_LEGACY_TX if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) vtnet_start_locked(txq, ifp); #else if (!drbr_empty(ifp, txq->vtntx_br)) vtnet_txq_mq_start_locked(txq, NULL); #endif } static void vtnet_txq_tq_intr(void *xtxq, int pending) { struct vtnet_softc *sc; struct vtnet_txq *txq; struct ifnet *ifp; txq = xtxq; sc = txq->vtntx_sc; ifp = sc->vtnet_ifp; VTNET_TXQ_LOCK(txq); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { VTNET_TXQ_UNLOCK(txq); return; } vtnet_txq_eof(txq); vtnet_txq_start(txq); VTNET_TXQ_UNLOCK(txq); } static int vtnet_txq_eof(struct vtnet_txq *txq) { struct virtqueue *vq; struct vtnet_tx_header *txhdr; struct mbuf *m; int deq; vq = txq->vtntx_vq; deq = 0; VTNET_TXQ_LOCK_ASSERT(txq); while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) { m = txhdr->vth_mbuf; deq++; txq->vtntx_stats.vtxs_opackets++; txq->vtntx_stats.vtxs_obytes += m->m_pkthdr.len; if (m->m_flags & M_MCAST) txq->vtntx_stats.vtxs_omcasts++; m_freem(m); uma_zfree(vtnet_tx_header_zone, txhdr); } if (virtqueue_empty(vq)) txq->vtntx_watchdog = 0; return (deq); } static void vtnet_tx_vq_intr(void *xtxq) { struct vtnet_softc *sc; struct vtnet_txq *txq; struct ifnet *ifp; txq = xtxq; sc = txq->vtntx_sc; ifp = sc->vtnet_ifp; if (__predict_false(txq->vtntx_id >= sc->vtnet_act_vq_pairs)) { /* * Ignore this interrupt. Either this is a spurious interrupt * or multiqueue without per-VQ MSIX so every queue needs to * be polled (a brain dead configuration we could try harder * to avoid). 
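		 * Queues at or above vtnet_act_vq_pairs are not in service,
		 * so the interrupt is disabled instead of being processed.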
*/ vtnet_txq_disable_intr(txq); return; } #ifdef DEV_NETMAP if (netmap_tx_irq(ifp, txq->vtntx_id) != NM_IRQ_PASS) return; #endif /* DEV_NETMAP */ VTNET_TXQ_LOCK(txq); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { VTNET_TXQ_UNLOCK(txq); return; } vtnet_txq_eof(txq); vtnet_txq_start(txq); VTNET_TXQ_UNLOCK(txq); } static void vtnet_tx_start_all(struct vtnet_softc *sc) { struct vtnet_txq *txq; int i; VTNET_CORE_LOCK_ASSERT(sc); for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { txq = &sc->vtnet_txqs[i]; VTNET_TXQ_LOCK(txq); vtnet_txq_start(txq); VTNET_TXQ_UNLOCK(txq); } } #ifndef VTNET_LEGACY_TX static void vtnet_qflush(struct ifnet *ifp) { struct vtnet_softc *sc; struct vtnet_txq *txq; struct mbuf *m; int i; sc = ifp->if_softc; for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { txq = &sc->vtnet_txqs[i]; VTNET_TXQ_LOCK(txq); while ((m = buf_ring_dequeue_sc(txq->vtntx_br)) != NULL) m_freem(m); VTNET_TXQ_UNLOCK(txq); } if_qflush(ifp); } #endif static int vtnet_watchdog(struct vtnet_txq *txq) { struct ifnet *ifp; ifp = txq->vtntx_sc->vtnet_ifp; VTNET_TXQ_LOCK(txq); if (txq->vtntx_watchdog == 1) { /* * Only drain completed frames if the watchdog is about to * expire. If any frames were drained, there may be enough * free descriptors now available to transmit queued frames. * In that case, the timer will immediately be decremented * below, but the timeout is generous enough that should not * be a problem. */ if (vtnet_txq_eof(txq) != 0) vtnet_txq_start(txq); } if (txq->vtntx_watchdog == 0 || --txq->vtntx_watchdog) { VTNET_TXQ_UNLOCK(txq); return (0); } VTNET_TXQ_UNLOCK(txq); if_printf(ifp, "watchdog timeout on queue %d\n", txq->vtntx_id); return (1); } static void vtnet_accum_stats(struct vtnet_softc *sc, struct vtnet_rxq_stats *rxacc, struct vtnet_txq_stats *txacc) { bzero(rxacc, sizeof(struct vtnet_rxq_stats)); bzero(txacc, sizeof(struct vtnet_txq_stats)); for (int i = 0; i < sc->vtnet_max_vq_pairs; i++) { struct vtnet_rxq_stats *rxst; struct vtnet_txq_stats *txst; rxst = &sc->vtnet_rxqs[i].vtnrx_stats; rxacc->vrxs_ipackets += rxst->vrxs_ipackets; rxacc->vrxs_ibytes += rxst->vrxs_ibytes; rxacc->vrxs_iqdrops += rxst->vrxs_iqdrops; rxacc->vrxs_csum += rxst->vrxs_csum; rxacc->vrxs_csum_failed += rxst->vrxs_csum_failed; rxacc->vrxs_rescheduled += rxst->vrxs_rescheduled; txst = &sc->vtnet_txqs[i].vtntx_stats; txacc->vtxs_opackets += txst->vtxs_opackets; txacc->vtxs_obytes += txst->vtxs_obytes; txacc->vtxs_csum += txst->vtxs_csum; txacc->vtxs_tso += txst->vtxs_tso; txacc->vtxs_rescheduled += txst->vtxs_rescheduled; } } static uint64_t vtnet_get_counter(if_t ifp, ift_counter cnt) { struct vtnet_softc *sc; struct vtnet_rxq_stats rxaccum; struct vtnet_txq_stats txaccum; sc = if_getsoftc(ifp); vtnet_accum_stats(sc, &rxaccum, &txaccum); switch (cnt) { case IFCOUNTER_IPACKETS: return (rxaccum.vrxs_ipackets); case IFCOUNTER_IQDROPS: return (rxaccum.vrxs_iqdrops); case IFCOUNTER_IERRORS: return (rxaccum.vrxs_ierrors); case IFCOUNTER_OPACKETS: return (txaccum.vtxs_opackets); #ifndef VTNET_LEGACY_TX case IFCOUNTER_OBYTES: return (txaccum.vtxs_obytes); case IFCOUNTER_OMCASTS: return (txaccum.vtxs_omcasts); #endif default: return (if_get_counter_default(ifp, cnt)); } } static void vtnet_tick(void *xsc) { struct vtnet_softc *sc; struct ifnet *ifp; int i, timedout; sc = xsc; ifp = sc->vtnet_ifp; timedout = 0; VTNET_CORE_LOCK_ASSERT(sc); for (i = 0; i < sc->vtnet_act_vq_pairs; i++) timedout |= vtnet_watchdog(&sc->vtnet_txqs[i]); if (timedout != 0) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; vtnet_init_locked(sc); } else 
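		/* No watchdog fired; run the tick again in one second. */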
callout_schedule(&sc->vtnet_tick_ch, hz); } static void vtnet_start_taskqueues(struct vtnet_softc *sc) { device_t dev; struct vtnet_rxq *rxq; struct vtnet_txq *txq; int i, error; dev = sc->vtnet_dev; /* * Errors here are very difficult to recover from - we cannot * easily fail because, if this is during boot, we will hang * when freeing any successfully started taskqueues because * the scheduler isn't up yet. * * Most drivers just ignore the return value - it only fails * with ENOMEM so an error is not likely. */ for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { rxq = &sc->vtnet_rxqs[i]; error = taskqueue_start_threads(&rxq->vtnrx_tq, 1, PI_NET, "%s rxq %d", device_get_nameunit(dev), rxq->vtnrx_id); if (error) { device_printf(dev, "failed to start rx taskq %d\n", rxq->vtnrx_id); } txq = &sc->vtnet_txqs[i]; error = taskqueue_start_threads(&txq->vtntx_tq, 1, PI_NET, "%s txq %d", device_get_nameunit(dev), txq->vtntx_id); if (error) { device_printf(dev, "failed to start tx taskq %d\n", txq->vtntx_id); } } } static void vtnet_free_taskqueues(struct vtnet_softc *sc) { struct vtnet_rxq *rxq; struct vtnet_txq *txq; int i; for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { rxq = &sc->vtnet_rxqs[i]; if (rxq->vtnrx_tq != NULL) { taskqueue_free(rxq->vtnrx_tq); rxq->vtnrx_tq = NULL; } txq = &sc->vtnet_txqs[i]; if (txq->vtntx_tq != NULL) { taskqueue_free(txq->vtntx_tq); txq->vtntx_tq = NULL; } } } static void vtnet_drain_taskqueues(struct vtnet_softc *sc) { struct vtnet_rxq *rxq; struct vtnet_txq *txq; int i; for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { rxq = &sc->vtnet_rxqs[i]; if (rxq->vtnrx_tq != NULL) taskqueue_drain(rxq->vtnrx_tq, &rxq->vtnrx_intrtask); txq = &sc->vtnet_txqs[i]; if (txq->vtntx_tq != NULL) { taskqueue_drain(txq->vtntx_tq, &txq->vtntx_intrtask); #ifndef VTNET_LEGACY_TX taskqueue_drain(txq->vtntx_tq, &txq->vtntx_defrtask); #endif } } } static void vtnet_drain_rxtx_queues(struct vtnet_softc *sc) { struct vtnet_rxq *rxq; struct vtnet_txq *txq; int i; for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { rxq = &sc->vtnet_rxqs[i]; vtnet_rxq_free_mbufs(rxq); txq = &sc->vtnet_txqs[i]; vtnet_txq_free_mbufs(txq); } } static void vtnet_stop_rendezvous(struct vtnet_softc *sc) { struct vtnet_rxq *rxq; struct vtnet_txq *txq; int i; /* * Lock and unlock the per-queue mutex so we known the stop * state is visible. Doing only the active queues should be * sufficient, but it does not cost much extra to do all the * queues. Note we hold the core mutex here too. */ for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { rxq = &sc->vtnet_rxqs[i]; VTNET_RXQ_LOCK(rxq); VTNET_RXQ_UNLOCK(rxq); txq = &sc->vtnet_txqs[i]; VTNET_TXQ_LOCK(txq); VTNET_TXQ_UNLOCK(txq); } } static void vtnet_stop(struct vtnet_softc *sc) { device_t dev; struct ifnet *ifp; dev = sc->vtnet_dev; ifp = sc->vtnet_ifp; VTNET_CORE_LOCK_ASSERT(sc); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; sc->vtnet_link_active = 0; callout_stop(&sc->vtnet_tick_ch); /* Only advisory. */ vtnet_disable_interrupts(sc); /* * Stop the host adapter. This resets it to the pre-initialized * state. It will not generate any interrupts until after it is * reinitialized. */ virtio_stop(dev); vtnet_stop_rendezvous(sc); /* Free any mbufs left in the virtqueues. 
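	 * The device was reset and the per-queue locks were cycled above, so
	 * any queue processing still in flight has observed the stopped state
	 * before the mbufs are freed.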
*/ vtnet_drain_rxtx_queues(sc); } static int vtnet_virtio_reinit(struct vtnet_softc *sc) { device_t dev; struct ifnet *ifp; uint64_t features; int mask, error; dev = sc->vtnet_dev; ifp = sc->vtnet_ifp; features = sc->vtnet_features; mask = 0; #if defined(INET) mask |= IFCAP_RXCSUM; #endif #if defined (INET6) mask |= IFCAP_RXCSUM_IPV6; #endif /* * Re-negotiate with the host, removing any disabled receive * features. Transmit features are disabled only on our side * via if_capenable and if_hwassist. */ if (ifp->if_capabilities & mask) { /* * We require both IPv4 and IPv6 offloading to be enabled * in order to negotiated it: VirtIO does not distinguish * between the two. */ if ((ifp->if_capenable & mask) != mask) features &= ~VIRTIO_NET_F_GUEST_CSUM; } if (ifp->if_capabilities & IFCAP_LRO) { if ((ifp->if_capenable & IFCAP_LRO) == 0) features &= ~VTNET_LRO_FEATURES; } if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) { if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0) features &= ~VIRTIO_NET_F_CTRL_VLAN; } error = virtio_reinit(dev, features); if (error) device_printf(dev, "virtio reinit error %d\n", error); return (error); } static void vtnet_init_rx_filters(struct vtnet_softc *sc) { struct ifnet *ifp; ifp = sc->vtnet_ifp; if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) { /* Restore promiscuous and all-multicast modes. */ vtnet_rx_filter(sc); /* Restore filtered MAC addresses. */ vtnet_rx_filter_mac(sc); } if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) vtnet_rx_filter_vlan(sc); } static int vtnet_init_rx_queues(struct vtnet_softc *sc) { device_t dev; struct vtnet_rxq *rxq; int i, clsize, error; dev = sc->vtnet_dev; /* * Use the new cluster size if one has been set (via a MTU * change). Otherwise, use the standard 2K clusters. * * BMV: It might make sense to use page sized clusters as * the default (depending on the features negotiated). */ if (sc->vtnet_rx_new_clsize != 0) { clsize = sc->vtnet_rx_new_clsize; sc->vtnet_rx_new_clsize = 0; } else clsize = MCLBYTES; sc->vtnet_rx_clsize = clsize; sc->vtnet_rx_nmbufs = VTNET_NEEDED_RX_MBUFS(sc, clsize); KASSERT(sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS || sc->vtnet_rx_nmbufs < sc->vtnet_rx_nsegs, ("%s: too many rx mbufs %d for %d segments", __func__, sc->vtnet_rx_nmbufs, sc->vtnet_rx_nsegs)); for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { rxq = &sc->vtnet_rxqs[i]; /* Hold the lock to satisfy asserts. */ VTNET_RXQ_LOCK(rxq); error = vtnet_rxq_populate(rxq); VTNET_RXQ_UNLOCK(rxq); if (error) { device_printf(dev, "cannot allocate mbufs for Rx queue %d\n", i); return (error); } } return (0); } static int vtnet_init_tx_queues(struct vtnet_softc *sc) { struct vtnet_txq *txq; int i; for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { txq = &sc->vtnet_txqs[i]; txq->vtntx_watchdog = 0; } return (0); } static int vtnet_init_rxtx_queues(struct vtnet_softc *sc) { int error; error = vtnet_init_rx_queues(sc); if (error) return (error); error = vtnet_init_tx_queues(sc); if (error) return (error); return (0); } static void vtnet_set_active_vq_pairs(struct vtnet_softc *sc) { device_t dev; int npairs; dev = sc->vtnet_dev; if ((sc->vtnet_flags & VTNET_FLAG_MULTIQ) == 0) { sc->vtnet_act_vq_pairs = 1; return; } npairs = sc->vtnet_requested_vq_pairs; if (vtnet_ctrl_mq_cmd(sc, npairs) != 0) { device_printf(dev, "cannot set active queue pairs to %d\n", npairs); npairs = 1; } sc->vtnet_act_vq_pairs = npairs; } static int vtnet_reinit(struct vtnet_softc *sc) { struct ifnet *ifp; int error; ifp = sc->vtnet_ifp; /* Use the current MAC address. 
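	 * Copy the ifnet's link-level address into the softc and push it back
	 * to the host, in case it was changed while the interface was down.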
*/ bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN); vtnet_set_hwaddr(sc); vtnet_set_active_vq_pairs(sc); ifp->if_hwassist = 0; if (ifp->if_capenable & IFCAP_TXCSUM) ifp->if_hwassist |= VTNET_CSUM_OFFLOAD; if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) ifp->if_hwassist |= VTNET_CSUM_OFFLOAD_IPV6; if (ifp->if_capenable & IFCAP_TSO4) ifp->if_hwassist |= CSUM_IP_TSO; if (ifp->if_capenable & IFCAP_TSO6) ifp->if_hwassist |= CSUM_IP6_TSO; if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) vtnet_init_rx_filters(sc); error = vtnet_init_rxtx_queues(sc); if (error) return (error); vtnet_enable_interrupts(sc); ifp->if_drv_flags |= IFF_DRV_RUNNING; return (0); } static void vtnet_init_locked(struct vtnet_softc *sc) { device_t dev; struct ifnet *ifp; dev = sc->vtnet_dev; ifp = sc->vtnet_ifp; VTNET_CORE_LOCK_ASSERT(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) return; vtnet_stop(sc); /* Reinitialize with the host. */ if (vtnet_virtio_reinit(sc) != 0) goto fail; if (vtnet_reinit(sc) != 0) goto fail; virtio_reinit_complete(dev); vtnet_update_link_status(sc); callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc); return; fail: vtnet_stop(sc); } static void vtnet_init(void *xsc) { struct vtnet_softc *sc; sc = xsc; VTNET_CORE_LOCK(sc); vtnet_init_locked(sc); VTNET_CORE_UNLOCK(sc); } static void vtnet_free_ctrl_vq(struct vtnet_softc *sc) { struct virtqueue *vq; vq = sc->vtnet_ctrl_vq; /* * The control virtqueue is only polled and therefore it should * already be empty. */ KASSERT(virtqueue_empty(vq), ("%s: ctrl vq %p not empty", __func__, vq)); } static void vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie, struct sglist *sg, int readable, int writable) { struct virtqueue *vq; vq = sc->vtnet_ctrl_vq; VTNET_CORE_LOCK_ASSERT(sc); KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ, ("%s: CTRL_VQ feature not negotiated", __func__)); if (!virtqueue_empty(vq)) return; if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0) return; /* * Poll for the response, but the command is likely already * done when we return from the notify. */ virtqueue_notify(vq); virtqueue_poll(vq, NULL); } static int vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr) { struct virtio_net_ctrl_hdr hdr __aligned(2); struct sglist_seg segs[3]; struct sglist sg; uint8_t ack; int error; hdr.class = VIRTIO_NET_CTRL_MAC; hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET; ack = VIRTIO_NET_ERR; sglist_init(&sg, 3, segs); error = 0; error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr)); error |= sglist_append(&sg, hwaddr, ETHER_ADDR_LEN); error |= sglist_append(&sg, &ack, sizeof(uint8_t)); KASSERT(error == 0 && sg.sg_nseg == 3, ("%s: error %d adding set MAC msg to sglist", __func__, error)); vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1); return (ack == VIRTIO_NET_OK ? 
0 : EIO); } static int vtnet_ctrl_mq_cmd(struct vtnet_softc *sc, uint16_t npairs) { struct sglist_seg segs[3]; struct sglist sg; struct { struct virtio_net_ctrl_hdr hdr; uint8_t pad1; struct virtio_net_ctrl_mq mq; uint8_t pad2; uint8_t ack; } s __aligned(2); int error; s.hdr.class = VIRTIO_NET_CTRL_MQ; s.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET; s.mq.virtqueue_pairs = npairs; s.ack = VIRTIO_NET_ERR; sglist_init(&sg, 3, segs); error = 0; error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); error |= sglist_append(&sg, &s.mq, sizeof(struct virtio_net_ctrl_mq)); error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); KASSERT(error == 0 && sg.sg_nseg == 3, ("%s: error %d adding MQ message to sglist", __func__, error)); vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); return (s.ack == VIRTIO_NET_OK ? 0 : EIO); } static int vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on) { struct sglist_seg segs[3]; struct sglist sg; struct { struct virtio_net_ctrl_hdr hdr; uint8_t pad1; uint8_t onoff; uint8_t pad2; uint8_t ack; } s __aligned(2); int error; KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX, ("%s: CTRL_RX feature not negotiated", __func__)); s.hdr.class = VIRTIO_NET_CTRL_RX; s.hdr.cmd = cmd; s.onoff = !!on; s.ack = VIRTIO_NET_ERR; sglist_init(&sg, 3, segs); error = 0; error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t)); error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); KASSERT(error == 0 && sg.sg_nseg == 3, ("%s: error %d adding Rx message to sglist", __func__, error)); vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); return (s.ack == VIRTIO_NET_OK ? 0 : EIO); } static int vtnet_set_promisc(struct vtnet_softc *sc, int on) { return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on)); } static int vtnet_set_allmulti(struct vtnet_softc *sc, int on) { return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on)); } /* * The device defaults to promiscuous mode for backwards compatibility. * Turn it off at attach time if possible. */ static void vtnet_attach_disable_promisc(struct vtnet_softc *sc) { struct ifnet *ifp; ifp = sc->vtnet_ifp; VTNET_CORE_LOCK(sc); if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0) { ifp->if_flags |= IFF_PROMISC; } else if (vtnet_set_promisc(sc, 0) != 0) { ifp->if_flags |= IFF_PROMISC; device_printf(sc->vtnet_dev, "cannot disable default promiscuous mode\n"); } VTNET_CORE_UNLOCK(sc); } static void vtnet_rx_filter(struct vtnet_softc *sc) { device_t dev; struct ifnet *ifp; dev = sc->vtnet_dev; ifp = sc->vtnet_ifp; VTNET_CORE_LOCK_ASSERT(sc); if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0) device_printf(dev, "cannot %s promiscuous mode\n", ifp->if_flags & IFF_PROMISC ? "enable" : "disable"); if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0) device_printf(dev, "cannot %s all-multicast mode\n", ifp->if_flags & IFF_ALLMULTI ? 
"enable" : "disable"); } static u_int vtnet_copy_ifaddr(void *arg, struct sockaddr_dl *sdl, u_int ucnt) { struct vtnet_softc *sc = arg; if (memcmp(LLADDR(sdl), sc->vtnet_hwaddr, ETHER_ADDR_LEN) == 0) return (0); if (ucnt < VTNET_MAX_MAC_ENTRIES) bcopy(LLADDR(sdl), &sc->vtnet_mac_filter->vmf_unicast.macs[ucnt], ETHER_ADDR_LEN); return (1); } static u_int vtnet_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt) { struct vtnet_mac_filter *filter = arg; if (mcnt < VTNET_MAX_MAC_ENTRIES) bcopy(LLADDR(sdl), &filter->vmf_multicast.macs[mcnt], ETHER_ADDR_LEN); return (1); } static void vtnet_rx_filter_mac(struct vtnet_softc *sc) { struct virtio_net_ctrl_hdr hdr __aligned(2); struct vtnet_mac_filter *filter; struct sglist_seg segs[4]; struct sglist sg; struct ifnet *ifp; bool promisc, allmulti; u_int ucnt, mcnt; int error; uint8_t ack; ifp = sc->vtnet_ifp; filter = sc->vtnet_mac_filter; VTNET_CORE_LOCK_ASSERT(sc); KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX, ("%s: CTRL_RX feature not negotiated", __func__)); /* Unicast MAC addresses: */ ucnt = if_foreach_lladdr(ifp, vtnet_copy_ifaddr, sc); promisc = (ucnt > VTNET_MAX_MAC_ENTRIES); if (promisc) { filter->vmf_unicast.nentries = 0; if_printf(ifp, "more than %d MAC addresses assigned, " "falling back to promiscuous mode\n", VTNET_MAX_MAC_ENTRIES); } else filter->vmf_unicast.nentries = ucnt; /* Multicast MAC addresses: */ mcnt = if_foreach_llmaddr(ifp, vtnet_copy_maddr, filter); allmulti = (mcnt > VTNET_MAX_MAC_ENTRIES); if (allmulti) { filter->vmf_multicast.nentries = 0; if_printf(ifp, "more than %d multicast MAC addresses " "assigned, falling back to all-multicast mode\n", VTNET_MAX_MAC_ENTRIES); } else filter->vmf_multicast.nentries = mcnt; if (promisc && allmulti) goto out; hdr.class = VIRTIO_NET_CTRL_MAC; hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET; ack = VIRTIO_NET_ERR; sglist_init(&sg, 4, segs); error = 0; error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr)); error |= sglist_append(&sg, &filter->vmf_unicast, sizeof(uint32_t) + filter->vmf_unicast.nentries * ETHER_ADDR_LEN); error |= sglist_append(&sg, &filter->vmf_multicast, sizeof(uint32_t) + filter->vmf_multicast.nentries * ETHER_ADDR_LEN); error |= sglist_append(&sg, &ack, sizeof(uint8_t)); KASSERT(error == 0 && sg.sg_nseg == 4, ("%s: error %d adding MAC filter msg to sglist", __func__, error)); vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1); if (ack != VIRTIO_NET_OK) if_printf(ifp, "error setting host MAC filter table\n"); out: if (promisc != 0 && vtnet_set_promisc(sc, 1) != 0) if_printf(ifp, "cannot enable promiscuous mode\n"); if (allmulti != 0 && vtnet_set_allmulti(sc, 1) != 0) if_printf(ifp, "cannot enable all-multicast mode\n"); } static int vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag) { struct sglist_seg segs[3]; struct sglist sg; struct { struct virtio_net_ctrl_hdr hdr; uint8_t pad1; uint16_t tag; uint8_t pad2; uint8_t ack; } s __aligned(2); int error; s.hdr.class = VIRTIO_NET_CTRL_VLAN; s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL; s.tag = tag; s.ack = VIRTIO_NET_ERR; sglist_init(&sg, 3, segs); error = 0; error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); error |= sglist_append(&sg, &s.tag, sizeof(uint16_t)); error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); KASSERT(error == 0 && sg.sg_nseg == 3, ("%s: error %d adding VLAN message to sglist", __func__, error)); vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); return (s.ack == VIRTIO_NET_OK ? 
0 : EIO); } static void vtnet_rx_filter_vlan(struct vtnet_softc *sc) { uint32_t w; uint16_t tag; int i, bit; VTNET_CORE_LOCK_ASSERT(sc); KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER, ("%s: VLAN_FILTER feature not negotiated", __func__)); /* Enable the filter for each configured VLAN. */ for (i = 0; i < VTNET_VLAN_FILTER_NWORDS; i++) { w = sc->vtnet_vlan_filter[i]; while ((bit = ffs(w) - 1) != -1) { w &= ~(1 << bit); tag = sizeof(w) * CHAR_BIT * i + bit; if (vtnet_exec_vlan_filter(sc, 1, tag) != 0) { device_printf(sc->vtnet_dev, "cannot enable VLAN %d filter\n", tag); } } } } static void vtnet_update_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag) { struct ifnet *ifp; int idx, bit; ifp = sc->vtnet_ifp; idx = (tag >> 5) & 0x7F; bit = tag & 0x1F; if (tag == 0 || tag > 4095) return; VTNET_CORE_LOCK(sc); if (add) sc->vtnet_vlan_filter[idx] |= (1 << bit); else sc->vtnet_vlan_filter[idx] &= ~(1 << bit); if (ifp->if_capenable & IFCAP_VLAN_HWFILTER && ifp->if_drv_flags & IFF_DRV_RUNNING && vtnet_exec_vlan_filter(sc, add, tag) != 0) { device_printf(sc->vtnet_dev, "cannot %s VLAN %d %s the host filter table\n", add ? "add" : "remove", tag, add ? "to" : "from"); } VTNET_CORE_UNLOCK(sc); } static void vtnet_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag) { if (ifp->if_softc != arg) return; vtnet_update_vlan_filter(arg, 1, tag); } static void vtnet_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag) { if (ifp->if_softc != arg) return; vtnet_update_vlan_filter(arg, 0, tag); } static int vtnet_is_link_up(struct vtnet_softc *sc) { device_t dev; struct ifnet *ifp; uint16_t status; dev = sc->vtnet_dev; ifp = sc->vtnet_ifp; if ((ifp->if_capabilities & IFCAP_LINKSTATE) == 0) status = VIRTIO_NET_S_LINK_UP; else status = virtio_read_dev_config_2(dev, offsetof(struct virtio_net_config, status)); return ((status & VIRTIO_NET_S_LINK_UP) != 0); } static void vtnet_update_link_status(struct vtnet_softc *sc) { struct ifnet *ifp; int link; ifp = sc->vtnet_ifp; VTNET_CORE_LOCK_ASSERT(sc); link = vtnet_is_link_up(sc); /* Notify if the link status has changed. 
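	 * vtnet_link_active caches the last state reported to the stack, so
	 * repeated polls do not generate duplicate if_link_state_change()
	 * events.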
*/ if (link != 0 && sc->vtnet_link_active == 0) { sc->vtnet_link_active = 1; if_link_state_change(ifp, LINK_STATE_UP); } else if (link == 0 && sc->vtnet_link_active != 0) { sc->vtnet_link_active = 0; if_link_state_change(ifp, LINK_STATE_DOWN); } } static int vtnet_ifmedia_upd(struct ifnet *ifp) { struct vtnet_softc *sc; struct ifmedia *ifm; sc = ifp->if_softc; ifm = &sc->vtnet_media; if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); return (0); } static void vtnet_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct vtnet_softc *sc; sc = ifp->if_softc; ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; VTNET_CORE_LOCK(sc); if (vtnet_is_link_up(sc) != 0) { ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active |= VTNET_MEDIATYPE; } else ifmr->ifm_active |= IFM_NONE; VTNET_CORE_UNLOCK(sc); } static void vtnet_set_hwaddr(struct vtnet_softc *sc) { device_t dev; int i; dev = sc->vtnet_dev; if (sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) { if (vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr) != 0) device_printf(dev, "unable to set MAC address\n"); } else if (sc->vtnet_flags & VTNET_FLAG_MAC) { for (i = 0; i < ETHER_ADDR_LEN; i++) { virtio_write_dev_config_1(dev, offsetof(struct virtio_net_config, mac) + i, sc->vtnet_hwaddr[i]); } } } static void vtnet_get_hwaddr(struct vtnet_softc *sc) { device_t dev; int i; dev = sc->vtnet_dev; if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) { /* * Generate a random locally administered unicast address. * * It would be nice to generate the same MAC address across * reboots, but it seems all the hosts currently available * support the MAC feature, so this isn't too important. */ sc->vtnet_hwaddr[0] = 0xB2; arc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1, 0); vtnet_set_hwaddr(sc); return; } for (i = 0; i < ETHER_ADDR_LEN; i++) { sc->vtnet_hwaddr[i] = virtio_read_dev_config_1(dev, offsetof(struct virtio_net_config, mac) + i); } } static void vtnet_vlan_tag_remove(struct mbuf *m) { struct ether_vlan_header *evh; evh = mtod(m, struct ether_vlan_header *); m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag); m->m_flags |= M_VLANTAG; /* Strip the 802.1Q header. */ bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN, ETHER_HDR_LEN - ETHER_TYPE_LEN); m_adj(m, ETHER_VLAN_ENCAP_LEN); } static void vtnet_set_rx_process_limit(struct vtnet_softc *sc) { int limit; limit = vtnet_tunable_int(sc, "rx_process_limit", vtnet_rx_process_limit); if (limit < 0) limit = INT_MAX; sc->vtnet_rx_process_limit = limit; } static void vtnet_set_tx_intr_threshold(struct vtnet_softc *sc) { int size, thresh; size = virtqueue_size(sc->vtnet_txqs[0].vtntx_vq); /* * The Tx interrupt is disabled until the queue free count falls * below our threshold. Completed frames are drained from the Tx * virtqueue before transmitting new frames and in the watchdog * callout, so the frequency of Tx interrupts is greatly reduced, * at the cost of not freeing mbufs as quickly as they otherwise * would be. * * N.B. We assume all the Tx queues are the same size. */ thresh = size / 4; /* * Without indirect descriptors, leave enough room for the most * segments we handle. 
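	 * For example, a 256 entry virtqueue gives a threshold of 64 free
	 * descriptors; without indirect descriptors it is raised to at least
	 * vtnet_tx_nsegs so a maximally fragmented frame can still be queued.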
*/ if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) == 0 && thresh < sc->vtnet_tx_nsegs) thresh = sc->vtnet_tx_nsegs; sc->vtnet_tx_intr_thresh = thresh; } static void vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child, struct vtnet_rxq *rxq) { struct sysctl_oid *node; struct sysctl_oid_list *list; struct vtnet_rxq_stats *stats; char namebuf[16]; snprintf(namebuf, sizeof(namebuf), "rxq%d", rxq->vtnrx_id); node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "Receive Queue"); list = SYSCTL_CHILDREN(node); stats = &rxq->vtnrx_stats; SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets", CTLFLAG_RD, &stats->vrxs_ipackets, "Receive packets"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes", CTLFLAG_RD, &stats->vrxs_ibytes, "Receive bytes"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops", CTLFLAG_RD, &stats->vrxs_iqdrops, "Receive drops"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors", CTLFLAG_RD, &stats->vrxs_ierrors, "Receive errors"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD, &stats->vrxs_csum, "Receive checksum offloaded"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed", CTLFLAG_RD, &stats->vrxs_csum_failed, "Receive checksum offload failed"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD, &stats->vrxs_rescheduled, "Receive interrupt handler rescheduled"); } static void vtnet_setup_txq_sysctl(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child, struct vtnet_txq *txq) { struct sysctl_oid *node; struct sysctl_oid_list *list; struct vtnet_txq_stats *stats; char namebuf[16]; snprintf(namebuf, sizeof(namebuf), "txq%d", txq->vtntx_id); node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "Transmit Queue"); list = SYSCTL_CHILDREN(node); stats = &txq->vtntx_stats; SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets", CTLFLAG_RD, &stats->vtxs_opackets, "Transmit packets"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes", CTLFLAG_RD, &stats->vtxs_obytes, "Transmit bytes"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts", CTLFLAG_RD, &stats->vtxs_omcasts, "Transmit multicasts"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD, &stats->vtxs_csum, "Transmit checksum offloaded"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD, &stats->vtxs_tso, "Transmit segmentation offloaded"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD, &stats->vtxs_rescheduled, "Transmit interrupt handler rescheduled"); } static void vtnet_setup_queue_sysctl(struct vtnet_softc *sc) { device_t dev; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; struct sysctl_oid_list *child; int i; dev = sc->vtnet_dev; ctx = device_get_sysctl_ctx(dev); tree = device_get_sysctl_tree(dev); child = SYSCTL_CHILDREN(tree); for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { vtnet_setup_rxq_sysctl(ctx, child, &sc->vtnet_rxqs[i]); vtnet_setup_txq_sysctl(ctx, child, &sc->vtnet_txqs[i]); } } static void vtnet_setup_stat_sysctl(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child, struct vtnet_softc *sc) { struct vtnet_statistics *stats; struct vtnet_rxq_stats rxaccum; struct vtnet_txq_stats txaccum; vtnet_accum_stats(sc, &rxaccum, &txaccum); stats = &sc->vtnet_stats; stats->rx_csum_offloaded = rxaccum.vrxs_csum; stats->rx_csum_failed = rxaccum.vrxs_csum_failed; stats->rx_task_rescheduled = rxaccum.vrxs_rescheduled; stats->tx_csum_offloaded = txaccum.vtxs_csum; stats->tx_tso_offloaded = txaccum.vtxs_tso; stats->tx_task_rescheduled = txaccum.vtxs_rescheduled; SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, 
"mbuf_alloc_failed", CTLFLAG_RD, &stats->mbuf_alloc_failed, "Mbuf cluster allocation failures"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_frame_too_large", CTLFLAG_RD, &stats->rx_frame_too_large, "Received frame larger than the mbuf chain"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_enq_replacement_failed", CTLFLAG_RD, &stats->rx_enq_replacement_failed, "Enqueuing the replacement receive mbuf failed"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_mergeable_failed", CTLFLAG_RD, &stats->rx_mergeable_failed, "Mergeable buffers receive failures"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ethtype", CTLFLAG_RD, &stats->rx_csum_bad_ethtype, "Received checksum offloaded buffer with unsupported " "Ethernet type"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ipproto", CTLFLAG_RD, &stats->rx_csum_bad_ipproto, "Received checksum offloaded buffer with incorrect IP protocol"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_offset", CTLFLAG_RD, &stats->rx_csum_bad_offset, "Received checksum offloaded buffer with incorrect offset"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_proto", CTLFLAG_RD, &stats->rx_csum_bad_proto, "Received checksum offloaded buffer with incorrect protocol"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_failed", CTLFLAG_RD, &stats->rx_csum_failed, "Received buffer checksum offload failed"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_offloaded", CTLFLAG_RD, &stats->rx_csum_offloaded, "Received buffer checksum offload succeeded"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_task_rescheduled", CTLFLAG_RD, &stats->rx_task_rescheduled, "Times the receive interrupt task rescheduled itself"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_bad_ethtype", CTLFLAG_RD, &stats->tx_csum_bad_ethtype, "Aborted transmit of checksum offloaded buffer with unknown " "Ethernet type"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_bad_ethtype", CTLFLAG_RD, &stats->tx_tso_bad_ethtype, "Aborted transmit of TSO buffer with unknown Ethernet type"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_not_tcp", CTLFLAG_RD, &stats->tx_tso_not_tcp, "Aborted transmit of TSO buffer with non TCP protocol"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged", CTLFLAG_RD, &stats->tx_defragged, "Transmit mbufs defragged"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defrag_failed", CTLFLAG_RD, &stats->tx_defrag_failed, "Aborted transmit of buffer because defrag failed"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_offloaded", CTLFLAG_RD, &stats->tx_csum_offloaded, "Offloaded checksum of transmitted buffer"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_offloaded", CTLFLAG_RD, &stats->tx_tso_offloaded, "Segmentation offload of transmitted buffer"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_task_rescheduled", CTLFLAG_RD, &stats->tx_task_rescheduled, "Times the transmit interrupt task rescheduled itself"); } static void vtnet_setup_sysctl(struct vtnet_softc *sc) { device_t dev; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; struct sysctl_oid_list *child; dev = sc->vtnet_dev; ctx = device_get_sysctl_ctx(dev); tree = device_get_sysctl_tree(dev); child = SYSCTL_CHILDREN(tree); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_vq_pairs", CTLFLAG_RD, &sc->vtnet_max_vq_pairs, 0, "Maximum number of supported virtqueue pairs"); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "requested_vq_pairs", CTLFLAG_RD, &sc->vtnet_requested_vq_pairs, 0, "Requested number of virtqueue pairs"); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "act_vq_pairs", CTLFLAG_RD, &sc->vtnet_act_vq_pairs, 0, "Number of active 
virtqueue pairs"); vtnet_setup_stat_sysctl(ctx, child, sc); } static int vtnet_rxq_enable_intr(struct vtnet_rxq *rxq) { return (virtqueue_enable_intr(rxq->vtnrx_vq)); } static void vtnet_rxq_disable_intr(struct vtnet_rxq *rxq) { virtqueue_disable_intr(rxq->vtnrx_vq); } static int vtnet_txq_enable_intr(struct vtnet_txq *txq) { struct virtqueue *vq; vq = txq->vtntx_vq; if (vtnet_txq_below_threshold(txq) != 0) return (virtqueue_postpone_intr(vq, VQ_POSTPONE_LONG)); /* * The free count is above our threshold. Keep the Tx interrupt * disabled until the queue is fuller. */ return (0); } static void vtnet_txq_disable_intr(struct vtnet_txq *txq) { virtqueue_disable_intr(txq->vtntx_vq); } static void vtnet_enable_rx_interrupts(struct vtnet_softc *sc) { int i; for (i = 0; i < sc->vtnet_act_vq_pairs; i++) vtnet_rxq_enable_intr(&sc->vtnet_rxqs[i]); } static void vtnet_enable_tx_interrupts(struct vtnet_softc *sc) { int i; for (i = 0; i < sc->vtnet_act_vq_pairs; i++) vtnet_txq_enable_intr(&sc->vtnet_txqs[i]); } static void vtnet_enable_interrupts(struct vtnet_softc *sc) { vtnet_enable_rx_interrupts(sc); vtnet_enable_tx_interrupts(sc); } static void vtnet_disable_rx_interrupts(struct vtnet_softc *sc) { int i; for (i = 0; i < sc->vtnet_act_vq_pairs; i++) vtnet_rxq_disable_intr(&sc->vtnet_rxqs[i]); } static void vtnet_disable_tx_interrupts(struct vtnet_softc *sc) { int i; for (i = 0; i < sc->vtnet_act_vq_pairs; i++) vtnet_txq_disable_intr(&sc->vtnet_txqs[i]); } static void vtnet_disable_interrupts(struct vtnet_softc *sc) { vtnet_disable_rx_interrupts(sc); vtnet_disable_tx_interrupts(sc); } static int vtnet_tunable_int(struct vtnet_softc *sc, const char *knob, int def) { char path[64]; snprintf(path, sizeof(path), "hw.vtnet.%d.%s", device_get_unit(sc->vtnet_dev), knob); TUNABLE_INT_FETCH(path, &def); return (def); } #ifdef DEBUGNET static void vtnet_debugnet_init(struct ifnet *ifp, int *nrxr, int *ncl, int *clsize) { struct vtnet_softc *sc; sc = if_getsoftc(ifp); VTNET_CORE_LOCK(sc); *nrxr = sc->vtnet_max_vq_pairs; *ncl = DEBUGNET_MAX_IN_FLIGHT; *clsize = sc->vtnet_rx_clsize; VTNET_CORE_UNLOCK(sc); } static void vtnet_debugnet_event(struct ifnet *ifp __unused, enum debugnet_ev event __unused) { } static int vtnet_debugnet_transmit(struct ifnet *ifp, struct mbuf *m) { struct vtnet_softc *sc; struct vtnet_txq *txq; int error; sc = if_getsoftc(ifp); if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return (EBUSY); txq = &sc->vtnet_txqs[0]; error = vtnet_txq_encap(txq, &m, M_NOWAIT | M_USE_RESERVE); if (error == 0) (void)vtnet_txq_notify(txq); return (error); } static int vtnet_debugnet_poll(struct ifnet *ifp, int count) { struct vtnet_softc *sc; int i; sc = if_getsoftc(ifp); if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return (EBUSY); (void)vtnet_txq_eof(&sc->vtnet_txqs[0]); for (i = 0; i < sc->vtnet_max_vq_pairs; i++) (void)vtnet_rxq_eof(&sc->vtnet_rxqs[i]); return (0); } #endif /* DEBUGNET */ diff --git a/sys/dev/vnic/nicvf_queues.c b/sys/dev/vnic/nicvf_queues.c index d469067c1cf8..ebef89e31ab1 100644 --- a/sys/dev/vnic/nicvf_queues.c +++ b/sys/dev/vnic/nicvf_queues.c @@ -1,2366 +1,2366 @@ /* * Copyright (C) 2015 Cavium Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ * */ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "thunder_bgx.h" #include "nic_reg.h" #include "nic.h" #include "q_struct.h" #include "nicvf_queues.h" #define DEBUG #undef DEBUG #ifdef DEBUG #define dprintf(dev, fmt, ...) device_printf(dev, fmt, ##__VA_ARGS__) #else #define dprintf(dev, fmt, ...) #endif MALLOC_DECLARE(M_NICVF); static void nicvf_free_snd_queue(struct nicvf *, struct snd_queue *); static struct mbuf * nicvf_get_rcv_mbuf(struct nicvf *, struct cqe_rx_t *); static void nicvf_sq_disable(struct nicvf *, int); static void nicvf_sq_enable(struct nicvf *, struct snd_queue *, int); static void nicvf_put_sq_desc(struct snd_queue *, int); static void nicvf_cmp_queue_config(struct nicvf *, struct queue_set *, int, boolean_t); static void nicvf_sq_free_used_descs(struct nicvf *, struct snd_queue *, int); static int nicvf_tx_mbuf_locked(struct snd_queue *, struct mbuf **); static void nicvf_rbdr_task(void *, int); static void nicvf_rbdr_task_nowait(void *, int); struct rbuf_info { bus_dma_tag_t dmat; bus_dmamap_t dmap; struct mbuf * mbuf; }; #define GET_RBUF_INFO(x) ((struct rbuf_info *)((x) - NICVF_RCV_BUF_ALIGN_BYTES)) /* Poll a register for a specific value */ static int nicvf_poll_reg(struct nicvf *nic, int qidx, uint64_t reg, int bit_pos, int bits, int val) { uint64_t bit_mask; uint64_t reg_val; int timeout = 10; bit_mask = (1UL << bits) - 1; bit_mask = (bit_mask << bit_pos); while (timeout) { reg_val = nicvf_queue_reg_read(nic, reg, qidx); if (((reg_val & bit_mask) >> bit_pos) == val) return (0); DELAY(1000); timeout--; } device_printf(nic->dev, "Poll on reg 0x%lx failed\n", reg); return (ETIMEDOUT); } /* Callback for bus_dmamap_load() */ static void nicvf_dmamap_q_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *paddr; KASSERT(nseg == 1, ("wrong number of segments, should be 1")); paddr = arg; *paddr = segs->ds_addr; } /* Allocate memory for a queue's descriptors */ static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem, int q_len, int desc_size, int align_bytes) { int err, err_dmat; /* Create DMA tag first */ err = bus_dma_tag_create( 
bus_get_dma_tag(nic->dev), /* parent tag */ align_bytes, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ (q_len * desc_size), /* maxsize */ 1, /* nsegments */ (q_len * desc_size), /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &dmem->dmat); /* dmat */ if (err != 0) { device_printf(nic->dev, "Failed to create busdma tag for descriptors ring\n"); return (err); } /* Allocate segment of continuous DMA safe memory */ err = bus_dmamem_alloc( dmem->dmat, /* DMA tag */ &dmem->base, /* virtual address */ (BUS_DMA_NOWAIT | BUS_DMA_ZERO), /* flags */ &dmem->dmap); /* DMA map */ if (err != 0) { device_printf(nic->dev, "Failed to allocate DMA safe memory for" "descriptors ring\n"); goto dmamem_fail; } err = bus_dmamap_load( dmem->dmat, dmem->dmap, dmem->base, (q_len * desc_size), /* allocation size */ nicvf_dmamap_q_cb, /* map to DMA address cb. */ &dmem->phys_base, /* physical address */ BUS_DMA_NOWAIT); if (err != 0) { device_printf(nic->dev, "Cannot load DMA map of descriptors ring\n"); goto dmamap_fail; } dmem->q_len = q_len; dmem->size = (desc_size * q_len); return (0); dmamap_fail: bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap); dmem->phys_base = 0; dmamem_fail: err_dmat = bus_dma_tag_destroy(dmem->dmat); dmem->base = NULL; KASSERT(err_dmat == 0, ("%s: Trying to destroy BUSY DMA tag", __func__)); return (err); } /* Free queue's descriptor memory */ static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem) { int err; if ((dmem == NULL) || (dmem->base == NULL)) return; /* Unload a map */ bus_dmamap_sync(dmem->dmat, dmem->dmap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(dmem->dmat, dmem->dmap); /* Free DMA memory */ bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap); /* Destroy DMA tag */ err = bus_dma_tag_destroy(dmem->dmat); KASSERT(err == 0, ("%s: Trying to destroy BUSY DMA tag", __func__)); dmem->phys_base = 0; dmem->base = NULL; } /* * Allocate buffer for packet reception * HW returns memory address where packet is DMA'ed but not a pointer * into RBDR ring, so save buffer address at the start of fragment and * align the start address to a cache aligned address */ static __inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr, bus_dmamap_t dmap, int mflags, uint32_t buf_len, bus_addr_t *rbuf) { struct mbuf *mbuf; struct rbuf_info *rinfo; bus_dma_segment_t segs[1]; int nsegs; int err; mbuf = m_getjcl(mflags, MT_DATA, M_PKTHDR, MCLBYTES); if (mbuf == NULL) return (ENOMEM); /* * The length is equal to the actual length + one 128b line * used as a room for rbuf_info structure. */ mbuf->m_len = mbuf->m_pkthdr.len = buf_len; err = bus_dmamap_load_mbuf_sg(rbdr->rbdr_buff_dmat, dmap, mbuf, segs, &nsegs, BUS_DMA_NOWAIT); if (err != 0) { device_printf(nic->dev, "Failed to map mbuf into DMA visible memory, err: %d\n", err); m_freem(mbuf); bus_dmamap_destroy(rbdr->rbdr_buff_dmat, dmap); return (err); } if (nsegs != 1) panic("Unexpected number of DMA segments for RB: %d", nsegs); /* * Now use the room for rbuf_info structure * and adjust mbuf data and length. 
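	 * Resulting layout: [rbuf_info, one 128 byte line][packet data]; the
	 * address programmed into the RBDR entry points just past the
	 * metadata.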
*/ rinfo = (struct rbuf_info *)mbuf->m_data; m_adj(mbuf, NICVF_RCV_BUF_ALIGN_BYTES); rinfo->dmat = rbdr->rbdr_buff_dmat; rinfo->dmap = dmap; rinfo->mbuf = mbuf; *rbuf = segs[0].ds_addr + NICVF_RCV_BUF_ALIGN_BYTES; return (0); } /* Retrieve mbuf for received packet */ static struct mbuf * nicvf_rb_ptr_to_mbuf(struct nicvf *nic, bus_addr_t rb_ptr) { struct mbuf *mbuf; struct rbuf_info *rinfo; /* Get buffer start address and alignment offset */ rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(rb_ptr)); /* Now retrieve mbuf to give to stack */ mbuf = rinfo->mbuf; if (__predict_false(mbuf == NULL)) { panic("%s: Received packet fragment with NULL mbuf", device_get_nameunit(nic->dev)); } /* * Clear the mbuf in the descriptor to indicate * that this slot is processed and free to use. */ rinfo->mbuf = NULL; bus_dmamap_sync(rinfo->dmat, rinfo->dmap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rinfo->dmat, rinfo->dmap); return (mbuf); } /* Allocate RBDR ring and populate receive buffers */ static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, int ring_len, int buf_size, int qidx) { bus_dmamap_t dmap; bus_addr_t rbuf; struct rbdr_entry_t *desc; int idx; int err; /* Allocate rbdr descriptors ring */ err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len, sizeof(struct rbdr_entry_t), NICVF_RCV_BUF_ALIGN_BYTES); if (err != 0) { device_printf(nic->dev, "Failed to create RBDR descriptors ring\n"); return (err); } rbdr->desc = rbdr->dmem.base; /* * Buffer size has to be in multiples of 128 bytes. * Make room for metadata of size of one line (128 bytes). */ rbdr->dma_size = buf_size - NICVF_RCV_BUF_ALIGN_BYTES; rbdr->enable = TRUE; rbdr->thresh = RBDR_THRESH; rbdr->nic = nic; rbdr->idx = qidx; /* * Create DMA tag for Rx buffers. * Each map created using this tag is intended to store Rx payload for * one fragment and one header structure containing rbuf_info (thus * additional 128 byte line since RB must be a multiple of 128 byte * cache line). 
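	 * GET_RBUF_INFO() later recovers this metadata by stepping back one
	 * alignment line from the buffer address the hardware reports.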
*/ if (buf_size > MCLBYTES) { device_printf(nic->dev, "Buffer size to large for mbuf cluster\n"); return (EINVAL); } err = bus_dma_tag_create( bus_get_dma_tag(nic->dev), /* parent tag */ NICVF_RCV_BUF_ALIGN_BYTES, /* alignment */ 0, /* boundary */ DMAP_MAX_PHYSADDR, /* lowaddr */ DMAP_MIN_PHYSADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ roundup2(buf_size, MCLBYTES), /* maxsize */ 1, /* nsegments */ roundup2(buf_size, MCLBYTES), /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &rbdr->rbdr_buff_dmat); /* dmat */ if (err != 0) { device_printf(nic->dev, "Failed to create busdma tag for RBDR buffers\n"); return (err); } rbdr->rbdr_buff_dmaps = malloc(sizeof(*rbdr->rbdr_buff_dmaps) * ring_len, M_NICVF, (M_WAITOK | M_ZERO)); for (idx = 0; idx < ring_len; idx++) { err = bus_dmamap_create(rbdr->rbdr_buff_dmat, 0, &dmap); if (err != 0) { device_printf(nic->dev, "Failed to create DMA map for RB\n"); return (err); } rbdr->rbdr_buff_dmaps[idx] = dmap; err = nicvf_alloc_rcv_buffer(nic, rbdr, dmap, M_WAITOK, DMA_BUFFER_LEN, &rbuf); if (err != 0) return (err); desc = GET_RBDR_DESC(rbdr, idx); desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN); } /* Allocate taskqueue */ TASK_INIT(&rbdr->rbdr_task, 0, nicvf_rbdr_task, rbdr); TASK_INIT(&rbdr->rbdr_task_nowait, 0, nicvf_rbdr_task_nowait, rbdr); rbdr->rbdr_taskq = taskqueue_create_fast("nicvf_rbdr_taskq", M_WAITOK, taskqueue_thread_enqueue, &rbdr->rbdr_taskq); taskqueue_start_threads(&rbdr->rbdr_taskq, 1, PI_NET, "%s: rbdr_taskq", device_get_nameunit(nic->dev)); return (0); } /* Free RBDR ring and its receive buffers */ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr) { struct mbuf *mbuf; struct queue_set *qs; struct rbdr_entry_t *desc; struct rbuf_info *rinfo; bus_addr_t buf_addr; int head, tail, idx; int err; qs = nic->qs; if ((qs == NULL) || (rbdr == NULL)) return; rbdr->enable = FALSE; if (rbdr->rbdr_taskq != NULL) { /* Remove tasks */ while (taskqueue_cancel(rbdr->rbdr_taskq, &rbdr->rbdr_task_nowait, NULL) != 0) { /* Finish the nowait task first */ taskqueue_drain(rbdr->rbdr_taskq, &rbdr->rbdr_task_nowait); } taskqueue_free(rbdr->rbdr_taskq); rbdr->rbdr_taskq = NULL; while (taskqueue_cancel(taskqueue_thread, &rbdr->rbdr_task, NULL) != 0) { /* Now finish the sleepable task */ taskqueue_drain(taskqueue_thread, &rbdr->rbdr_task); } } /* * Free all of the memory under the RB descriptors. * There are assumptions here: * 1. Corresponding RBDR is disabled * - it is safe to operate using head and tail indexes * 2. All bffers that were received are properly freed by * the receive handler * - there is no need to unload DMA map and free MBUF for other * descriptors than unused ones */ if (rbdr->rbdr_buff_dmat != NULL) { head = rbdr->head; tail = rbdr->tail; while (head != tail) { desc = GET_RBDR_DESC(rbdr, head); buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN; rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr)); bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap); mbuf = rinfo->mbuf; /* This will destroy everything including rinfo! */ m_freem(mbuf); head++; head &= (rbdr->dmem.q_len - 1); } /* Free tail descriptor */ desc = GET_RBDR_DESC(rbdr, tail); buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN; rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr)); bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap); mbuf = rinfo->mbuf; /* This will destroy everything including rinfo! 
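		 * rinfo is carved out of the mbuf's own storage, so it must
		 * not be touched once the mbuf has been freed.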
*/ m_freem(mbuf); /* Destroy DMA maps */ for (idx = 0; idx < qs->rbdr_len; idx++) { if (rbdr->rbdr_buff_dmaps[idx] == NULL) continue; err = bus_dmamap_destroy(rbdr->rbdr_buff_dmat, rbdr->rbdr_buff_dmaps[idx]); KASSERT(err == 0, ("%s: Could not destroy DMA map for RB, desc: %d", __func__, idx)); rbdr->rbdr_buff_dmaps[idx] = NULL; } /* Now destroy the tag */ err = bus_dma_tag_destroy(rbdr->rbdr_buff_dmat); KASSERT(err == 0, ("%s: Trying to destroy BUSY DMA tag", __func__)); rbdr->head = 0; rbdr->tail = 0; } /* Free RBDR ring */ nicvf_free_q_desc_mem(nic, &rbdr->dmem); } /* * Refill receive buffer descriptors with new buffers. */ static int nicvf_refill_rbdr(struct rbdr *rbdr, int mflags) { struct nicvf *nic; struct queue_set *qs; int rbdr_idx; int tail, qcount; int refill_rb_cnt; struct rbdr_entry_t *desc; bus_dmamap_t dmap; bus_addr_t rbuf; boolean_t rb_alloc_fail; int new_rb; rb_alloc_fail = TRUE; new_rb = 0; nic = rbdr->nic; qs = nic->qs; rbdr_idx = rbdr->idx; /* Check if it's enabled */ if (!rbdr->enable) return (0); /* Get no of desc's to be refilled */ qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx); qcount &= 0x7FFFF; /* Doorbell can be ringed with a max of ring size minus 1 */ if (qcount >= (qs->rbdr_len - 1)) { rb_alloc_fail = FALSE; goto out; } else refill_rb_cnt = qs->rbdr_len - qcount - 1; /* Start filling descs from tail */ tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3; while (refill_rb_cnt) { tail++; tail &= (rbdr->dmem.q_len - 1); dmap = rbdr->rbdr_buff_dmaps[tail]; if (nicvf_alloc_rcv_buffer(nic, rbdr, dmap, mflags, DMA_BUFFER_LEN, &rbuf)) { /* Something went wrong. Resign */ break; } desc = GET_RBDR_DESC(rbdr, tail); desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN); refill_rb_cnt--; new_rb++; } /* make sure all memory stores are done before ringing doorbell */ wmb(); /* Check if buffer allocation failed */ if (refill_rb_cnt == 0) rb_alloc_fail = FALSE; /* Notify HW */ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, rbdr_idx, new_rb); out: if (!rb_alloc_fail) { /* * Re-enable RBDR interrupts only * if buffer allocation is success. */ nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx); return (0); } return (ENOMEM); } /* Refill RBs even if sleep is needed to reclaim memory */ static void nicvf_rbdr_task(void *arg, int pending) { struct rbdr *rbdr; int err; rbdr = (struct rbdr *)arg; err = nicvf_refill_rbdr(rbdr, M_WAITOK); if (__predict_false(err != 0)) { panic("%s: Failed to refill RBs even when sleep enabled", __func__); } } /* Refill RBs as soon as possible without waiting */ static void nicvf_rbdr_task_nowait(void *arg, int pending) { struct rbdr *rbdr; int err; rbdr = (struct rbdr *)arg; err = nicvf_refill_rbdr(rbdr, M_NOWAIT); if (err != 0) { /* * Schedule another, sleepable kernel thread * that will for sure refill the buffers. 
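/*
 * Editorial sketch (not part of the driver): nicvf_refill_rbdr() above reads
 * how many descriptors the hardware still holds and tops the ring back up,
 * always leaving one slot unused because the doorbell may only be rung with
 * at most ring-size minus one buffers.  A standalone model of that
 * arithmetic, with illustrative numbers:
 */
#include <stdio.h>

static int
refill_count(int ring_len, int hw_count)
{
	if (hw_count >= ring_len - 1)
		return (0);		/* ring already holds enough buffers */
	return (ring_len - hw_count - 1);
}

int
main(void)
{
	printf("%d\n", refill_count(8192, 8000));	/* prints 191 */
	printf("%d\n", refill_count(8192, 8191));	/* prints 0 */
	return (0);
}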
*/ taskqueue_enqueue(taskqueue_thread, &rbdr->rbdr_task); } } static int nicvf_rcv_pkt_handler(struct nicvf *nic, struct cmp_queue *cq, struct cqe_rx_t *cqe_rx, int cqe_type) { struct mbuf *mbuf; struct rcv_queue *rq; int rq_idx; int err = 0; rq_idx = cqe_rx->rq_idx; rq = &nic->qs->rq[rq_idx]; /* Check for errors */ err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx); if (err && !cqe_rx->rb_cnt) return (0); mbuf = nicvf_get_rcv_mbuf(nic, cqe_rx); if (mbuf == NULL) { dprintf(nic->dev, "Packet not received\n"); return (0); } /* If error packet */ if (err != 0) { m_freem(mbuf); return (0); } if (rq->lro_enabled && ((cqe_rx->l3_type == L3TYPE_IPV4) && (cqe_rx->l4_type == L4TYPE_TCP)) && (mbuf->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) == (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) { /* * At this point it is known that there are no errors in the * packet. Attempt to LRO enqueue. Send to stack if no resources * or enqueue error. */ if ((rq->lro.lro_cnt != 0) && (tcp_lro_rx(&rq->lro, mbuf, 0) == 0)) return (0); } /* * Push this packet to the stack later to avoid * unlocking completion task in the middle of work. */ err = buf_ring_enqueue(cq->rx_br, mbuf); if (err != 0) { /* * Failed to enqueue this mbuf. * We don't drop it, just schedule another task. */ return (err); } return (0); } static void nicvf_snd_pkt_handler(struct nicvf *nic, struct cmp_queue *cq, struct cqe_send_t *cqe_tx, int cqe_type) { bus_dmamap_t dmap; struct mbuf *mbuf; struct snd_queue *sq; struct sq_hdr_subdesc *hdr; mbuf = NULL; sq = &nic->qs->sq[cqe_tx->sq_idx]; hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr); if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) return; dprintf(nic->dev, "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n", __func__, cqe_tx->sq_qs, cqe_tx->sq_idx, cqe_tx->sqe_ptr, hdr->subdesc_cnt); dmap = (bus_dmamap_t)sq->snd_buff[cqe_tx->sqe_ptr].dmap; bus_dmamap_unload(sq->snd_buff_dmat, dmap); mbuf = (struct mbuf *)sq->snd_buff[cqe_tx->sqe_ptr].mbuf; if (mbuf != NULL) { m_freem(mbuf); sq->snd_buff[cqe_tx->sqe_ptr].mbuf = NULL; nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); } nicvf_check_cqe_tx_errs(nic, cq, cqe_tx); } static int nicvf_cq_intr_handler(struct nicvf *nic, uint8_t cq_idx) { struct mbuf *mbuf; struct ifnet *ifp; int processed_cqe, work_done = 0, tx_done = 0; int cqe_count, cqe_head; struct queue_set *qs = nic->qs; struct cmp_queue *cq = &qs->cq[cq_idx]; struct snd_queue *sq = &qs->sq[cq_idx]; struct rcv_queue *rq; struct cqe_rx_t *cq_desc; struct lro_ctrl *lro; int rq_idx; int cmp_err; NICVF_CMP_LOCK(cq); cmp_err = 0; processed_cqe = 0; /* Get no of valid CQ entries to process */ cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx); cqe_count &= CQ_CQE_COUNT; if (cqe_count == 0) goto out; /* Get head of the valid CQ entries */ cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9; cqe_head &= 0xFFFF; dprintf(nic->dev, "%s CQ%d cqe_count %d cqe_head %d\n", __func__, cq_idx, cqe_count, cqe_head); while (processed_cqe < cqe_count) { /* Get the CQ descriptor */ cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head); cqe_head++; cqe_head &= (cq->dmem.q_len - 1); /* Prefetch next CQ descriptor */ __builtin_prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head)); dprintf(nic->dev, "CQ%d cq_desc->cqe_type %d\n", cq_idx, cq_desc->cqe_type); switch (cq_desc->cqe_type) { case CQE_TYPE_RX: cmp_err = nicvf_rcv_pkt_handler(nic, cq, cq_desc, CQE_TYPE_RX); if (__predict_false(cmp_err != 0)) { /* * Ups. Cannot finish now. * Let's try again later. 
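/*
 * Editorial sketch (not part of the driver): nicvf_cq_intr_handler() above
 * recovers the valid-entry count and the head index from 64-bit status
 * registers with shifts and masks (the head is taken as "(reg >> 9) &
 * 0xFFFF").  The helper below just restates that pattern; the example
 * register value is made up.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t
reg_field(uint64_t reg, unsigned int shift, unsigned int width)
{
	return ((reg >> shift) & ((UINT64_C(1) << width) - 1));
}

int
main(void)
{
	uint64_t head_reg = UINT64_C(0x12345600);	/* made-up register image */

	printf("head index: %" PRIu64 "\n", reg_field(head_reg, 9, 16));
	return (0);
}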
*/ goto done; } work_done++; break; case CQE_TYPE_SEND: nicvf_snd_pkt_handler(nic, cq, (void *)cq_desc, CQE_TYPE_SEND); tx_done++; break; case CQE_TYPE_INVALID: case CQE_TYPE_RX_SPLIT: case CQE_TYPE_RX_TCP: case CQE_TYPE_SEND_PTP: /* Ignore for now */ break; } processed_cqe++; } done: dprintf(nic->dev, "%s CQ%d processed_cqe %d work_done %d\n", __func__, cq_idx, processed_cqe, work_done); /* Ring doorbell to inform H/W to reuse processed CQEs */ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, cq_idx, processed_cqe); if ((tx_done > 0) && ((if_getdrvflags(nic->ifp) & IFF_DRV_RUNNING) != 0)) { /* Reenable TXQ if its stopped earlier due to SQ full */ if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); taskqueue_enqueue(sq->snd_taskq, &sq->snd_task); } out: /* * Flush any outstanding LRO work */ rq_idx = cq_idx; rq = &nic->qs->rq[rq_idx]; lro = &rq->lro; tcp_lro_flush_all(lro); NICVF_CMP_UNLOCK(cq); ifp = nic->ifp; /* Push received MBUFs to the stack */ while (!buf_ring_empty(cq->rx_br)) { mbuf = buf_ring_dequeue_mc(cq->rx_br); if (__predict_true(mbuf != NULL)) (*ifp->if_input)(ifp, mbuf); } return (cmp_err); } /* * Qset error interrupt handler * * As of now only CQ errors are handled */ static void nicvf_qs_err_task(void *arg, int pending) { struct nicvf *nic; struct queue_set *qs; int qidx; uint64_t status; boolean_t enable = TRUE; nic = (struct nicvf *)arg; qs = nic->qs; /* Deactivate network interface */ if_setdrvflagbits(nic->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); /* Check if it is CQ err */ for (qidx = 0; qidx < qs->cq_cnt; qidx++) { status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, qidx); if ((status & CQ_ERR_MASK) == 0) continue; /* Process already queued CQEs and reconfig CQ */ nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx); nicvf_sq_disable(nic, qidx); (void)nicvf_cq_intr_handler(nic, qidx); nicvf_cmp_queue_config(nic, qs, qidx, enable); nicvf_sq_free_used_descs(nic, &qs->sq[qidx], qidx); nicvf_sq_enable(nic, &qs->sq[qidx], qidx); nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx); } if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); /* Re-enable Qset error interrupt */ nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0); } static void nicvf_cmp_task(void *arg, int pending) { struct cmp_queue *cq; struct nicvf *nic; int cmp_err; cq = (struct cmp_queue *)arg; nic = cq->nic; /* Handle CQ descriptors */ cmp_err = nicvf_cq_intr_handler(nic, cq->idx); if (__predict_false(cmp_err != 0)) { /* * Schedule another thread here since we did not * process the entire CQ due to Tx or Rx CQ parse error. */ taskqueue_enqueue(cq->cmp_taskq, &cq->cmp_task); } nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx); /* Reenable interrupt (previously disabled in nicvf_intr_handler() */ nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->idx); } /* Initialize completion queue */ static int nicvf_init_cmp_queue(struct nicvf *nic, struct cmp_queue *cq, int q_len, int qidx) { int err; /* Initizalize lock */ snprintf(cq->mtx_name, sizeof(cq->mtx_name), "%s: CQ(%d) lock", device_get_nameunit(nic->dev), qidx); mtx_init(&cq->mtx, cq->mtx_name, NULL, MTX_DEF); err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE, NICVF_CQ_BASE_ALIGN_BYTES); if (err != 0) { device_printf(nic->dev, "Could not allocate DMA memory for CQ\n"); return (err); } cq->desc = cq->dmem.base; cq->thresh = pass1_silicon(nic->dev) ? 
0 : CMP_QUEUE_CQE_THRESH; cq->nic = nic; cq->idx = qidx; nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1; cq->rx_br = buf_ring_alloc(CMP_QUEUE_LEN * 8, M_DEVBUF, M_WAITOK, &cq->mtx); /* Allocate taskqueue */ - TASK_INIT(&cq->cmp_task, 0, nicvf_cmp_task, cq); + NET_TASK_INIT(&cq->cmp_task, 0, nicvf_cmp_task, cq); cq->cmp_taskq = taskqueue_create_fast("nicvf_cmp_taskq", M_WAITOK, taskqueue_thread_enqueue, &cq->cmp_taskq); taskqueue_start_threads(&cq->cmp_taskq, 1, PI_NET, "%s: cmp_taskq(%d)", device_get_nameunit(nic->dev), qidx); return (0); } static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq) { if (cq == NULL) return; /* * The completion queue itself should be disabled by now * (ref. nicvf_snd_queue_config()). * Ensure that it is safe to disable it or panic. */ if (cq->enable) panic("%s: Trying to free working CQ(%d)", __func__, cq->idx); if (cq->cmp_taskq != NULL) { /* Remove task */ while (taskqueue_cancel(cq->cmp_taskq, &cq->cmp_task, NULL) != 0) taskqueue_drain(cq->cmp_taskq, &cq->cmp_task); taskqueue_free(cq->cmp_taskq); cq->cmp_taskq = NULL; } /* * Completion interrupt will possibly enable interrupts again * so disable interrupting now after we finished processing * completion task. It is safe to do so since the corresponding CQ * was already disabled. */ nicvf_disable_intr(nic, NICVF_INTR_CQ, cq->idx); nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx); NICVF_CMP_LOCK(cq); nicvf_free_q_desc_mem(nic, &cq->dmem); drbr_free(cq->rx_br, M_DEVBUF); NICVF_CMP_UNLOCK(cq); mtx_destroy(&cq->mtx); memset(cq->mtx_name, 0, sizeof(cq->mtx_name)); } int nicvf_xmit_locked(struct snd_queue *sq) { struct nicvf *nic; struct ifnet *ifp; struct mbuf *next; int err; NICVF_TX_LOCK_ASSERT(sq); nic = sq->nic; ifp = nic->ifp; err = 0; while ((next = drbr_peek(ifp, sq->br)) != NULL) { /* Send a copy of the frame to the BPF listener */ ETHER_BPF_MTAP(ifp, next); err = nicvf_tx_mbuf_locked(sq, &next); if (err != 0) { if (next == NULL) drbr_advance(ifp, sq->br); else drbr_putback(ifp, sq->br, next); break; } drbr_advance(ifp, sq->br); } return (err); } static void nicvf_snd_task(void *arg, int pending) { struct snd_queue *sq = (struct snd_queue *)arg; struct nicvf *nic; struct ifnet *ifp; int err; nic = sq->nic; ifp = nic->ifp; /* * Skip sending anything if the driver is not running, * SQ full or link is down. 
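/*
 * Editorial sketch (not part of the driver): nicvf_xmit_locked() above drains
 * the buf_ring with the usual peek/advance/putback discipline -- a frame is
 * consumed only after the hardware accepted it, otherwise it stays queued for
 * a later attempt.  A tiny userspace model of that flow, with made-up frames
 * and a simulated "hardware full" condition:
 */
#include <stdbool.h>
#include <stdio.h>

#define EX_NFRAMES	4

int
main(void)
{
	int frames[EX_NFRAMES] = { 10, 11, 12, 13 };
	int head = 0;
	bool hw_full = false;

	while (head < EX_NFRAMES) {
		int frame = frames[head];	/* "peek": look, don't consume */

		if (frame == 12)		/* pretend the ring fills up here */
			hw_full = true;
		if (hw_full) {
			printf("putback frame %d, retry later\n", frame);
			break;			/* "putback": leave it queued */
		}
		printf("sent frame %d\n", frame);
		head++;				/* "advance": now it is consumed */
	}
	return (0);
}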
*/ if (((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) || !nic->link_up) return; NICVF_TX_LOCK(sq); err = nicvf_xmit_locked(sq); NICVF_TX_UNLOCK(sq); /* Try again */ if (err != 0) taskqueue_enqueue(sq->snd_taskq, &sq->snd_task); } /* Initialize transmit queue */ static int nicvf_init_snd_queue(struct nicvf *nic, struct snd_queue *sq, int q_len, int qidx) { size_t i; int err; /* Initizalize TX lock for this queue */ snprintf(sq->mtx_name, sizeof(sq->mtx_name), "%s: SQ(%d) lock", device_get_nameunit(nic->dev), qidx); mtx_init(&sq->mtx, sq->mtx_name, NULL, MTX_DEF); NICVF_TX_LOCK(sq); /* Allocate buffer ring */ sq->br = buf_ring_alloc(q_len / MIN_SQ_DESC_PER_PKT_XMIT, M_DEVBUF, M_NOWAIT, &sq->mtx); if (sq->br == NULL) { device_printf(nic->dev, "ERROR: Could not set up buf ring for SQ(%d)\n", qidx); err = ENOMEM; goto error; } /* Allocate DMA memory for Tx descriptors */ err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE, NICVF_SQ_BASE_ALIGN_BYTES); if (err != 0) { device_printf(nic->dev, "Could not allocate DMA memory for SQ\n"); goto error; } sq->desc = sq->dmem.base; sq->head = sq->tail = 0; atomic_store_rel_int(&sq->free_cnt, q_len - 1); sq->thresh = SND_QUEUE_THRESH; sq->idx = qidx; sq->nic = nic; /* * Allocate DMA maps for Tx buffers */ /* Create DMA tag first */ err = bus_dma_tag_create( bus_get_dma_tag(nic->dev), /* parent tag */ 1, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ NICVF_TSO_MAXSIZE, /* maxsize */ NICVF_TSO_NSEGS, /* nsegments */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &sq->snd_buff_dmat); /* dmat */ if (err != 0) { device_printf(nic->dev, "Failed to create busdma tag for Tx buffers\n"); goto error; } /* Allocate send buffers array */ sq->snd_buff = malloc(sizeof(*sq->snd_buff) * q_len, M_NICVF, (M_NOWAIT | M_ZERO)); if (sq->snd_buff == NULL) { device_printf(nic->dev, "Could not allocate memory for Tx buffers array\n"); err = ENOMEM; goto error; } /* Now populate maps */ for (i = 0; i < q_len; i++) { err = bus_dmamap_create(sq->snd_buff_dmat, 0, &sq->snd_buff[i].dmap); if (err != 0) { device_printf(nic->dev, "Failed to create DMA maps for Tx buffers\n"); goto error; } } NICVF_TX_UNLOCK(sq); /* Allocate taskqueue */ TASK_INIT(&sq->snd_task, 0, nicvf_snd_task, sq); sq->snd_taskq = taskqueue_create_fast("nicvf_snd_taskq", M_WAITOK, taskqueue_thread_enqueue, &sq->snd_taskq); taskqueue_start_threads(&sq->snd_taskq, 1, PI_NET, "%s: snd_taskq(%d)", device_get_nameunit(nic->dev), qidx); return (0); error: NICVF_TX_UNLOCK(sq); return (err); } static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq) { struct queue_set *qs = nic->qs; size_t i; int err; if (sq == NULL) return; if (sq->snd_taskq != NULL) { /* Remove task */ while (taskqueue_cancel(sq->snd_taskq, &sq->snd_task, NULL) != 0) taskqueue_drain(sq->snd_taskq, &sq->snd_task); taskqueue_free(sq->snd_taskq); sq->snd_taskq = NULL; } NICVF_TX_LOCK(sq); if (sq->snd_buff_dmat != NULL) { if (sq->snd_buff != NULL) { for (i = 0; i < qs->sq_len; i++) { m_freem(sq->snd_buff[i].mbuf); sq->snd_buff[i].mbuf = NULL; bus_dmamap_unload(sq->snd_buff_dmat, sq->snd_buff[i].dmap); err = bus_dmamap_destroy(sq->snd_buff_dmat, sq->snd_buff[i].dmap); /* * If bus_dmamap_destroy fails it can cause * random panic later if the tag is also * destroyed in the process. 
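/*
 * Editorial sketch (not part of the driver): the send queue above starts with
 * free_cnt = q_len - 1, and later nicvf_get_sq_desc()/nicvf_put_sq_desc()
 * hand descriptors out by advancing tail and return them by advancing head,
 * both masked because the ring length is a power of two.  A compact model of
 * that bookkeeping, with hypothetical names and sizes:
 */
#include <stdio.h>

#define EX_Q_LEN	16	/* power of two, like SND_QUEUE_LEN */

struct ex_sq {
	int	head, tail, free_cnt;
};

static int
ex_sq_get(struct ex_sq *sq, int cnt)	/* reserve cnt descriptors at tail */
{
	int entry = sq->tail;

	sq->free_cnt -= cnt;
	sq->tail = (sq->tail + cnt) & (EX_Q_LEN - 1);
	return (entry);
}

static void
ex_sq_put(struct ex_sq *sq, int cnt)	/* completion returns cnt descriptors */
{
	sq->free_cnt += cnt;
	sq->head = (sq->head + cnt) & (EX_Q_LEN - 1);
}

int
main(void)
{
	struct ex_sq sq = { 0, 0, EX_Q_LEN - 1 };	/* one slot in reserve */
	int entry = ex_sq_get(&sq, 3);

	printf("packet starts at entry %d, %d descriptors free\n",
	    entry, sq.free_cnt);
	ex_sq_put(&sq, 3);
	printf("after completion: head %d, %d free\n", sq.head, sq.free_cnt);
	return (0);
}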
*/ KASSERT(err == 0, ("%s: Could not destroy DMA map for SQ", __func__)); } } free(sq->snd_buff, M_NICVF); err = bus_dma_tag_destroy(sq->snd_buff_dmat); KASSERT(err == 0, ("%s: Trying to destroy BUSY DMA tag", __func__)); } /* Free private driver ring for this send queue */ if (sq->br != NULL) drbr_free(sq->br, M_DEVBUF); if (sq->dmem.base != NULL) nicvf_free_q_desc_mem(nic, &sq->dmem); NICVF_TX_UNLOCK(sq); /* Destroy Tx lock */ mtx_destroy(&sq->mtx); memset(sq->mtx_name, 0, sizeof(sq->mtx_name)); } static void nicvf_reclaim_snd_queue(struct nicvf *nic, struct queue_set *qs, int qidx) { /* Disable send queue */ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0); /* Check if SQ is stopped */ if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01)) return; /* Reset send queue */ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET); } static void nicvf_reclaim_rcv_queue(struct nicvf *nic, struct queue_set *qs, int qidx) { union nic_mbx mbx = {}; /* Make sure all packets in the pipeline are written back into mem */ mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC; nicvf_send_msg_to_pf(nic, &mbx); } static void nicvf_reclaim_cmp_queue(struct nicvf *nic, struct queue_set *qs, int qidx) { /* Disable timer threshold (doesn't get reset upon CQ reset) */ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0); /* Disable completion queue */ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0); /* Reset completion queue */ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET); } static void nicvf_reclaim_rbdr(struct nicvf *nic, struct rbdr *rbdr, int qidx) { uint64_t tmp, fifo_state; int timeout = 10; /* Save head and tail pointers for freeing up buffers */ rbdr->head = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx) >> 3; rbdr->tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx) >> 3; /* * If RBDR FIFO is in 'FAIL' state then do a reset first * before reclaiming.
*/ fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx); if (((fifo_state >> 62) & 0x03) == 0x3) { nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, NICVF_RBDR_RESET); } /* Disable RBDR */ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0); if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00)) return; while (1) { tmp = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_PREFETCH_STATUS, qidx); if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF)) break; DELAY(1000); timeout--; if (!timeout) { device_printf(nic->dev, "Failed polling on prefetch status\n"); return; } } nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, NICVF_RBDR_RESET); if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02)) return; nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00); if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00)) return; } /* Configures receive queue */ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx, bool enable) { union nic_mbx mbx = {}; struct rcv_queue *rq; struct rq_cfg rq_cfg; struct ifnet *ifp; struct lro_ctrl *lro; ifp = nic->ifp; rq = &qs->rq[qidx]; rq->enable = enable; lro = &rq->lro; /* Disable receive queue */ nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0); if (!rq->enable) { nicvf_reclaim_rcv_queue(nic, qs, qidx); /* Free LRO memory */ tcp_lro_free(lro); rq->lro_enabled = FALSE; return; } /* Configure LRO if enabled */ rq->lro_enabled = FALSE; if ((if_getcapenable(ifp) & IFCAP_LRO) != 0) { if (tcp_lro_init(lro) != 0) { device_printf(nic->dev, "Failed to initialize LRO for RXQ%d\n", qidx); } else { rq->lro_enabled = TRUE; lro->ifp = nic->ifp; } } rq->cq_qs = qs->vnic_id; rq->cq_idx = qidx; rq->start_rbdr_qs = qs->vnic_id; rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1; rq->cont_rbdr_qs = qs->vnic_id; rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1; /* all writes of RBDR data to be loaded into L2 Cache as well*/ rq->caching = 1; /* Send a mailbox msg to PF to config RQ */ mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG; mbx.rq.qs_num = qs->vnic_id; mbx.rq.rq_num = qidx; mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) | (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) | (rq->cont_qs_rbdr_idx << 8) | (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx); nicvf_send_msg_to_pf(nic, &mbx); mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG; mbx.rq.cfg = (1UL << 63) | (1UL << 62) | (qs->vnic_id << 0); nicvf_send_msg_to_pf(nic, &mbx); /* * RQ drop config * Enable CQ drop to reserve sufficient CQEs for all tx packets */ mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG; mbx.rq.cfg = (1UL << 62) | (RQ_CQ_DROP << 8); nicvf_send_msg_to_pf(nic, &mbx); nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00); /* Enable Receive queue */ rq_cfg.ena = 1; rq_cfg.tcp_ena = 0; nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(uint64_t *)&rq_cfg); } /* Configures completion queue */ static void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx, boolean_t enable) { struct cmp_queue *cq; struct cq_cfg cq_cfg; cq = &qs->cq[qidx]; cq->enable = enable; if (!cq->enable) { nicvf_reclaim_cmp_queue(nic, qs, qidx); return; } /* Reset completion queue */ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET); /* Set completion queue base address */ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx, (uint64_t)(cq->dmem.phys_base)); /* Enable Completion queue */ cq_cfg.ena = 1; cq_cfg.reset = 0; cq_cfg.caching = 0; cq_cfg.qsize = CMP_QSIZE; cq_cfg.avg_con = 0; nicvf_queue_reg_write(nic, 
NIC_QSET_CQ_0_7_CFG, qidx, *(uint64_t *)&cq_cfg); /* Set threshold value for interrupt generation */ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh); nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, nic->cq_coalesce_usecs); } /* Configures transmit queue */ static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx, boolean_t enable) { union nic_mbx mbx = {}; struct snd_queue *sq; struct sq_cfg sq_cfg; sq = &qs->sq[qidx]; sq->enable = enable; if (!sq->enable) { nicvf_reclaim_snd_queue(nic, qs, qidx); return; } /* Reset send queue */ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET); sq->cq_qs = qs->vnic_id; sq->cq_idx = qidx; /* Send a mailbox msg to PF to config SQ */ mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG; mbx.sq.qs_num = qs->vnic_id; mbx.sq.sq_num = qidx; mbx.sq.sqs_mode = nic->sqs_mode; mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx; nicvf_send_msg_to_pf(nic, &mbx); /* Set queue base address */ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx, (uint64_t)(sq->dmem.phys_base)); /* Enable send queue & set queue size */ sq_cfg.ena = 1; sq_cfg.reset = 0; sq_cfg.ldwb = 0; sq_cfg.qsize = SND_QSIZE; sq_cfg.tstmp_bgx_intf = 0; nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(uint64_t *)&sq_cfg); /* Set threshold value for interrupt generation */ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh); } /* Configures receive buffer descriptor ring */ static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs, int qidx, boolean_t enable) { struct rbdr *rbdr; struct rbdr_cfg rbdr_cfg; rbdr = &qs->rbdr[qidx]; nicvf_reclaim_rbdr(nic, rbdr, qidx); if (!enable) return; /* Set descriptor base address */ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx, (uint64_t)(rbdr->dmem.phys_base)); /* Enable RBDR & set queue size */ /* Buffer size should be in multiples of 128 bytes */ rbdr_cfg.ena = 1; rbdr_cfg.reset = 0; rbdr_cfg.ldwb = 0; rbdr_cfg.qsize = RBDR_SIZE; rbdr_cfg.avg_con = 0; rbdr_cfg.lines = rbdr->dma_size / 128; nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, *(uint64_t *)&rbdr_cfg); /* Notify HW */ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, qidx, qs->rbdr_len - 1); /* Set threshold value for interrupt generation */ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH, qidx, rbdr->thresh - 1); } /* Requests PF to assign and enable Qset */ void nicvf_qset_config(struct nicvf *nic, boolean_t enable) { union nic_mbx mbx = {}; struct queue_set *qs; struct qs_cfg *qs_cfg; qs = nic->qs; if (qs == NULL) { device_printf(nic->dev, "Qset is still not allocated, don't init queues\n"); return; } qs->enable = enable; qs->vnic_id = nic->vf_id; /* Send a mailbox msg to PF to config Qset */ mbx.qs.msg = NIC_MBOX_MSG_QS_CFG; mbx.qs.num = qs->vnic_id; mbx.qs.cfg = 0; qs_cfg = (struct qs_cfg *)&mbx.qs.cfg; if (qs->enable) { qs_cfg->ena = 1; qs_cfg->vnic = qs->vnic_id; } nicvf_send_msg_to_pf(nic, &mbx); } static void nicvf_free_resources(struct nicvf *nic) { int qidx; struct queue_set *qs; qs = nic->qs; /* * Remove QS error task first since it has to be dead * to safely free completion queue tasks. 
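/*
 * Editorial sketch (not part of the driver): the queue-config helpers above
 * fill a small bit-field struct (cq_cfg, sq_cfg, rbdr_cfg) and hand the raw
 * 64-bit image to nicvf_queue_reg_write() through a cast.  The layout below
 * is invented purely for illustration, and a union replaces the cast; note
 * that bit-field ordering is implementation-defined, so the real driver
 * depends on the layout declared in its hardware headers.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct example_cfg {		/* made-up register layout */
	uint64_t	qsize : 4;
	uint64_t	reset : 1;
	uint64_t	ena : 1;
};

union example_cfg_u {
	struct example_cfg	fields;
	uint64_t		raw;
};

int
main(void)
{
	union example_cfg_u cfg = { .raw = 0 };

	cfg.fields.ena = 1;
	cfg.fields.qsize = 5;
	/* The driver effectively does reg_write(..., *(uint64_t *)&cfg); */
	printf("register image: 0x%016" PRIx64 "\n", cfg.raw);
	return (0);
}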
*/ if (qs->qs_err_taskq != NULL) { /* Shut down QS error tasks */ while (taskqueue_cancel(qs->qs_err_taskq, &qs->qs_err_task, NULL) != 0) { taskqueue_drain(qs->qs_err_taskq, &qs->qs_err_task); } taskqueue_free(qs->qs_err_taskq); qs->qs_err_taskq = NULL; } /* Free receive buffer descriptor ring */ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) nicvf_free_rbdr(nic, &qs->rbdr[qidx]); /* Free completion queue */ for (qidx = 0; qidx < qs->cq_cnt; qidx++) nicvf_free_cmp_queue(nic, &qs->cq[qidx]); /* Free send queue */ for (qidx = 0; qidx < qs->sq_cnt; qidx++) nicvf_free_snd_queue(nic, &qs->sq[qidx]); } static int nicvf_alloc_resources(struct nicvf *nic) { struct queue_set *qs = nic->qs; int qidx; /* Alloc receive buffer descriptor ring */ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) { if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len, DMA_BUFFER_LEN, qidx)) goto alloc_fail; } /* Alloc send queue */ for (qidx = 0; qidx < qs->sq_cnt; qidx++) { if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len, qidx)) goto alloc_fail; } /* Alloc completion queue */ for (qidx = 0; qidx < qs->cq_cnt; qidx++) { if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len, qidx)) goto alloc_fail; } /* Allocate QS error taskqueue */ - TASK_INIT(&qs->qs_err_task, 0, nicvf_qs_err_task, nic); + NET_TASK_INIT(&qs->qs_err_task, 0, nicvf_qs_err_task, nic); qs->qs_err_taskq = taskqueue_create_fast("nicvf_qs_err_taskq", M_WAITOK, taskqueue_thread_enqueue, &qs->qs_err_taskq); taskqueue_start_threads(&qs->qs_err_taskq, 1, PI_NET, "%s: qs_taskq", device_get_nameunit(nic->dev)); return (0); alloc_fail: nicvf_free_resources(nic); return (ENOMEM); } int nicvf_set_qset_resources(struct nicvf *nic) { struct queue_set *qs; qs = malloc(sizeof(*qs), M_NICVF, (M_ZERO | M_WAITOK)); nic->qs = qs; /* Set count of each queue */ qs->rbdr_cnt = RBDR_CNT; qs->rq_cnt = RCV_QUEUE_CNT; qs->sq_cnt = SND_QUEUE_CNT; qs->cq_cnt = CMP_QUEUE_CNT; /* Set queue lengths */ qs->rbdr_len = RCV_BUF_COUNT; qs->sq_len = SND_QUEUE_LEN; qs->cq_len = CMP_QUEUE_LEN; nic->rx_queues = qs->rq_cnt; nic->tx_queues = qs->sq_cnt; return (0); } int nicvf_config_data_transfer(struct nicvf *nic, boolean_t enable) { boolean_t disable = FALSE; struct queue_set *qs; int qidx; qs = nic->qs; if (qs == NULL) return (0); if (enable) { if (nicvf_alloc_resources(nic) != 0) return (ENOMEM); for (qidx = 0; qidx < qs->sq_cnt; qidx++) nicvf_snd_queue_config(nic, qs, qidx, enable); for (qidx = 0; qidx < qs->cq_cnt; qidx++) nicvf_cmp_queue_config(nic, qs, qidx, enable); for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) nicvf_rbdr_config(nic, qs, qidx, enable); for (qidx = 0; qidx < qs->rq_cnt; qidx++) nicvf_rcv_queue_config(nic, qs, qidx, enable); } else { for (qidx = 0; qidx < qs->rq_cnt; qidx++) nicvf_rcv_queue_config(nic, qs, qidx, disable); for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) nicvf_rbdr_config(nic, qs, qidx, disable); for (qidx = 0; qidx < qs->sq_cnt; qidx++) nicvf_snd_queue_config(nic, qs, qidx, disable); for (qidx = 0; qidx < qs->cq_cnt; qidx++) nicvf_cmp_queue_config(nic, qs, qidx, disable); nicvf_free_resources(nic); } return (0); } /* * Get a free desc from SQ * returns descriptor ponter & descriptor number */ static __inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt) { int qentry; qentry = sq->tail; atomic_subtract_int(&sq->free_cnt, desc_cnt); sq->tail += desc_cnt; sq->tail &= (sq->dmem.q_len - 1); return (qentry); } /* Free descriptor back to SQ for future use */ static void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt) { atomic_add_int(&sq->free_cnt, 
desc_cnt); sq->head += desc_cnt; sq->head &= (sq->dmem.q_len - 1); } static __inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry) { qentry++; qentry &= (sq->dmem.q_len - 1); return (qentry); } static void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx) { uint64_t sq_cfg; sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx); sq_cfg |= NICVF_SQ_EN; nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg); /* Ring doorbell so that H/W restarts processing SQEs */ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0); } static void nicvf_sq_disable(struct nicvf *nic, int qidx) { uint64_t sq_cfg; sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx); sq_cfg &= ~NICVF_SQ_EN; nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg); } static void nicvf_sq_free_used_descs(struct nicvf *nic, struct snd_queue *sq, int qidx) { uint64_t head; struct snd_buff *snd_buff; struct sq_hdr_subdesc *hdr; NICVF_TX_LOCK(sq); head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4; while (sq->head != head) { hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head); if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) { nicvf_put_sq_desc(sq, 1); continue; } snd_buff = &sq->snd_buff[sq->head]; if (snd_buff->mbuf != NULL) { bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap); m_freem(snd_buff->mbuf); sq->snd_buff[sq->head].mbuf = NULL; } nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); } NICVF_TX_UNLOCK(sq); } /* * Add SQ HEADER subdescriptor. * First subdescriptor for every send descriptor. */ static __inline int nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry, int subdesc_cnt, struct mbuf *mbuf, int len) { struct nicvf *nic; struct sq_hdr_subdesc *hdr; struct ether_vlan_header *eh; #ifdef INET struct ip *ip; struct tcphdr *th; #endif uint16_t etype; int ehdrlen, iphlen, poff, proto; nic = sq->nic; hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry); sq->snd_buff[qentry].mbuf = mbuf; memset(hdr, 0, SND_QUEUE_DESC_SIZE); hdr->subdesc_type = SQ_DESC_TYPE_HEADER; /* Enable notification via CQE after processing SQE */ hdr->post_cqe = 1; /* No of subdescriptors following this */ hdr->subdesc_cnt = subdesc_cnt; hdr->tot_len = len; eh = mtod(mbuf, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; etype = ntohs(eh->evl_proto); } else { ehdrlen = ETHER_HDR_LEN; etype = ntohs(eh->evl_encap_proto); } poff = proto = -1; switch (etype) { #ifdef INET6 case ETHERTYPE_IPV6: if (mbuf->m_len < ehdrlen + sizeof(struct ip6_hdr)) { mbuf = m_pullup(mbuf, ehdrlen +sizeof(struct ip6_hdr)); sq->snd_buff[qentry].mbuf = NULL; if (mbuf == NULL) return (ENOBUFS); } poff = ip6_lasthdr(mbuf, ehdrlen, IPPROTO_IPV6, &proto); if (poff < 0) return (ENOBUFS); poff += ehdrlen; break; #endif #ifdef INET case ETHERTYPE_IP: if (mbuf->m_len < ehdrlen + sizeof(struct ip)) { mbuf = m_pullup(mbuf, ehdrlen + sizeof(struct ip)); sq->snd_buff[qentry].mbuf = mbuf; if (mbuf == NULL) return (ENOBUFS); } if (mbuf->m_pkthdr.csum_flags & CSUM_IP) hdr->csum_l3 = 1; /* Enable IP csum calculation */ ip = (struct ip *)(mbuf->m_data + ehdrlen); iphlen = ip->ip_hl << 2; poff = ehdrlen + iphlen; proto = ip->ip_p; break; #endif } #if defined(INET6) || defined(INET) if (poff > 0 && mbuf->m_pkthdr.csum_flags != 0) { switch (proto) { case IPPROTO_TCP: if ((mbuf->m_pkthdr.csum_flags & CSUM_TCP) == 0) break; if (mbuf->m_len < (poff + sizeof(struct tcphdr))) { mbuf = m_pullup(mbuf, poff + sizeof(struct tcphdr)); 
sq->snd_buff[qentry].mbuf = mbuf; if (mbuf == NULL) return (ENOBUFS); } hdr->csum_l4 = SEND_L4_CSUM_TCP; break; case IPPROTO_UDP: if ((mbuf->m_pkthdr.csum_flags & CSUM_UDP) == 0) break; if (mbuf->m_len < (poff + sizeof(struct udphdr))) { mbuf = m_pullup(mbuf, poff + sizeof(struct udphdr)); sq->snd_buff[qentry].mbuf = mbuf; if (mbuf == NULL) return (ENOBUFS); } hdr->csum_l4 = SEND_L4_CSUM_UDP; break; case IPPROTO_SCTP: if ((mbuf->m_pkthdr.csum_flags & CSUM_SCTP) == 0) break; if (mbuf->m_len < (poff + sizeof(struct sctphdr))) { mbuf = m_pullup(mbuf, poff + sizeof(struct sctphdr)); sq->snd_buff[qentry].mbuf = mbuf; if (mbuf == NULL) return (ENOBUFS); } hdr->csum_l4 = SEND_L4_CSUM_SCTP; break; default: break; } hdr->l3_offset = ehdrlen; hdr->l4_offset = poff; } if ((mbuf->m_pkthdr.tso_segsz != 0) && nic->hw_tso) { th = (struct tcphdr *)((caddr_t)(mbuf->m_data + poff)); hdr->tso = 1; hdr->tso_start = poff + (th->th_off * 4); hdr->tso_max_paysize = mbuf->m_pkthdr.tso_segsz; hdr->inner_l3_offset = ehdrlen - 2; nic->drv_stats.tx_tso++; } #endif return (0); } /* * SQ GATHER subdescriptor * Must follow HDR descriptor */ static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry, int size, uint64_t data) { struct sq_gather_subdesc *gather; qentry &= (sq->dmem.q_len - 1); gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry); memset(gather, 0, SND_QUEUE_DESC_SIZE); gather->subdesc_type = SQ_DESC_TYPE_GATHER; gather->ld_type = NIC_SEND_LD_TYPE_E_LDD; gather->size = size; gather->addr = data; } /* Put an mbuf to a SQ for packet transfer. */ static int nicvf_tx_mbuf_locked(struct snd_queue *sq, struct mbuf **mbufp) { bus_dma_segment_t segs[256]; struct snd_buff *snd_buff; size_t seg; int nsegs, qentry; int subdesc_cnt; int err; NICVF_TX_LOCK_ASSERT(sq); if (sq->free_cnt == 0) return (ENOBUFS); snd_buff = &sq->snd_buff[sq->tail]; err = bus_dmamap_load_mbuf_sg(sq->snd_buff_dmat, snd_buff->dmap, *mbufp, segs, &nsegs, BUS_DMA_NOWAIT); if (__predict_false(err != 0)) { /* ARM64TODO: Add mbuf defragmenting if we lack maps */ m_freem(*mbufp); *mbufp = NULL; return (err); } /* Set how many subdescriptors is required */ subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT + nsegs - 1; if (subdesc_cnt > sq->free_cnt) { /* ARM64TODO: Add mbuf defragmentation if we lack descriptors */ bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap); return (ENOBUFS); } qentry = nicvf_get_sq_desc(sq, subdesc_cnt); /* Add SQ header subdesc */ err = nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, *mbufp, (*mbufp)->m_pkthdr.len); if (err != 0) { nicvf_put_sq_desc(sq, subdesc_cnt); bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap); if (err == ENOBUFS) { m_freem(*mbufp); *mbufp = NULL; } return (err); } /* Add SQ gather subdescs */ for (seg = 0; seg < nsegs; seg++) { qentry = nicvf_get_nxt_sqentry(sq, qentry); nicvf_sq_add_gather_subdesc(sq, qentry, segs[seg].ds_len, segs[seg].ds_addr); } /* make sure all memory stores are done before ringing doorbell */ bus_dmamap_sync(sq->dmem.dmat, sq->dmem.dmap, BUS_DMASYNC_PREWRITE); dprintf(sq->nic->dev, "%s: sq->idx: %d, subdesc_cnt: %d\n", __func__, sq->idx, subdesc_cnt); /* Inform HW to xmit new packet */ nicvf_queue_reg_write(sq->nic, NIC_QSET_SQ_0_7_DOOR, sq->idx, subdesc_cnt); return (0); } static __inline u_int frag_num(u_int i) { #if BYTE_ORDER == BIG_ENDIAN return ((i & ~3) + 3 - (i & 3)); #else return (i); #endif } /* Returns MBUF for a received packet */ struct mbuf * nicvf_get_rcv_mbuf(struct nicvf *nic, struct cqe_rx_t *cqe_rx) { int frag; int payload_len = 0; 
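/*
 * Editorial sketch (not part of the driver): frag_num() above compensates for
 * the per-fragment lengths being stored as 16-bit slots inside 64-bit words;
 * on a big-endian host the entries within each group of four appear reversed,
 * and ((i & ~3) + 3 - (i & 3)) flips the index inside its group of four.
 * A standalone demonstration of the mapping:
 */
#include <stdio.h>

static unsigned int
frag_index(unsigned int i)	/* same expression as the big-endian frag_num() */
{
	return ((i & ~3u) + 3 - (i & 3));
}

int
main(void)
{
	unsigned int i;

	/* 0..7 maps to 3 2 1 0 7 6 5 4: reversed within each group of four. */
	for (i = 0; i < 8; i++)
		printf("%u -> %u\n", i, frag_index(i));
	return (0);
}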
struct mbuf *mbuf; struct mbuf *mbuf_frag; uint16_t *rb_lens = NULL; uint64_t *rb_ptrs = NULL; mbuf = NULL; rb_lens = (uint16_t *)((uint8_t *)cqe_rx + (3 * sizeof(uint64_t))); rb_ptrs = (uint64_t *)((uint8_t *)cqe_rx + (6 * sizeof(uint64_t))); dprintf(nic->dev, "%s rb_cnt %d rb0_ptr %lx rb0_sz %d\n", __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz); for (frag = 0; frag < cqe_rx->rb_cnt; frag++) { payload_len = rb_lens[frag_num(frag)]; if (frag == 0) { /* First fragment */ mbuf = nicvf_rb_ptr_to_mbuf(nic, (*rb_ptrs - cqe_rx->align_pad)); mbuf->m_len = payload_len; mbuf->m_data += cqe_rx->align_pad; if_setrcvif(mbuf, nic->ifp); } else { /* Add fragments */ mbuf_frag = nicvf_rb_ptr_to_mbuf(nic, *rb_ptrs); m_append(mbuf, payload_len, mbuf_frag->m_data); m_freem(mbuf_frag); } /* Next buffer pointer */ rb_ptrs++; } if (__predict_true(mbuf != NULL)) { m_fixhdr(mbuf); mbuf->m_pkthdr.flowid = cqe_rx->rq_idx; M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE); if (__predict_true((if_getcapenable(nic->ifp) & IFCAP_RXCSUM) != 0)) { /* * HW by default verifies IP & TCP/UDP/SCTP checksums */ if (__predict_true(cqe_rx->l3_type == L3TYPE_IPV4)) { mbuf->m_pkthdr.csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID); } switch (cqe_rx->l4_type) { case L4TYPE_UDP: case L4TYPE_TCP: /* fall through */ mbuf->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); mbuf->m_pkthdr.csum_data = 0xffff; break; case L4TYPE_SCTP: mbuf->m_pkthdr.csum_flags |= CSUM_SCTP_VALID; break; default: break; } } } return (mbuf); } /* Enable interrupt */ void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx) { uint64_t reg_val; reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S); switch (int_type) { case NICVF_INTR_CQ: reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT); break; case NICVF_INTR_SQ: reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT); break; case NICVF_INTR_RBDR: reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT); break; case NICVF_INTR_PKT_DROP: reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT); break; case NICVF_INTR_TCP_TIMER: reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT); break; case NICVF_INTR_MBOX: reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT); break; case NICVF_INTR_QS_ERR: reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT); break; default: device_printf(nic->dev, "Failed to enable interrupt: unknown type\n"); break; } nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val); } /* Disable interrupt */ void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx) { uint64_t reg_val = 0; switch (int_type) { case NICVF_INTR_CQ: reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT); break; case NICVF_INTR_SQ: reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT); break; case NICVF_INTR_RBDR: reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT); break; case NICVF_INTR_PKT_DROP: reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT); break; case NICVF_INTR_TCP_TIMER: reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT); break; case NICVF_INTR_MBOX: reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT); break; case NICVF_INTR_QS_ERR: reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT); break; default: device_printf(nic->dev, "Failed to disable interrupt: unknown type\n"); break; } nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val); } /* Clear interrupt */ void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx) { uint64_t reg_val = 0; switch (int_type) { case NICVF_INTR_CQ: reg_val = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT); break; case NICVF_INTR_SQ: reg_val = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT); break; case NICVF_INTR_RBDR: reg_val = ((1UL << q_idx) << 
NICVF_INTR_RBDR_SHIFT); break; case NICVF_INTR_PKT_DROP: reg_val = (1UL << NICVF_INTR_PKT_DROP_SHIFT); break; case NICVF_INTR_TCP_TIMER: reg_val = (1UL << NICVF_INTR_TCP_TIMER_SHIFT); break; case NICVF_INTR_MBOX: reg_val = (1UL << NICVF_INTR_MBOX_SHIFT); break; case NICVF_INTR_QS_ERR: reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT); break; default: device_printf(nic->dev, "Failed to clear interrupt: unknown type\n"); break; } nicvf_reg_write(nic, NIC_VF_INT, reg_val); } /* Check if interrupt is enabled */ int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx) { uint64_t reg_val; uint64_t mask = 0xff; reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S); switch (int_type) { case NICVF_INTR_CQ: mask = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT); break; case NICVF_INTR_SQ: mask = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT); break; case NICVF_INTR_RBDR: mask = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT); break; case NICVF_INTR_PKT_DROP: mask = NICVF_INTR_PKT_DROP_MASK; break; case NICVF_INTR_TCP_TIMER: mask = NICVF_INTR_TCP_TIMER_MASK; break; case NICVF_INTR_MBOX: mask = NICVF_INTR_MBOX_MASK; break; case NICVF_INTR_QS_ERR: mask = NICVF_INTR_QS_ERR_MASK; break; default: device_printf(nic->dev, "Failed to check interrupt enable: unknown type\n"); break; } return (reg_val & mask); } void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx) { struct rcv_queue *rq; #define GET_RQ_STATS(reg) \ nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\ (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3)) rq = &nic->qs->rq[rq_idx]; rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS); rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS); } void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx) { struct snd_queue *sq; #define GET_SQ_STATS(reg) \ nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\ (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3)) sq = &nic->qs->sq[sq_idx]; sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS); sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS); } /* Check for errors in the receive cmp.queue entry */ int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cmp_queue *cq, struct cqe_rx_t *cqe_rx) { struct nicvf_hw_stats *stats = &nic->hw_stats; struct nicvf_drv_stats *drv_stats = &nic->drv_stats; if (!cqe_rx->err_level && !cqe_rx->err_opcode) { drv_stats->rx_frames_ok++; return (0); } switch (cqe_rx->err_opcode) { case CQ_RX_ERROP_RE_PARTIAL: stats->rx_bgx_truncated_pkts++; break; case CQ_RX_ERROP_RE_JABBER: stats->rx_jabber_errs++; break; case CQ_RX_ERROP_RE_FCS: stats->rx_fcs_errs++; break; case CQ_RX_ERROP_RE_RX_CTL: stats->rx_bgx_errs++; break; case CQ_RX_ERROP_PREL2_ERR: stats->rx_prel2_errs++; break; case CQ_RX_ERROP_L2_MAL: stats->rx_l2_hdr_malformed++; break; case CQ_RX_ERROP_L2_OVERSIZE: stats->rx_oversize++; break; case CQ_RX_ERROP_L2_UNDERSIZE: stats->rx_undersize++; break; case CQ_RX_ERROP_L2_LENMISM: stats->rx_l2_len_mismatch++; break; case CQ_RX_ERROP_L2_PCLP: stats->rx_l2_pclp++; break; case CQ_RX_ERROP_IP_NOT: stats->rx_ip_ver_errs++; break; case CQ_RX_ERROP_IP_CSUM_ERR: stats->rx_ip_csum_errs++; break; case CQ_RX_ERROP_IP_MAL: stats->rx_ip_hdr_malformed++; break; case CQ_RX_ERROP_IP_MALD: stats->rx_ip_payload_malformed++; break; case CQ_RX_ERROP_IP_HOP: stats->rx_ip_ttl_errs++; break; case CQ_RX_ERROP_L3_PCLP: stats->rx_l3_pclp++; break; case CQ_RX_ERROP_L4_MAL: stats->rx_l4_malformed++; break; case CQ_RX_ERROP_L4_CHK: stats->rx_l4_csum_errs++; break; case CQ_RX_ERROP_UDP_LEN: stats->rx_udp_len_errs++; break; case CQ_RX_ERROP_L4_PORT: stats->rx_l4_port_errs++; break; case CQ_RX_ERROP_TCP_FLAG: 
stats->rx_tcp_flag_errs++; break; case CQ_RX_ERROP_TCP_OFFSET: stats->rx_tcp_offset_errs++; break; case CQ_RX_ERROP_L4_PCLP: stats->rx_l4_pclp++; break; case CQ_RX_ERROP_RBDR_TRUNC: stats->rx_truncated_pkts++; break; } return (1); } /* Check for errors in the send cmp.queue entry */ int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cmp_queue *cq, struct cqe_send_t *cqe_tx) { struct cmp_queue_stats *stats = &cq->stats; switch (cqe_tx->send_status) { case CQ_TX_ERROP_GOOD: stats->tx.good++; return (0); case CQ_TX_ERROP_DESC_FAULT: stats->tx.desc_fault++; break; case CQ_TX_ERROP_HDR_CONS_ERR: stats->tx.hdr_cons_err++; break; case CQ_TX_ERROP_SUBDC_ERR: stats->tx.subdesc_err++; break; case CQ_TX_ERROP_IMM_SIZE_OFLOW: stats->tx.imm_size_oflow++; break; case CQ_TX_ERROP_DATA_SEQUENCE_ERR: stats->tx.data_seq_err++; break; case CQ_TX_ERROP_MEM_SEQUENCE_ERR: stats->tx.mem_seq_err++; break; case CQ_TX_ERROP_LOCK_VIOL: stats->tx.lock_viol++; break; case CQ_TX_ERROP_DATA_FAULT: stats->tx.data_fault++; break; case CQ_TX_ERROP_TSTMP_CONFLICT: stats->tx.tstmp_conflict++; break; case CQ_TX_ERROP_TSTMP_TIMEOUT: stats->tx.tstmp_timeout++; break; case CQ_TX_ERROP_MEM_FAULT: stats->tx.mem_fault++; break; case CQ_TX_ERROP_CK_OVERLAP: stats->tx.csum_overlap++; break; case CQ_TX_ERROP_CK_OFLOW: stats->tx.csum_overflow++; break; } return (1); } diff --git a/sys/dev/vr/if_vr.c b/sys/dev/vr/if_vr.c index 7cbc36b44803..cf942c5cd197 100644 --- a/sys/dev/vr/if_vr.c +++ b/sys/dev/vr/if_vr.c @@ -1,2670 +1,2670 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 1997, 1998 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * VIA Rhine fast ethernet PCI NIC driver * * Supports various network adapters based on the VIA Rhine * and Rhine II PCI controllers, including the D-Link DFE530TX. * Datasheets are available at http://www.via.com.tw. 
* * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The VIA Rhine controllers are similar in some respects to the * the DEC tulip chips, except less complicated. The controller * uses an MII bus and an external physical layer interface. The * receiver has a one entry perfect filter and a 64-bit hash table * multicast filter. Transmit and receive descriptors are similar * to the tulip. * * Some Rhine chips has a serious flaw in its transmit DMA mechanism: * transmit buffers must be longword aligned. Unfortunately, * FreeBSD doesn't guarantee that mbufs will be filled in starting * at longword boundaries, so we have to do a buffer copy before * transmission. */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* "device miibus" required. See GENERIC if you get errors here. */ #include "miibus_if.h" MODULE_DEPEND(vr, pci, 1, 1, 1); MODULE_DEPEND(vr, ether, 1, 1, 1); MODULE_DEPEND(vr, miibus, 1, 1, 1); /* Define to show Rx/Tx error status. */ #undef VR_SHOW_ERRORS #define VR_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) /* * Various supported device vendors/types, their names & quirks. */ #define VR_Q_NEEDALIGN (1<<0) #define VR_Q_CSUM (1<<1) #define VR_Q_CAM (1<<2) static const struct vr_type { u_int16_t vr_vid; u_int16_t vr_did; int vr_quirks; const char *vr_name; } vr_devs[] = { { VIA_VENDORID, VIA_DEVICEID_RHINE, VR_Q_NEEDALIGN, "VIA VT3043 Rhine I 10/100BaseTX" }, { VIA_VENDORID, VIA_DEVICEID_RHINE_II, VR_Q_NEEDALIGN, "VIA VT86C100A Rhine II 10/100BaseTX" }, { VIA_VENDORID, VIA_DEVICEID_RHINE_II_2, 0, "VIA VT6102 Rhine II 10/100BaseTX" }, { VIA_VENDORID, VIA_DEVICEID_RHINE_III, 0, "VIA VT6105 Rhine III 10/100BaseTX" }, { VIA_VENDORID, VIA_DEVICEID_RHINE_III_M, VR_Q_CSUM, "VIA VT6105M Rhine III 10/100BaseTX" }, { DELTA_VENDORID, DELTA_DEVICEID_RHINE_II, VR_Q_NEEDALIGN, "Delta Electronics Rhine II 10/100BaseTX" }, { ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II, VR_Q_NEEDALIGN, "Addtron Technology Rhine II 10/100BaseTX" }, { 0, 0, 0, NULL } }; static int vr_probe(device_t); static int vr_attach(device_t); static int vr_detach(device_t); static int vr_shutdown(device_t); static int vr_suspend(device_t); static int vr_resume(device_t); static void vr_dmamap_cb(void *, bus_dma_segment_t *, int, int); static int vr_dma_alloc(struct vr_softc *); static void vr_dma_free(struct vr_softc *); static __inline void vr_discard_rxbuf(struct vr_rxdesc *); static int vr_newbuf(struct vr_softc *, int); #ifndef __NO_STRICT_ALIGNMENT static __inline void vr_fixup_rx(struct mbuf *); #endif static int vr_rxeof(struct vr_softc *); static void vr_txeof(struct vr_softc *); static void vr_tick(void *); static int vr_error(struct vr_softc *, uint16_t); static void vr_tx_underrun(struct vr_softc *); static int vr_intr(void *); static void vr_int_task(void *, int); static void vr_start(struct ifnet *); static void vr_start_locked(struct ifnet *); static int vr_encap(struct vr_softc *, struct mbuf **); static int vr_ioctl(struct ifnet *, u_long, caddr_t); static void vr_init(void *); static void vr_init_locked(struct vr_softc *); static void vr_tx_start(struct vr_softc *); static void vr_rx_start(struct vr_softc *); static int vr_tx_stop(struct vr_softc *); static int vr_rx_stop(struct vr_softc *); static 
void vr_stop(struct vr_softc *); static void vr_watchdog(struct vr_softc *); static int vr_ifmedia_upd(struct ifnet *); static void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *); static int vr_miibus_readreg(device_t, int, int); static int vr_miibus_writereg(device_t, int, int, int); static void vr_miibus_statchg(device_t); static void vr_cam_mask(struct vr_softc *, uint32_t, int); static int vr_cam_data(struct vr_softc *, int, int, uint8_t *); static void vr_set_filter(struct vr_softc *); static void vr_reset(const struct vr_softc *); static int vr_tx_ring_init(struct vr_softc *); static int vr_rx_ring_init(struct vr_softc *); static void vr_setwol(struct vr_softc *); static void vr_clrwol(struct vr_softc *); static int vr_sysctl_stats(SYSCTL_HANDLER_ARGS); static const struct vr_tx_threshold_table { int tx_cfg; int bcr_cfg; int value; } vr_tx_threshold_tables[] = { { VR_TXTHRESH_64BYTES, VR_BCR1_TXTHRESH64BYTES, 64 }, { VR_TXTHRESH_128BYTES, VR_BCR1_TXTHRESH128BYTES, 128 }, { VR_TXTHRESH_256BYTES, VR_BCR1_TXTHRESH256BYTES, 256 }, { VR_TXTHRESH_512BYTES, VR_BCR1_TXTHRESH512BYTES, 512 }, { VR_TXTHRESH_1024BYTES, VR_BCR1_TXTHRESH1024BYTES, 1024 }, { VR_TXTHRESH_STORENFWD, VR_BCR1_TXTHRESHSTORENFWD, 2048 } }; static device_method_t vr_methods[] = { /* Device interface */ DEVMETHOD(device_probe, vr_probe), DEVMETHOD(device_attach, vr_attach), DEVMETHOD(device_detach, vr_detach), DEVMETHOD(device_shutdown, vr_shutdown), DEVMETHOD(device_suspend, vr_suspend), DEVMETHOD(device_resume, vr_resume), /* MII interface */ DEVMETHOD(miibus_readreg, vr_miibus_readreg), DEVMETHOD(miibus_writereg, vr_miibus_writereg), DEVMETHOD(miibus_statchg, vr_miibus_statchg), DEVMETHOD_END }; static driver_t vr_driver = { "vr", vr_methods, sizeof(struct vr_softc) }; static devclass_t vr_devclass; DRIVER_MODULE(vr, pci, vr_driver, vr_devclass, 0, 0); DRIVER_MODULE(miibus, vr, miibus_driver, miibus_devclass, 0, 0); static int vr_miibus_readreg(device_t dev, int phy, int reg) { struct vr_softc *sc; int i; sc = device_get_softc(dev); /* Set the register address. */ CSR_WRITE_1(sc, VR_MIIADDR, reg); VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB); for (i = 0; i < VR_MII_TIMEOUT; i++) { DELAY(1); if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0) break; } if (i == VR_MII_TIMEOUT) device_printf(sc->vr_dev, "phy read timeout %d:%d\n", phy, reg); return (CSR_READ_2(sc, VR_MIIDATA)); } static int vr_miibus_writereg(device_t dev, int phy, int reg, int data) { struct vr_softc *sc; int i; sc = device_get_softc(dev); /* Set the register address and data to write. */ CSR_WRITE_1(sc, VR_MIIADDR, reg); CSR_WRITE_2(sc, VR_MIIDATA, data); VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB); for (i = 0; i < VR_MII_TIMEOUT; i++) { DELAY(1); if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0) break; } if (i == VR_MII_TIMEOUT) device_printf(sc->vr_dev, "phy write timeout %d:%d\n", phy, reg); return (0); } /* * In order to fiddle with the * 'full-duplex' and '100Mbps' bits in the netconfig register, we * first have to put the transmit and/or receive logic in the idle state. 
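/*
 * Editorial sketch (not part of the driver): vr_miibus_readreg() above starts
 * a MII read and then busy-waits, in a bounded DELAY() loop, for the
 * READ_ENB bit to self-clear, complaining if the bound is reached.  The same
 * bounded-poll shape, modelled in plain C with a fake status source:
 */
#include <stdbool.h>
#include <stdio.h>

#define EX_POLL_LIMIT	64	/* stand-in for VR_MII_TIMEOUT */

static int fake_busy_cycles = 5;	/* pretend the hardware needs 5 polls */

static bool
status_busy(void)		/* stands in for reading the busy bit */
{
	return (fake_busy_cycles-- > 0);
}

int
main(void)
{
	int i;

	for (i = 0; i < EX_POLL_LIMIT; i++) {
		/* A real driver would DELAY(1) between register reads. */
		if (!status_busy())
			break;
	}
	if (i == EX_POLL_LIMIT)
		printf("poll timeout\n");
	else
		printf("ready after %d polls\n", i);
	return (0);
}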
*/ static void vr_miibus_statchg(device_t dev) { struct vr_softc *sc; struct mii_data *mii; struct ifnet *ifp; int lfdx, mfdx; uint8_t cr0, cr1, fc; sc = device_get_softc(dev); mii = device_get_softc(sc->vr_miibus); ifp = sc->vr_ifp; if (mii == NULL || ifp == NULL || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE); if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: sc->vr_flags |= VR_F_LINK; break; default: break; } } if ((sc->vr_flags & VR_F_LINK) != 0) { cr0 = CSR_READ_1(sc, VR_CR0); cr1 = CSR_READ_1(sc, VR_CR1); mfdx = (cr1 & VR_CR1_FULLDUPLEX) != 0; lfdx = (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0; if (mfdx != lfdx) { if ((cr0 & (VR_CR0_TX_ON | VR_CR0_RX_ON)) != 0) { if (vr_tx_stop(sc) != 0 || vr_rx_stop(sc) != 0) { device_printf(sc->vr_dev, "%s: Tx/Rx shutdown error -- " "resetting\n", __func__); sc->vr_flags |= VR_F_RESTART; VR_UNLOCK(sc); return; } } if (lfdx) cr1 |= VR_CR1_FULLDUPLEX; else cr1 &= ~VR_CR1_FULLDUPLEX; CSR_WRITE_1(sc, VR_CR1, cr1); } fc = 0; /* Configure flow-control. */ if (sc->vr_revid >= REV_ID_VT6105_A0) { fc = CSR_READ_1(sc, VR_FLOWCR1); fc &= ~(VR_FLOWCR1_TXPAUSE | VR_FLOWCR1_RXPAUSE); if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) fc |= VR_FLOWCR1_RXPAUSE; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) { fc |= VR_FLOWCR1_TXPAUSE; sc->vr_flags |= VR_F_TXPAUSE; } CSR_WRITE_1(sc, VR_FLOWCR1, fc); } else if (sc->vr_revid >= REV_ID_VT6102_A) { /* No Tx puase capability available for Rhine II. */ fc = CSR_READ_1(sc, VR_MISC_CR0); fc &= ~VR_MISCCR0_RXPAUSE; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) fc |= VR_MISCCR0_RXPAUSE; CSR_WRITE_1(sc, VR_MISC_CR0, fc); } vr_rx_start(sc); vr_tx_start(sc); } else { if (vr_tx_stop(sc) != 0 || vr_rx_stop(sc) != 0) { device_printf(sc->vr_dev, "%s: Tx/Rx shutdown error -- resetting\n", __func__); sc->vr_flags |= VR_F_RESTART; } } } static void vr_cam_mask(struct vr_softc *sc, uint32_t mask, int type) { if (type == VR_MCAST_CAM) CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST); else CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN); CSR_WRITE_4(sc, VR_CAMMASK, mask); CSR_WRITE_1(sc, VR_CAMCTL, 0); } static int vr_cam_data(struct vr_softc *sc, int type, int idx, uint8_t *mac) { int i; if (type == VR_MCAST_CAM) { if (idx < 0 || idx >= VR_CAM_MCAST_CNT || mac == NULL) return (EINVAL); CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST); } else CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN); /* Set CAM entry address. */ CSR_WRITE_1(sc, VR_CAMADDR, idx); /* Set CAM entry data. */ if (type == VR_MCAST_CAM) { for (i = 0; i < ETHER_ADDR_LEN; i++) CSR_WRITE_1(sc, VR_MCAM0 + i, mac[i]); } else { CSR_WRITE_1(sc, VR_VCAM0, mac[0]); CSR_WRITE_1(sc, VR_VCAM1, mac[1]); } DELAY(10); /* Write CAM and wait for self-clear of VR_CAMCTL_WRITE bit. */ CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_WRITE); for (i = 0; i < VR_TIMEOUT; i++) { DELAY(1); if ((CSR_READ_1(sc, VR_CAMCTL) & VR_CAMCTL_WRITE) == 0) break; } if (i == VR_TIMEOUT) device_printf(sc->vr_dev, "%s: setting CAM filter timeout!\n", __func__); CSR_WRITE_1(sc, VR_CAMCTL, 0); return (i == VR_TIMEOUT ? 
ETIMEDOUT : 0); } struct vr_hash_maddr_cam_ctx { struct vr_softc *sc; uint32_t mask; int error; }; static u_int vr_hash_maddr_cam(void *arg, struct sockaddr_dl *sdl, u_int mcnt) { struct vr_hash_maddr_cam_ctx *ctx = arg; if (ctx->error != 0) return (0); ctx->error = vr_cam_data(ctx->sc, VR_MCAST_CAM, mcnt, LLADDR(sdl)); if (ctx->error != 0) { ctx->mask = 0; return (0); } ctx->mask |= 1 << mcnt; return (1); } static u_int vr_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { uint32_t *hashes = arg; int h; h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26; if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); return (1); } /* * Program the 64-bit multicast hash filter. */ static void vr_set_filter(struct vr_softc *sc) { struct ifnet *ifp; uint32_t hashes[2] = { 0, 0 }; uint8_t rxfilt; int error, mcnt; VR_LOCK_ASSERT(sc); ifp = sc->vr_ifp; rxfilt = CSR_READ_1(sc, VR_RXCFG); rxfilt &= ~(VR_RXCFG_RX_PROMISC | VR_RXCFG_RX_BROAD | VR_RXCFG_RX_MULTI); if (ifp->if_flags & IFF_BROADCAST) rxfilt |= VR_RXCFG_RX_BROAD; if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { rxfilt |= VR_RXCFG_RX_MULTI; if (ifp->if_flags & IFF_PROMISC) rxfilt |= VR_RXCFG_RX_PROMISC; CSR_WRITE_1(sc, VR_RXCFG, rxfilt); CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF); CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF); return; } /* Now program new ones. */ error = 0; if ((sc->vr_quirks & VR_Q_CAM) != 0) { struct vr_hash_maddr_cam_ctx ctx; /* * For hardwares that have CAM capability, use * 32 entries multicast perfect filter. */ ctx.sc = sc; ctx.mask = 0; ctx.error = 0; mcnt = if_foreach_llmaddr(ifp, vr_hash_maddr_cam, &ctx); vr_cam_mask(sc, VR_MCAST_CAM, ctx.mask); } if ((sc->vr_quirks & VR_Q_CAM) == 0 || error != 0) { /* * If there are too many multicast addresses or * setting multicast CAM filter failed, use hash * table based filtering. */ mcnt = if_foreach_llmaddr(ifp, vr_hash_maddr, hashes); } if (mcnt > 0) rxfilt |= VR_RXCFG_RX_MULTI; CSR_WRITE_4(sc, VR_MAR0, hashes[0]); CSR_WRITE_4(sc, VR_MAR1, hashes[1]); CSR_WRITE_1(sc, VR_RXCFG, rxfilt); } static void vr_reset(const struct vr_softc *sc) { int i; /*VR_LOCK_ASSERT(sc);*/ /* XXX: Called during attach w/o lock. */ CSR_WRITE_1(sc, VR_CR1, VR_CR1_RESET); if (sc->vr_revid < REV_ID_VT6102_A) { /* VT86C100A needs more delay after reset. */ DELAY(100); } for (i = 0; i < VR_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_1(sc, VR_CR1) & VR_CR1_RESET)) break; } if (i == VR_TIMEOUT) { if (sc->vr_revid < REV_ID_VT6102_A) device_printf(sc->vr_dev, "reset never completed!\n"); else { /* Use newer force reset command. */ device_printf(sc->vr_dev, "Using force reset command.\n"); VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST); /* * Wait a little while for the chip to get its brains * in order. */ DELAY(2000); } } } /* * Probe for a VIA Rhine chip. Check the PCI vendor and device * IDs against our list and return a match or NULL */ static const struct vr_type * vr_match(device_t dev) { const struct vr_type *t = vr_devs; for (t = vr_devs; t->vr_name != NULL; t++) if ((pci_get_vendor(dev) == t->vr_vid) && (pci_get_device(dev) == t->vr_did)) return (t); return (NULL); } /* * Probe for a VIA Rhine chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. */ static int vr_probe(device_t dev) { const struct vr_type *t; t = vr_match(dev); if (t != NULL) { device_set_desc(dev, t->vr_name); return (BUS_PROBE_DEFAULT); } return (ENXIO); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. 
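/*
 * Editorial sketch (not part of the driver): vr_hash_maddr() above turns the
 * big-endian Ethernet CRC of each multicast address into a 6-bit bucket
 * (crc >> 26) and sets that bit in one of the two 32-bit MAR registers.  The
 * helper below models only the bucket and bit selection; the CRC value is a
 * made-up constant rather than a real ether_crc32_be() result.
 */
#include <stdint.h>
#include <stdio.h>

static void
set_hash_bit(uint32_t crc, uint32_t mar[2])
{
	unsigned int h = crc >> 26;	/* bucket 0..63 */

	if (h < 32)
		mar[0] |= (uint32_t)1 << h;
	else
		mar[1] |= (uint32_t)1 << (h - 32);
}

int
main(void)
{
	uint32_t mar[2] = { 0, 0 };

	set_hash_bit(0xdeadbeefu, mar);	/* made-up CRC, lands in bucket 55 */
	printf("MAR0=0x%08x MAR1=0x%08x\n", mar[0], mar[1]);
	return (0);
}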
*/ static int vr_attach(device_t dev) { struct vr_softc *sc; struct ifnet *ifp; const struct vr_type *t; uint8_t eaddr[ETHER_ADDR_LEN]; int error, rid; int i, phy, pmc; sc = device_get_softc(dev); sc->vr_dev = dev; t = vr_match(dev); KASSERT(t != NULL, ("Lost if_vr device match")); sc->vr_quirks = t->vr_quirks; device_printf(dev, "Quirks: 0x%x\n", sc->vr_quirks); mtx_init(&sc->vr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->vr_stat_callout, &sc->vr_mtx, 0); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0, vr_sysctl_stats, "I", "Statistics"); error = 0; /* * Map control/status registers. */ pci_enable_busmaster(dev); sc->vr_revid = pci_get_revid(dev); device_printf(dev, "Revision: 0x%x\n", sc->vr_revid); sc->vr_res_id = PCIR_BAR(0); sc->vr_res_type = SYS_RES_IOPORT; sc->vr_res = bus_alloc_resource_any(dev, sc->vr_res_type, &sc->vr_res_id, RF_ACTIVE); if (sc->vr_res == NULL) { device_printf(dev, "couldn't map ports\n"); error = ENXIO; goto fail; } /* Allocate interrupt. */ rid = 0; sc->vr_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->vr_irq == NULL) { device_printf(dev, "couldn't map interrupt\n"); error = ENXIO; goto fail; } /* Allocate ifnet structure. */ ifp = sc->vr_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "couldn't allocate ifnet structure\n"); error = ENOSPC; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = vr_ioctl; ifp->if_start = vr_start; ifp->if_init = vr_init; IFQ_SET_MAXLEN(&ifp->if_snd, VR_TX_RING_CNT - 1); ifp->if_snd.ifq_maxlen = VR_TX_RING_CNT - 1; IFQ_SET_READY(&ifp->if_snd); - TASK_INIT(&sc->vr_inttask, 0, vr_int_task, sc); + NET_TASK_INIT(&sc->vr_inttask, 0, vr_int_task, sc); /* Configure Tx FIFO threshold. */ sc->vr_txthresh = VR_TXTHRESH_MIN; if (sc->vr_revid < REV_ID_VT6105_A0) { /* * Use store and forward mode for Rhine I/II. * Otherwise they produce a lot of Tx underruns and * it would take a while to get working FIFO threshold * value. */ sc->vr_txthresh = VR_TXTHRESH_MAX; } if ((sc->vr_quirks & VR_Q_CSUM) != 0) { ifp->if_hwassist = VR_CSUM_FEATURES; ifp->if_capabilities |= IFCAP_HWCSUM; /* * To update checksum field the hardware may need to * store entire frames into FIFO before transmitting. */ sc->vr_txthresh = VR_TXTHRESH_MAX; } if (sc->vr_revid >= REV_ID_VT6102_A && pci_find_cap(dev, PCIY_PMG, &pmc) == 0) ifp->if_capabilities |= IFCAP_WOL_UCAST | IFCAP_WOL_MAGIC; /* Rhine supports oversized VLAN frame. */ ifp->if_capabilities |= IFCAP_VLAN_MTU; ifp->if_capenable = ifp->if_capabilities; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif /* * Windows may put the chip in suspend mode when it * shuts down. Be sure to kick it in the head to wake it * up again. */ if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1)); /* * Get station address. The way the Rhine chips work, * you're not allowed to directly access the EEPROM once * they've been programmed a special way. Consequently, * we need to read the node address from the PAR0 and PAR1 * registers. * Reloading EEPROM also overwrites VR_CFGA, VR_CFGB, * VR_CFGC and VR_CFGD such that memory mapped IO configured * by driver is reset to default state. 
*/ VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD); for (i = VR_TIMEOUT; i > 0; i--) { DELAY(1); if ((CSR_READ_1(sc, VR_EECSR) & VR_EECSR_LOAD) == 0) break; } if (i == 0) device_printf(dev, "Reloading EEPROM timeout!\n"); for (i = 0; i < ETHER_ADDR_LEN; i++) eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i); /* Reset the adapter. */ vr_reset(sc); /* Ack intr & disable further interrupts. */ CSR_WRITE_2(sc, VR_ISR, 0xFFFF); CSR_WRITE_2(sc, VR_IMR, 0); if (sc->vr_revid >= REV_ID_VT6102_A) CSR_WRITE_2(sc, VR_MII_IMR, 0); if (sc->vr_revid < REV_ID_VT6102_A) { pci_write_config(dev, VR_PCI_MODE2, pci_read_config(dev, VR_PCI_MODE2, 1) | VR_MODE2_MODE10T, 1); } else { /* Report error instead of retrying forever. */ pci_write_config(dev, VR_PCI_MODE2, pci_read_config(dev, VR_PCI_MODE2, 1) | VR_MODE2_PCEROPT, 1); /* Detect MII coding error. */ pci_write_config(dev, VR_PCI_MODE3, pci_read_config(dev, VR_PCI_MODE3, 1) | VR_MODE3_MIION, 1); if (sc->vr_revid >= REV_ID_VT6105_LOM && sc->vr_revid < REV_ID_VT6105M_A0) pci_write_config(dev, VR_PCI_MODE2, pci_read_config(dev, VR_PCI_MODE2, 1) | VR_MODE2_MODE10T, 1); /* Enable Memory-Read-Multiple. */ if (sc->vr_revid >= REV_ID_VT6107_A1 && sc->vr_revid < REV_ID_VT6105M_A0) pci_write_config(dev, VR_PCI_MODE2, pci_read_config(dev, VR_PCI_MODE2, 1) | VR_MODE2_MRDPL, 1); } /* Disable MII AUTOPOLL. */ VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL); if (vr_dma_alloc(sc) != 0) { error = ENXIO; goto fail; } /* Do MII setup. */ if (sc->vr_revid >= REV_ID_VT6105_A0) phy = 1; else phy = CSR_READ_1(sc, VR_PHYADDR) & VR_PHYADDR_MASK; error = mii_attach(dev, &sc->vr_miibus, ifp, vr_ifmedia_upd, vr_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, sc->vr_revid >= REV_ID_VT6102_A ? MIIF_DOPAUSE : 0); if (error != 0) { device_printf(dev, "attaching PHYs failed\n"); goto fail; } /* Call MI attach routine. */ ether_ifattach(ifp, eaddr); /* * Tell the upper layer(s) we support long frames. * Must appear after the call to ether_ifattach() because * ether_ifattach() sets ifi_hdrlen to the default value. */ ifp->if_hdrlen = sizeof(struct ether_vlan_header); /* Hook interrupt last to avoid having to lock softc. */ error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET | INTR_MPSAFE, vr_intr, NULL, sc, &sc->vr_intrhand); if (error) { device_printf(dev, "couldn't set up irq\n"); ether_ifdetach(ifp); goto fail; } fail: if (error) vr_detach(dev); return (error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. */ static int vr_detach(device_t dev) { struct vr_softc *sc = device_get_softc(dev); struct ifnet *ifp = sc->vr_ifp; KASSERT(mtx_initialized(&sc->vr_mtx), ("vr mutex not initialized")); #ifdef DEVICE_POLLING if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif /* These should only be active if attach succeeded. 
*/ if (device_is_attached(dev)) { VR_LOCK(sc); sc->vr_flags |= VR_F_DETACHED; vr_stop(sc); VR_UNLOCK(sc); callout_drain(&sc->vr_stat_callout); taskqueue_drain(taskqueue_fast, &sc->vr_inttask); ether_ifdetach(ifp); } if (sc->vr_miibus) device_delete_child(dev, sc->vr_miibus); bus_generic_detach(dev); if (sc->vr_intrhand) bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand); if (sc->vr_irq) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq); if (sc->vr_res) bus_release_resource(dev, sc->vr_res_type, sc->vr_res_id, sc->vr_res); if (ifp) if_free(ifp); vr_dma_free(sc); mtx_destroy(&sc->vr_mtx); return (0); } struct vr_dmamap_arg { bus_addr_t vr_busaddr; }; static void vr_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct vr_dmamap_arg *ctx; if (error != 0) return; ctx = arg; ctx->vr_busaddr = segs[0].ds_addr; } static int vr_dma_alloc(struct vr_softc *sc) { struct vr_dmamap_arg ctx; struct vr_txdesc *txd; struct vr_rxdesc *rxd; bus_size_t tx_alignment; int error, i; /* Create parent DMA tag. */ error = bus_dma_tag_create( bus_get_dma_tag(sc->vr_dev), /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 0, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->vr_cdata.vr_parent_tag); if (error != 0) { device_printf(sc->vr_dev, "failed to create parent DMA tag\n"); goto fail; } /* Create tag for Tx ring. */ error = bus_dma_tag_create( sc->vr_cdata.vr_parent_tag, /* parent */ VR_RING_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ VR_TX_RING_SIZE, /* maxsize */ 1, /* nsegments */ VR_TX_RING_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->vr_cdata.vr_tx_ring_tag); if (error != 0) { device_printf(sc->vr_dev, "failed to create Tx ring DMA tag\n"); goto fail; } /* Create tag for Rx ring. */ error = bus_dma_tag_create( sc->vr_cdata.vr_parent_tag, /* parent */ VR_RING_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ VR_RX_RING_SIZE, /* maxsize */ 1, /* nsegments */ VR_RX_RING_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->vr_cdata.vr_rx_ring_tag); if (error != 0) { device_printf(sc->vr_dev, "failed to create Rx ring DMA tag\n"); goto fail; } if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0) tx_alignment = sizeof(uint32_t); else tx_alignment = 1; /* Create tag for Tx buffers. */ error = bus_dma_tag_create( sc->vr_cdata.vr_parent_tag, /* parent */ tx_alignment, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES * VR_MAXFRAGS, /* maxsize */ VR_MAXFRAGS, /* nsegments */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->vr_cdata.vr_tx_tag); if (error != 0) { device_printf(sc->vr_dev, "failed to create Tx DMA tag\n"); goto fail; } /* Create tag for Rx buffers. 
*/ error = bus_dma_tag_create( sc->vr_cdata.vr_parent_tag, /* parent */ VR_RX_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES, /* maxsize */ 1, /* nsegments */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->vr_cdata.vr_rx_tag); if (error != 0) { device_printf(sc->vr_dev, "failed to create Rx DMA tag\n"); goto fail; } /* Allocate DMA'able memory and load the DMA map for Tx ring. */ error = bus_dmamem_alloc(sc->vr_cdata.vr_tx_ring_tag, (void **)&sc->vr_rdata.vr_tx_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_tx_ring_map); if (error != 0) { device_printf(sc->vr_dev, "failed to allocate DMA'able memory for Tx ring\n"); goto fail; } ctx.vr_busaddr = 0; error = bus_dmamap_load(sc->vr_cdata.vr_tx_ring_tag, sc->vr_cdata.vr_tx_ring_map, sc->vr_rdata.vr_tx_ring, VR_TX_RING_SIZE, vr_dmamap_cb, &ctx, 0); if (error != 0 || ctx.vr_busaddr == 0) { device_printf(sc->vr_dev, "failed to load DMA'able memory for Tx ring\n"); goto fail; } sc->vr_rdata.vr_tx_ring_paddr = ctx.vr_busaddr; /* Allocate DMA'able memory and load the DMA map for Rx ring. */ error = bus_dmamem_alloc(sc->vr_cdata.vr_rx_ring_tag, (void **)&sc->vr_rdata.vr_rx_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_rx_ring_map); if (error != 0) { device_printf(sc->vr_dev, "failed to allocate DMA'able memory for Rx ring\n"); goto fail; } ctx.vr_busaddr = 0; error = bus_dmamap_load(sc->vr_cdata.vr_rx_ring_tag, sc->vr_cdata.vr_rx_ring_map, sc->vr_rdata.vr_rx_ring, VR_RX_RING_SIZE, vr_dmamap_cb, &ctx, 0); if (error != 0 || ctx.vr_busaddr == 0) { device_printf(sc->vr_dev, "failed to load DMA'able memory for Rx ring\n"); goto fail; } sc->vr_rdata.vr_rx_ring_paddr = ctx.vr_busaddr; /* Create DMA maps for Tx buffers. */ for (i = 0; i < VR_TX_RING_CNT; i++) { txd = &sc->vr_cdata.vr_txdesc[i]; txd->tx_m = NULL; txd->tx_dmamap = NULL; error = bus_dmamap_create(sc->vr_cdata.vr_tx_tag, 0, &txd->tx_dmamap); if (error != 0) { device_printf(sc->vr_dev, "failed to create Tx dmamap\n"); goto fail; } } /* Create DMA maps for Rx buffers. */ if ((error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0, &sc->vr_cdata.vr_rx_sparemap)) != 0) { device_printf(sc->vr_dev, "failed to create spare Rx dmamap\n"); goto fail; } for (i = 0; i < VR_RX_RING_CNT; i++) { rxd = &sc->vr_cdata.vr_rxdesc[i]; rxd->rx_m = NULL; rxd->rx_dmamap = NULL; error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0, &rxd->rx_dmamap); if (error != 0) { device_printf(sc->vr_dev, "failed to create Rx dmamap\n"); goto fail; } } fail: return (error); } static void vr_dma_free(struct vr_softc *sc) { struct vr_txdesc *txd; struct vr_rxdesc *rxd; int i; /* Tx ring. */ if (sc->vr_cdata.vr_tx_ring_tag) { if (sc->vr_rdata.vr_tx_ring_paddr) bus_dmamap_unload(sc->vr_cdata.vr_tx_ring_tag, sc->vr_cdata.vr_tx_ring_map); if (sc->vr_rdata.vr_tx_ring) bus_dmamem_free(sc->vr_cdata.vr_tx_ring_tag, sc->vr_rdata.vr_tx_ring, sc->vr_cdata.vr_tx_ring_map); sc->vr_rdata.vr_tx_ring = NULL; sc->vr_rdata.vr_tx_ring_paddr = 0; bus_dma_tag_destroy(sc->vr_cdata.vr_tx_ring_tag); sc->vr_cdata.vr_tx_ring_tag = NULL; } /* Rx ring. 
*/ if (sc->vr_cdata.vr_rx_ring_tag) { if (sc->vr_rdata.vr_rx_ring_paddr) bus_dmamap_unload(sc->vr_cdata.vr_rx_ring_tag, sc->vr_cdata.vr_rx_ring_map); if (sc->vr_rdata.vr_rx_ring) bus_dmamem_free(sc->vr_cdata.vr_rx_ring_tag, sc->vr_rdata.vr_rx_ring, sc->vr_cdata.vr_rx_ring_map); sc->vr_rdata.vr_rx_ring = NULL; sc->vr_rdata.vr_rx_ring_paddr = 0; bus_dma_tag_destroy(sc->vr_cdata.vr_rx_ring_tag); sc->vr_cdata.vr_rx_ring_tag = NULL; } /* Tx buffers. */ if (sc->vr_cdata.vr_tx_tag) { for (i = 0; i < VR_TX_RING_CNT; i++) { txd = &sc->vr_cdata.vr_txdesc[i]; if (txd->tx_dmamap) { bus_dmamap_destroy(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap); txd->tx_dmamap = NULL; } } bus_dma_tag_destroy(sc->vr_cdata.vr_tx_tag); sc->vr_cdata.vr_tx_tag = NULL; } /* Rx buffers. */ if (sc->vr_cdata.vr_rx_tag) { for (i = 0; i < VR_RX_RING_CNT; i++) { rxd = &sc->vr_cdata.vr_rxdesc[i]; if (rxd->rx_dmamap) { bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap); rxd->rx_dmamap = NULL; } } if (sc->vr_cdata.vr_rx_sparemap) { bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag, sc->vr_cdata.vr_rx_sparemap); sc->vr_cdata.vr_rx_sparemap = 0; } bus_dma_tag_destroy(sc->vr_cdata.vr_rx_tag); sc->vr_cdata.vr_rx_tag = NULL; } if (sc->vr_cdata.vr_parent_tag) { bus_dma_tag_destroy(sc->vr_cdata.vr_parent_tag); sc->vr_cdata.vr_parent_tag = NULL; } } /* * Initialize the transmit descriptors. */ static int vr_tx_ring_init(struct vr_softc *sc) { struct vr_ring_data *rd; struct vr_txdesc *txd; bus_addr_t addr; int i; sc->vr_cdata.vr_tx_prod = 0; sc->vr_cdata.vr_tx_cons = 0; sc->vr_cdata.vr_tx_cnt = 0; sc->vr_cdata.vr_tx_pkts = 0; rd = &sc->vr_rdata; bzero(rd->vr_tx_ring, VR_TX_RING_SIZE); for (i = 0; i < VR_TX_RING_CNT; i++) { if (i == VR_TX_RING_CNT - 1) addr = VR_TX_RING_ADDR(sc, 0); else addr = VR_TX_RING_ADDR(sc, i + 1); rd->vr_tx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr)); txd = &sc->vr_cdata.vr_txdesc[i]; txd->tx_m = NULL; } bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag, sc->vr_cdata.vr_tx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } /* * Initialize the RX descriptors and allocate mbufs for them. Note that * we arrange the descriptors in a closed ring, so that the last descriptor * points back to the first. */ static int vr_rx_ring_init(struct vr_softc *sc) { struct vr_ring_data *rd; struct vr_rxdesc *rxd; bus_addr_t addr; int i; sc->vr_cdata.vr_rx_cons = 0; rd = &sc->vr_rdata; bzero(rd->vr_rx_ring, VR_RX_RING_SIZE); for (i = 0; i < VR_RX_RING_CNT; i++) { rxd = &sc->vr_cdata.vr_rxdesc[i]; rxd->rx_m = NULL; rxd->desc = &rd->vr_rx_ring[i]; if (i == VR_RX_RING_CNT - 1) addr = VR_RX_RING_ADDR(sc, 0); else addr = VR_RX_RING_ADDR(sc, i + 1); rd->vr_rx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr)); if (vr_newbuf(sc, i) != 0) return (ENOBUFS); } bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag, sc->vr_cdata.vr_rx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } static __inline void vr_discard_rxbuf(struct vr_rxdesc *rxd) { struct vr_desc *desc; desc = rxd->desc; desc->vr_ctl = htole32(VR_RXCTL | (MCLBYTES - sizeof(uint64_t))); desc->vr_status = htole32(VR_RXSTAT_OWN); } /* * Initialize an RX descriptor and attach an MBUF cluster. * Note: the length fields are only 11 bits wide, which means the * largest size we can specify is 2047. This is important because * MCLBYTES is 2048, so we have to subtract one otherwise we'll * overflow the field and make a mess. 
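/*
 * Illustrative note (not part of the patch): the Rx descriptor length
 * field is 11 bits, so the largest usable buffer is 2047 bytes.  Rather
 * than subtracting one, vr_newbuf() below trims sizeof(uint64_t) off the
 * 2048-byte cluster (2048 - 8 = 2040), which both fits the field and
 * preserves the 32-bit alignment the chip expects for Rx buffers.  A
 * compile-time check along these lines (hypothetical, not in the driver)
 * would document the limit:
 */
CTASSERT(MCLBYTES - sizeof(uint64_t) <= 0x7ff);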
*/ static int vr_newbuf(struct vr_softc *sc, int idx) { struct vr_desc *desc; struct vr_rxdesc *rxd; struct mbuf *m; bus_dma_segment_t segs[1]; bus_dmamap_t map; int nsegs; m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = MCLBYTES; m_adj(m, sizeof(uint64_t)); if (bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_rx_tag, sc->vr_cdata.vr_rx_sparemap, m, segs, &nsegs, 0) != 0) { m_freem(m); return (ENOBUFS); } KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); rxd = &sc->vr_cdata.vr_rxdesc[idx]; if (rxd->rx_m != NULL) { bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap); } map = rxd->rx_dmamap; rxd->rx_dmamap = sc->vr_cdata.vr_rx_sparemap; sc->vr_cdata.vr_rx_sparemap = map; bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_PREREAD); rxd->rx_m = m; desc = rxd->desc; desc->vr_data = htole32(VR_ADDR_LO(segs[0].ds_addr)); desc->vr_ctl = htole32(VR_RXCTL | segs[0].ds_len); desc->vr_status = htole32(VR_RXSTAT_OWN); return (0); } #ifndef __NO_STRICT_ALIGNMENT static __inline void vr_fixup_rx(struct mbuf *m) { uint16_t *src, *dst; int i; src = mtod(m, uint16_t *); dst = src - 1; for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) *dst++ = *src++; m->m_data -= ETHER_ALIGN; } #endif /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. */ static int vr_rxeof(struct vr_softc *sc) { struct vr_rxdesc *rxd; struct mbuf *m; struct ifnet *ifp; struct vr_desc *cur_rx; int cons, prog, total_len, rx_npkts; uint32_t rxstat, rxctl; VR_LOCK_ASSERT(sc); ifp = sc->vr_ifp; cons = sc->vr_cdata.vr_rx_cons; rx_npkts = 0; bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag, sc->vr_cdata.vr_rx_ring_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); for (prog = 0; prog < VR_RX_RING_CNT; VR_INC(cons, VR_RX_RING_CNT)) { #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { if (sc->rxcycles <= 0) break; sc->rxcycles--; } #endif cur_rx = &sc->vr_rdata.vr_rx_ring[cons]; rxstat = le32toh(cur_rx->vr_status); rxctl = le32toh(cur_rx->vr_ctl); if ((rxstat & VR_RXSTAT_OWN) == VR_RXSTAT_OWN) break; prog++; rxd = &sc->vr_cdata.vr_rxdesc[cons]; m = rxd->rx_m; /* * If an error occurs, update stats, clear the * status word and leave the mbuf cluster in place: * it should simply get re-used next time this descriptor * comes up in the ring. * We don't support SG in Rx path yet, so discard * partial frame. 
*/ if ((rxstat & VR_RXSTAT_RX_OK) == 0 || (rxstat & (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) != (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); sc->vr_stat.rx_errors++; if (rxstat & VR_RXSTAT_CRCERR) sc->vr_stat.rx_crc_errors++; if (rxstat & VR_RXSTAT_FRAMEALIGNERR) sc->vr_stat.rx_alignment++; if (rxstat & VR_RXSTAT_FIFOOFLOW) sc->vr_stat.rx_fifo_overflows++; if (rxstat & VR_RXSTAT_GIANT) sc->vr_stat.rx_giants++; if (rxstat & VR_RXSTAT_RUNT) sc->vr_stat.rx_runts++; if (rxstat & VR_RXSTAT_BUFFERR) sc->vr_stat.rx_no_buffers++; #ifdef VR_SHOW_ERRORS device_printf(sc->vr_dev, "%s: receive error = 0x%b\n", __func__, rxstat & 0xff, VR_RXSTAT_ERR_BITS); #endif vr_discard_rxbuf(rxd); continue; } if (vr_newbuf(sc, cons) != 0) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); sc->vr_stat.rx_errors++; sc->vr_stat.rx_no_mbufs++; vr_discard_rxbuf(rxd); continue; } /* * XXX The VIA Rhine chip includes the CRC with every * received frame, and there's no way to turn this * behavior off (at least, I can't find anything in * the manual that explains how to do it) so we have * to trim off the CRC manually. */ total_len = VR_RXBYTES(rxstat); total_len -= ETHER_CRC_LEN; m->m_pkthdr.len = m->m_len = total_len; #ifndef __NO_STRICT_ALIGNMENT /* * RX buffers must be 32-bit aligned. * Ignore the alignment problems on the non-strict alignment * platform. The performance hit incurred due to unaligned * accesses is much smaller than the hit produced by forcing * buffer copies all the time. */ vr_fixup_rx(m); #endif m->m_pkthdr.rcvif = ifp; if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); sc->vr_stat.rx_ok++; if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 && (rxstat & VR_RXSTAT_FRAG) == 0 && (rxctl & VR_RXCTL_IP) != 0) { /* Checksum is valid for non-fragmented IP packets. */ m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if ((rxctl & VR_RXCTL_IPOK) == VR_RXCTL_IPOK) { m->m_pkthdr.csum_flags |= CSUM_IP_VALID; if (rxctl & (VR_RXCTL_TCP | VR_RXCTL_UDP)) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; if ((rxctl & VR_RXCTL_TCPUDPOK) != 0) m->m_pkthdr.csum_data = 0xffff; } } } VR_UNLOCK(sc); (*ifp->if_input)(ifp, m); VR_LOCK(sc); rx_npkts++; } if (prog > 0) { /* * Let controller know how many number of RX buffers * are posted but avoid expensive register access if * TX pause capability was not negotiated with link * partner. */ if ((sc->vr_flags & VR_F_TXPAUSE) != 0) { if (prog >= VR_RX_RING_CNT) prog = VR_RX_RING_CNT - 1; CSR_WRITE_1(sc, VR_FLOWCR0, prog); } sc->vr_cdata.vr_rx_cons = cons; bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag, sc->vr_cdata.vr_rx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } return (rx_npkts); } /* * A frame was downloaded to the chip. It's safe for us to clean up * the list buffers. */ static void vr_txeof(struct vr_softc *sc) { struct vr_txdesc *txd; struct vr_desc *cur_tx; struct ifnet *ifp; uint32_t txctl, txstat; int cons, prod; VR_LOCK_ASSERT(sc); cons = sc->vr_cdata.vr_tx_cons; prod = sc->vr_cdata.vr_tx_prod; if (cons == prod) return; bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag, sc->vr_cdata.vr_tx_ring_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); ifp = sc->vr_ifp; /* * Go through our tx list and free mbufs for those * frames that have been transmitted. 
*/ for (; cons != prod; VR_INC(cons, VR_TX_RING_CNT)) { cur_tx = &sc->vr_rdata.vr_tx_ring[cons]; txctl = le32toh(cur_tx->vr_ctl); txstat = le32toh(cur_tx->vr_status); if ((txstat & VR_TXSTAT_OWN) == VR_TXSTAT_OWN) break; sc->vr_cdata.vr_tx_cnt--; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; /* Only the first descriptor in the chain is valid. */ if ((txctl & VR_TXCTL_FIRSTFRAG) == 0) continue; txd = &sc->vr_cdata.vr_txdesc[cons]; KASSERT(txd->tx_m != NULL, ("%s: accessing NULL mbuf!\n", __func__)); if ((txstat & VR_TXSTAT_ERRSUM) != 0) { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); sc->vr_stat.tx_errors++; if ((txstat & VR_TXSTAT_ABRT) != 0) { /* Give up and restart Tx. */ sc->vr_stat.tx_abort++; bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap); m_freem(txd->tx_m); txd->tx_m = NULL; VR_INC(cons, VR_TX_RING_CNT); sc->vr_cdata.vr_tx_cons = cons; if (vr_tx_stop(sc) != 0) { device_printf(sc->vr_dev, "%s: Tx shutdown error -- " "resetting\n", __func__); sc->vr_flags |= VR_F_RESTART; return; } vr_tx_start(sc); break; } if ((sc->vr_revid < REV_ID_VT3071_A && (txstat & VR_TXSTAT_UNDERRUN)) || (txstat & (VR_TXSTAT_UDF | VR_TXSTAT_TBUFF))) { sc->vr_stat.tx_underrun++; /* Retry and restart Tx. */ sc->vr_cdata.vr_tx_cnt++; sc->vr_cdata.vr_tx_cons = cons; cur_tx->vr_status = htole32(VR_TXSTAT_OWN); bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag, sc->vr_cdata.vr_tx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); vr_tx_underrun(sc); return; } if ((txstat & VR_TXSTAT_DEFER) != 0) { if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); sc->vr_stat.tx_collisions++; } if ((txstat & VR_TXSTAT_LATECOLL) != 0) { if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); sc->vr_stat.tx_late_collisions++; } } else { sc->vr_stat.tx_ok++; if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); } bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap); if (sc->vr_revid < REV_ID_VT3071_A) { if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (txstat & VR_TXSTAT_COLLCNT) >> 3); sc->vr_stat.tx_collisions += (txstat & VR_TXSTAT_COLLCNT) >> 3; } else { if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (txstat & 0x0f)); sc->vr_stat.tx_collisions += (txstat & 0x0f); } m_freem(txd->tx_m); txd->tx_m = NULL; } sc->vr_cdata.vr_tx_cons = cons; if (sc->vr_cdata.vr_tx_cnt == 0) sc->vr_watchdog_timer = 0; } static void vr_tick(void *xsc) { struct vr_softc *sc; struct mii_data *mii; sc = (struct vr_softc *)xsc; VR_LOCK_ASSERT(sc); if ((sc->vr_flags & VR_F_RESTART) != 0) { device_printf(sc->vr_dev, "restarting\n"); sc->vr_stat.num_restart++; sc->vr_ifp->if_drv_flags &= ~IFF_DRV_RUNNING; vr_init_locked(sc); sc->vr_flags &= ~VR_F_RESTART; } mii = device_get_softc(sc->vr_miibus); mii_tick(mii); if ((sc->vr_flags & VR_F_LINK) == 0) vr_miibus_statchg(sc->vr_dev); vr_watchdog(sc); callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc); } #ifdef DEVICE_POLLING static poll_handler_t vr_poll; static poll_handler_t vr_poll_locked; static int vr_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct vr_softc *sc; int rx_npkts; sc = ifp->if_softc; rx_npkts = 0; VR_LOCK(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) rx_npkts = vr_poll_locked(ifp, cmd, count); VR_UNLOCK(sc); return (rx_npkts); } static int vr_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct vr_softc *sc; int rx_npkts; sc = ifp->if_softc; VR_LOCK_ASSERT(sc); sc->rxcycles = count; rx_npkts = vr_rxeof(sc); vr_txeof(sc); if 
(!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) vr_start_locked(ifp); if (cmd == POLL_AND_CHECK_STATUS) { uint16_t status; /* Also check status register. */ status = CSR_READ_2(sc, VR_ISR); if (status) CSR_WRITE_2(sc, VR_ISR, status); if ((status & VR_INTRS) == 0) return (rx_npkts); if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 | VR_ISR_STATSOFLOW)) != 0) { if (vr_error(sc, status) != 0) return (rx_npkts); } if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) { #ifdef VR_SHOW_ERRORS device_printf(sc->vr_dev, "%s: receive error : 0x%b\n", __func__, status, VR_ISR_ERR_BITS); #endif vr_rx_start(sc); } } return (rx_npkts); } #endif /* DEVICE_POLLING */ /* Back off the transmit threshold. */ static void vr_tx_underrun(struct vr_softc *sc) { int thresh; device_printf(sc->vr_dev, "Tx underrun -- "); if (sc->vr_txthresh < VR_TXTHRESH_MAX) { thresh = sc->vr_txthresh; sc->vr_txthresh++; if (sc->vr_txthresh >= VR_TXTHRESH_MAX) { sc->vr_txthresh = VR_TXTHRESH_MAX; printf("using store and forward mode\n"); } else printf("increasing Tx threshold(%d -> %d)\n", vr_tx_threshold_tables[thresh].value, vr_tx_threshold_tables[thresh + 1].value); } else printf("\n"); sc->vr_stat.tx_underrun++; if (vr_tx_stop(sc) != 0) { device_printf(sc->vr_dev, "%s: Tx shutdown error -- " "resetting\n", __func__); sc->vr_flags |= VR_F_RESTART; return; } vr_tx_start(sc); } static int vr_intr(void *arg) { struct vr_softc *sc; uint16_t status; sc = (struct vr_softc *)arg; status = CSR_READ_2(sc, VR_ISR); if (status == 0 || status == 0xffff || (status & VR_INTRS) == 0) return (FILTER_STRAY); /* Disable interrupts. */ CSR_WRITE_2(sc, VR_IMR, 0x0000); taskqueue_enqueue(taskqueue_fast, &sc->vr_inttask); return (FILTER_HANDLED); } static void vr_int_task(void *arg, int npending) { struct vr_softc *sc; struct ifnet *ifp; uint16_t status; sc = (struct vr_softc *)arg; VR_LOCK(sc); if ((sc->vr_flags & VR_F_SUSPENDED) != 0) goto done_locked; status = CSR_READ_2(sc, VR_ISR); ifp = sc->vr_ifp; #ifdef DEVICE_POLLING if ((ifp->if_capenable & IFCAP_POLLING) != 0) goto done_locked; #endif /* Suppress unwanted interrupts. */ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || (sc->vr_flags & VR_F_RESTART) != 0) { CSR_WRITE_2(sc, VR_IMR, 0); CSR_WRITE_2(sc, VR_ISR, status); goto done_locked; } for (; (status & VR_INTRS) != 0;) { CSR_WRITE_2(sc, VR_ISR, status); if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 | VR_ISR_STATSOFLOW)) != 0) { if (vr_error(sc, status) != 0) { VR_UNLOCK(sc); return; } } vr_rxeof(sc); if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) { #ifdef VR_SHOW_ERRORS device_printf(sc->vr_dev, "%s: receive error = 0x%b\n", __func__, status, VR_ISR_ERR_BITS); #endif /* Restart Rx if RxDMA SM was stopped. */ vr_rx_start(sc); } vr_txeof(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) vr_start_locked(ifp); status = CSR_READ_2(sc, VR_ISR); } /* Re-enable interrupts. */ CSR_WRITE_2(sc, VR_IMR, VR_INTRS); done_locked: VR_UNLOCK(sc); } static int vr_error(struct vr_softc *sc, uint16_t status) { uint16_t pcis; status &= VR_ISR_BUSERR | VR_ISR_LINKSTAT2 | VR_ISR_STATSOFLOW; if ((status & VR_ISR_BUSERR) != 0) { status &= ~VR_ISR_BUSERR; sc->vr_stat.bus_errors++; /* Disable further interrupts. */ CSR_WRITE_2(sc, VR_IMR, 0); pcis = pci_read_config(sc->vr_dev, PCIR_STATUS, 2); device_printf(sc->vr_dev, "PCI bus error(0x%04x) -- " "resetting\n", pcis); pci_write_config(sc->vr_dev, PCIR_STATUS, pcis, 2); sc->vr_flags |= VR_F_RESTART; return (EAGAIN); } if ((status & VR_ISR_LINKSTAT2) != 0) { /* Link state change, duplex changes etc. 
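/*
 * Context for the TASK_INIT -> NET_TASK_INIT change in vr_attach() earlier
 * in this hunk (a sketch of the assumed rationale, not code from the
 * patch): vr_intr() is a filter handler that only masks the IMR and defers
 * the real work to taskqueue_fast, and vr_int_task() reaches if_input()
 * through vr_rxeof(), so it must run inside the network epoch.
 * NET_TASK_INIT() marks the task so the taskqueue enters the epoch before
 * dispatching the handler.  The helper below is hypothetical and merely
 * restates the attach-time initialization:
 */
static void
vr_inttask_init_sketch(struct vr_softc *sc)
{
	/* Declare the deferred interrupt task as a network task. */
	NET_TASK_INIT(&sc->vr_inttask, 0, vr_int_task, sc);
}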
*/ status &= ~VR_ISR_LINKSTAT2; } if ((status & VR_ISR_STATSOFLOW) != 0) { status &= ~VR_ISR_STATSOFLOW; if (sc->vr_revid >= REV_ID_VT6105M_A0) { /* Update MIB counters. */ } } if (status != 0) device_printf(sc->vr_dev, "unhandled interrupt, status = 0x%04x\n", status); return (0); } /* * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data * pointers to the fragment pointers. */ static int vr_encap(struct vr_softc *sc, struct mbuf **m_head) { struct vr_txdesc *txd; struct vr_desc *desc; struct mbuf *m; bus_dma_segment_t txsegs[VR_MAXFRAGS]; uint32_t csum_flags, txctl; int error, i, nsegs, prod, si; int padlen; VR_LOCK_ASSERT(sc); M_ASSERTPKTHDR((*m_head)); /* * Some VIA Rhine wants packet buffers to be longword * aligned, but very often our mbufs aren't. Rather than * waste time trying to decide when to copy and when not * to copy, just do it all the time. */ if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0) { m = m_defrag(*m_head, M_NOWAIT); if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } *m_head = m; } /* * The Rhine chip doesn't auto-pad, so we have to make * sure to pad short frames out to the minimum frame length * ourselves. */ if ((*m_head)->m_pkthdr.len < VR_MIN_FRAMELEN) { m = *m_head; padlen = VR_MIN_FRAMELEN - m->m_pkthdr.len; if (M_WRITABLE(m) == 0) { /* Get a writable copy. */ m = m_dup(*m_head, M_NOWAIT); m_freem(*m_head); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } *m_head = m; } if (m->m_next != NULL || M_TRAILINGSPACE(m) < padlen) { m = m_defrag(m, M_NOWAIT); if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } } /* * Manually pad short frames, and zero the pad space * to avoid leaking data. */ bzero(mtod(m, char *) + m->m_pkthdr.len, padlen); m->m_pkthdr.len += padlen; m->m_len = m->m_pkthdr.len; *m_head = m; } prod = sc->vr_cdata.vr_tx_prod; txd = &sc->vr_cdata.vr_txdesc[prod]; error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); if (error == EFBIG) { m = m_collapse(*m_head, M_NOWAIT, VR_MAXFRAGS); if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } *m_head = m; error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { m_freem(*m_head); *m_head = NULL; return (error); } } else if (error != 0) return (error); if (nsegs == 0) { m_freem(*m_head); *m_head = NULL; return (EIO); } /* Check number of available descriptors. */ if (sc->vr_cdata.vr_tx_cnt + nsegs >= (VR_TX_RING_CNT - 1)) { bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap); return (ENOBUFS); } txd->tx_m = *m_head; bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap, BUS_DMASYNC_PREWRITE); /* Set checksum offload. */ csum_flags = 0; if (((*m_head)->m_pkthdr.csum_flags & VR_CSUM_FEATURES) != 0) { if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) csum_flags |= VR_TXCTL_IPCSUM; if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP) csum_flags |= VR_TXCTL_TCPCSUM; if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP) csum_flags |= VR_TXCTL_UDPCSUM; } /* * Quite contrary to datasheet for VIA Rhine, VR_TXCTL_TLINK bit * is required for all descriptors regardless of single or * multiple buffers. Also VR_TXSTAT_OWN bit is valid only for * the first descriptor for a multi-fragmented frames. Without * that VIA Rhine chip generates Tx underrun interrupts and can't * send any frames. 
*/ si = prod; for (i = 0; i < nsegs; i++) { desc = &sc->vr_rdata.vr_tx_ring[prod]; desc->vr_status = 0; txctl = txsegs[i].ds_len | VR_TXCTL_TLINK | csum_flags; if (i == 0) txctl |= VR_TXCTL_FIRSTFRAG; desc->vr_ctl = htole32(txctl); desc->vr_data = htole32(VR_ADDR_LO(txsegs[i].ds_addr)); sc->vr_cdata.vr_tx_cnt++; VR_INC(prod, VR_TX_RING_CNT); } /* Update producer index. */ sc->vr_cdata.vr_tx_prod = prod; prod = (prod + VR_TX_RING_CNT - 1) % VR_TX_RING_CNT; desc = &sc->vr_rdata.vr_tx_ring[prod]; /* * Set EOP on the last desciptor and reuqest Tx completion * interrupt for every VR_TX_INTR_THRESH-th frames. */ VR_INC(sc->vr_cdata.vr_tx_pkts, VR_TX_INTR_THRESH); if (sc->vr_cdata.vr_tx_pkts == 0) desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG | VR_TXCTL_FINT); else desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG); /* Lastly turn the first descriptor ownership to hardware. */ desc = &sc->vr_rdata.vr_tx_ring[si]; desc->vr_status |= htole32(VR_TXSTAT_OWN); /* Sync descriptors. */ bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag, sc->vr_cdata.vr_tx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } static void vr_start(struct ifnet *ifp) { struct vr_softc *sc; sc = ifp->if_softc; VR_LOCK(sc); vr_start_locked(ifp); VR_UNLOCK(sc); } static void vr_start_locked(struct ifnet *ifp) { struct vr_softc *sc; struct mbuf *m_head; int enq; sc = ifp->if_softc; VR_LOCK_ASSERT(sc); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING || (sc->vr_flags & VR_F_LINK) == 0) return; for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->vr_cdata.vr_tx_cnt < VR_TX_RING_CNT - 2; ) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* * Pack the data into the transmit ring. If we * don't have room, set the OACTIVE flag and wait * for the NIC to drain the ring. */ if (vr_encap(sc, &m_head)) { if (m_head == NULL) break; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } enq++; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ ETHER_BPF_MTAP(ifp, m_head); } if (enq > 0) { /* Tell the chip to start transmitting. */ VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO); /* Set a timeout in case the chip goes out to lunch. */ sc->vr_watchdog_timer = 5; } } static void vr_init(void *xsc) { struct vr_softc *sc; sc = (struct vr_softc *)xsc; VR_LOCK(sc); vr_init_locked(sc); VR_UNLOCK(sc); } static void vr_init_locked(struct vr_softc *sc) { struct ifnet *ifp; struct mii_data *mii; bus_addr_t addr; int i; VR_LOCK_ASSERT(sc); ifp = sc->vr_ifp; mii = device_get_softc(sc->vr_miibus); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) return; /* Cancel pending I/O and free all RX/TX buffers. */ vr_stop(sc); vr_reset(sc); /* Set our station address. */ for (i = 0; i < ETHER_ADDR_LEN; i++) CSR_WRITE_1(sc, VR_PAR0 + i, IF_LLADDR(sc->vr_ifp)[i]); /* Set DMA size. */ VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH); VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD); /* * BCR0 and BCR1 can override the RXCFG and TXCFG registers, * so we must set both. */ VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH); VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES); VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH); VR_SETBIT(sc, VR_BCR1, vr_tx_threshold_tables[sc->vr_txthresh].bcr_cfg); VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH); VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES); VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH); VR_SETBIT(sc, VR_TXCFG, vr_tx_threshold_tables[sc->vr_txthresh].tx_cfg); /* Init circular RX list. 
*/ if (vr_rx_ring_init(sc) != 0) { device_printf(sc->vr_dev, "initialization failed: no memory for rx buffers\n"); vr_stop(sc); return; } /* Init tx descriptors. */ vr_tx_ring_init(sc); if ((sc->vr_quirks & VR_Q_CAM) != 0) { uint8_t vcam[2] = { 0, 0 }; /* Disable VLAN hardware tag insertion/stripping. */ VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TXTAGEN | VR_TXCFG_RXTAGCTL); /* Disable VLAN hardware filtering. */ VR_CLRBIT(sc, VR_BCR1, VR_BCR1_VLANFILT_ENB); /* Disable all CAM entries. */ vr_cam_mask(sc, VR_MCAST_CAM, 0); vr_cam_mask(sc, VR_VLAN_CAM, 0); /* Enable the first VLAN CAM. */ vr_cam_data(sc, VR_VLAN_CAM, 0, vcam); vr_cam_mask(sc, VR_VLAN_CAM, 1); } /* * Set up receive filter. */ vr_set_filter(sc); /* * Load the address of the RX ring. */ addr = VR_RX_RING_ADDR(sc, 0); CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr)); /* * Load the address of the TX ring. */ addr = VR_TX_RING_ADDR(sc, 0); CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr)); /* Default : full-duplex, no Tx poll. */ CSR_WRITE_1(sc, VR_CR1, VR_CR1_FULLDUPLEX | VR_CR1_TX_NOPOLL); /* Set flow-control parameters for Rhine III. */ if (sc->vr_revid >= REV_ID_VT6105_A0) { /* * Configure Rx buffer count available for incoming * packet. * Even though data sheet says almost nothing about * this register, this register should be updated * whenever driver adds new RX buffers to controller. * Otherwise, XON frame is not sent to link partner * even if controller has enough RX buffers and you * would be isolated from network. * The controller is not smart enough to know number * of available RX buffers so driver have to let * controller know how many RX buffers are posted. * In other words, this register works like a residue * counter for RX buffers and should be initialized * to the number of total RX buffers - 1 before * enabling RX MAC. Note, this register is 8bits so * it effectively limits the maximum number of RX * buffer to be configured by controller is 255. */ CSR_WRITE_1(sc, VR_FLOWCR0, VR_RX_RING_CNT - 1); /* * Tx pause low threshold : 8 free receive buffers * Tx pause XON high threshold : 24 free receive buffers */ CSR_WRITE_1(sc, VR_FLOWCR1, VR_FLOWCR1_TXLO8 | VR_FLOWCR1_TXHI24 | VR_FLOWCR1_XONXOFF); /* Set Tx pause timer. */ CSR_WRITE_2(sc, VR_PAUSETIMER, 0xffff); } /* Enable receiver and transmitter. */ CSR_WRITE_1(sc, VR_CR0, VR_CR0_START | VR_CR0_TX_ON | VR_CR0_RX_ON | VR_CR0_RX_GO); CSR_WRITE_2(sc, VR_ISR, 0xFFFF); #ifdef DEVICE_POLLING /* * Disable interrupts if we are polling. */ if (ifp->if_capenable & IFCAP_POLLING) CSR_WRITE_2(sc, VR_IMR, 0); else #endif /* * Enable interrupts and disable MII intrs. */ CSR_WRITE_2(sc, VR_IMR, VR_INTRS); if (sc->vr_revid > REV_ID_VT6102_A) CSR_WRITE_2(sc, VR_MII_IMR, 0); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE); mii_mediachg(mii); callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc); } /* * Set media options. */ static int vr_ifmedia_upd(struct ifnet *ifp) { struct vr_softc *sc; struct mii_data *mii; struct mii_softc *miisc; int error; sc = ifp->if_softc; VR_LOCK(sc); mii = device_get_softc(sc->vr_miibus); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) PHY_RESET(miisc); sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE); error = mii_mediachg(mii); VR_UNLOCK(sc); return (error); } /* * Report current media status. 
*/ static void vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct vr_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = device_get_softc(sc->vr_miibus); VR_LOCK(sc); if ((ifp->if_flags & IFF_UP) == 0) { VR_UNLOCK(sc); return; } mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; VR_UNLOCK(sc); } static int vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct vr_softc *sc; struct ifreq *ifr; struct mii_data *mii; int error, mask; sc = ifp->if_softc; ifr = (struct ifreq *)data; error = 0; switch (command) { case SIOCSIFFLAGS: VR_LOCK(sc); if (ifp->if_flags & IFF_UP) { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { if ((ifp->if_flags ^ sc->vr_if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) vr_set_filter(sc); } else { if ((sc->vr_flags & VR_F_DETACHED) == 0) vr_init_locked(sc); } } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) vr_stop(sc); } sc->vr_if_flags = ifp->if_flags; VR_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: VR_LOCK(sc); vr_set_filter(sc); VR_UNLOCK(sc); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->vr_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; case SIOCSIFCAP: mask = ifr->ifr_reqcap ^ ifp->if_capenable; #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { if (ifr->ifr_reqcap & IFCAP_POLLING) { error = ether_poll_register(vr_poll, ifp); if (error != 0) break; VR_LOCK(sc); /* Disable interrupts. */ CSR_WRITE_2(sc, VR_IMR, 0x0000); ifp->if_capenable |= IFCAP_POLLING; VR_UNLOCK(sc); } else { error = ether_poll_deregister(ifp); /* Enable interrupts. */ VR_LOCK(sc); CSR_WRITE_2(sc, VR_IMR, VR_INTRS); ifp->if_capenable &= ~IFCAP_POLLING; VR_UNLOCK(sc); } } #endif /* DEVICE_POLLING */ if ((mask & IFCAP_TXCSUM) != 0 && (IFCAP_TXCSUM & ifp->if_capabilities) != 0) { ifp->if_capenable ^= IFCAP_TXCSUM; if ((IFCAP_TXCSUM & ifp->if_capenable) != 0) ifp->if_hwassist |= VR_CSUM_FEATURES; else ifp->if_hwassist &= ~VR_CSUM_FEATURES; } if ((mask & IFCAP_RXCSUM) != 0 && (IFCAP_RXCSUM & ifp->if_capabilities) != 0) ifp->if_capenable ^= IFCAP_RXCSUM; if ((mask & IFCAP_WOL_UCAST) != 0 && (ifp->if_capabilities & IFCAP_WOL_UCAST) != 0) ifp->if_capenable ^= IFCAP_WOL_UCAST; if ((mask & IFCAP_WOL_MAGIC) != 0 && (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0) ifp->if_capenable ^= IFCAP_WOL_MAGIC; break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static void vr_watchdog(struct vr_softc *sc) { struct ifnet *ifp; VR_LOCK_ASSERT(sc); if (sc->vr_watchdog_timer == 0 || --sc->vr_watchdog_timer) return; ifp = sc->vr_ifp; /* * Reclaim first as we don't request interrupt for every packets. 
*/ vr_txeof(sc); if (sc->vr_cdata.vr_tx_cnt == 0) return; if ((sc->vr_flags & VR_F_LINK) == 0) { if (bootverbose) if_printf(sc->vr_ifp, "watchdog timeout " "(missed link)\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; vr_init_locked(sc); return; } if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); if_printf(ifp, "watchdog timeout\n"); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; vr_init_locked(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) vr_start_locked(ifp); } static void vr_tx_start(struct vr_softc *sc) { bus_addr_t addr; uint8_t cmd; cmd = CSR_READ_1(sc, VR_CR0); if ((cmd & VR_CR0_TX_ON) == 0) { addr = VR_TX_RING_ADDR(sc, sc->vr_cdata.vr_tx_cons); CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr)); cmd |= VR_CR0_TX_ON; CSR_WRITE_1(sc, VR_CR0, cmd); } if (sc->vr_cdata.vr_tx_cnt != 0) { sc->vr_watchdog_timer = 5; VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO); } } static void vr_rx_start(struct vr_softc *sc) { bus_addr_t addr; uint8_t cmd; cmd = CSR_READ_1(sc, VR_CR0); if ((cmd & VR_CR0_RX_ON) == 0) { addr = VR_RX_RING_ADDR(sc, sc->vr_cdata.vr_rx_cons); CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr)); cmd |= VR_CR0_RX_ON; CSR_WRITE_1(sc, VR_CR0, cmd); } CSR_WRITE_1(sc, VR_CR0, cmd | VR_CR0_RX_GO); } static int vr_tx_stop(struct vr_softc *sc) { int i; uint8_t cmd; cmd = CSR_READ_1(sc, VR_CR0); if ((cmd & VR_CR0_TX_ON) != 0) { cmd &= ~VR_CR0_TX_ON; CSR_WRITE_1(sc, VR_CR0, cmd); for (i = VR_TIMEOUT; i > 0; i--) { DELAY(5); cmd = CSR_READ_1(sc, VR_CR0); if ((cmd & VR_CR0_TX_ON) == 0) break; } if (i == 0) return (ETIMEDOUT); } return (0); } static int vr_rx_stop(struct vr_softc *sc) { int i; uint8_t cmd; cmd = CSR_READ_1(sc, VR_CR0); if ((cmd & VR_CR0_RX_ON) != 0) { cmd &= ~VR_CR0_RX_ON; CSR_WRITE_1(sc, VR_CR0, cmd); for (i = VR_TIMEOUT; i > 0; i--) { DELAY(5); cmd = CSR_READ_1(sc, VR_CR0); if ((cmd & VR_CR0_RX_ON) == 0) break; } if (i == 0) return (ETIMEDOUT); } return (0); } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void vr_stop(struct vr_softc *sc) { struct vr_txdesc *txd; struct vr_rxdesc *rxd; struct ifnet *ifp; int i; VR_LOCK_ASSERT(sc); ifp = sc->vr_ifp; sc->vr_watchdog_timer = 0; callout_stop(&sc->vr_stat_callout); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); CSR_WRITE_1(sc, VR_CR0, VR_CR0_STOP); if (vr_rx_stop(sc) != 0) device_printf(sc->vr_dev, "%s: Rx shutdown error\n", __func__); if (vr_tx_stop(sc) != 0) device_printf(sc->vr_dev, "%s: Tx shutdown error\n", __func__); /* Clear pending interrupts. */ CSR_WRITE_2(sc, VR_ISR, 0xFFFF); CSR_WRITE_2(sc, VR_IMR, 0x0000); CSR_WRITE_4(sc, VR_TXADDR, 0x00000000); CSR_WRITE_4(sc, VR_RXADDR, 0x00000000); /* * Free RX and TX mbufs still in the queues. */ for (i = 0; i < VR_RX_RING_CNT; i++) { rxd = &sc->vr_cdata.vr_rxdesc[i]; if (rxd->rx_m != NULL) { bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap); m_freem(rxd->rx_m); rxd->rx_m = NULL; } } for (i = 0; i < VR_TX_RING_CNT; i++) { txd = &sc->vr_cdata.vr_txdesc[i]; if (txd->tx_m != NULL) { bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap); m_freem(txd->tx_m); txd->tx_m = NULL; } } } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. 
*/ static int vr_shutdown(device_t dev) { return (vr_suspend(dev)); } static int vr_suspend(device_t dev) { struct vr_softc *sc; sc = device_get_softc(dev); VR_LOCK(sc); vr_stop(sc); vr_setwol(sc); sc->vr_flags |= VR_F_SUSPENDED; VR_UNLOCK(sc); return (0); } static int vr_resume(device_t dev) { struct vr_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); VR_LOCK(sc); ifp = sc->vr_ifp; vr_clrwol(sc); vr_reset(sc); if (ifp->if_flags & IFF_UP) vr_init_locked(sc); sc->vr_flags &= ~VR_F_SUSPENDED; VR_UNLOCK(sc); return (0); } static void vr_setwol(struct vr_softc *sc) { struct ifnet *ifp; int pmc; uint16_t pmstat; uint8_t v; VR_LOCK_ASSERT(sc); if (sc->vr_revid < REV_ID_VT6102_A || pci_find_cap(sc->vr_dev, PCIY_PMG, &pmc) != 0) return; ifp = sc->vr_ifp; /* Clear WOL configuration. */ CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF); CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_SAB | VR_WOLCFG_SAM); CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF); CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN); if (sc->vr_revid > REV_ID_VT6105_B0) { /* Newer Rhine III supports two additional patterns. */ CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE); CSR_WRITE_1(sc, VR_TESTREG_CLR, 3); CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3); } if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0) CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_UCAST); if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_MAGIC); /* * It seems that multicast wakeup frames require programming pattern * registers and valid CRC as well as pattern mask for each pattern. * While it's possible to setup such a pattern it would complicate * WOL configuration so ignore multicast wakeup frames. */ if ((ifp->if_capenable & IFCAP_WOL) != 0) { CSR_WRITE_1(sc, VR_WOLCFG_SET, VR_WOLCFG_SAB | VR_WOLCFG_SAM); v = CSR_READ_1(sc, VR_STICKHW); CSR_WRITE_1(sc, VR_STICKHW, v | VR_STICKHW_WOL_ENB); CSR_WRITE_1(sc, VR_PWRCFG_SET, VR_PWRCFG_WOLEN); } /* Put hardware into sleep. */ v = CSR_READ_1(sc, VR_STICKHW); v |= VR_STICKHW_DS0 | VR_STICKHW_DS1; CSR_WRITE_1(sc, VR_STICKHW, v); /* Request PME if WOL is requested. */ pmstat = pci_read_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, 2); pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); if ((ifp->if_capenable & IFCAP_WOL) != 0) pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; pci_write_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); } static void vr_clrwol(struct vr_softc *sc) { uint8_t v; VR_LOCK_ASSERT(sc); if (sc->vr_revid < REV_ID_VT6102_A) return; /* Take hardware out of sleep. */ v = CSR_READ_1(sc, VR_STICKHW); v &= ~(VR_STICKHW_DS0 | VR_STICKHW_DS1 | VR_STICKHW_WOL_ENB); CSR_WRITE_1(sc, VR_STICKHW, v); /* Clear WOL configuration as WOL may interfere normal operation. */ CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF); CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_SAB | VR_WOLCFG_SAM | VR_WOLCFG_PMEOVR); CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF); CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN); if (sc->vr_revid > REV_ID_VT6105_B0) { /* Newer Rhine III supports two additional patterns. 
*/ CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE); CSR_WRITE_1(sc, VR_TESTREG_CLR, 3); CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3); } } static int vr_sysctl_stats(SYSCTL_HANDLER_ARGS) { struct vr_softc *sc; struct vr_statistics *stat; int error; int result; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (result == 1) { sc = (struct vr_softc *)arg1; stat = &sc->vr_stat; printf("%s statistics:\n", device_get_nameunit(sc->vr_dev)); printf("Outbound good frames : %ju\n", (uintmax_t)stat->tx_ok); printf("Inbound good frames : %ju\n", (uintmax_t)stat->rx_ok); printf("Outbound errors : %u\n", stat->tx_errors); printf("Inbound errors : %u\n", stat->rx_errors); printf("Inbound no buffers : %u\n", stat->rx_no_buffers); printf("Inbound no mbuf clusters: %d\n", stat->rx_no_mbufs); printf("Inbound FIFO overflows : %d\n", stat->rx_fifo_overflows); printf("Inbound CRC errors : %u\n", stat->rx_crc_errors); printf("Inbound frame alignment errors : %u\n", stat->rx_alignment); printf("Inbound giant frames : %u\n", stat->rx_giants); printf("Inbound runt frames : %u\n", stat->rx_runts); printf("Outbound aborted with excessive collisions : %u\n", stat->tx_abort); printf("Outbound collisions : %u\n", stat->tx_collisions); printf("Outbound late collisions : %u\n", stat->tx_late_collisions); printf("Outbound underrun : %u\n", stat->tx_underrun); printf("PCI bus errors : %u\n", stat->bus_errors); printf("driver restarted due to Rx/Tx shutdown failure : %u\n", stat->num_restart); } return (error); } diff --git a/sys/dev/wtap/if_wtap.c b/sys/dev/wtap/if_wtap.c index 3244c67213d8..e7a2986d0543 100644 --- a/sys/dev/wtap/if_wtap.c +++ b/sys/dev/wtap/if_wtap.c @@ -1,751 +1,751 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2010-2011 Monthadar Al Jaberi, TerraNet AB * All rights reserved. * * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. * * $FreeBSD$ */ #include "if_wtapvar.h" #include /* uio struct */ #include #include #include #include #include "if_medium.h" /* * This _requires_ vimage to be useful. */ #ifndef VIMAGE #error if_wtap requires VIMAGE. 
#endif /* VIMAGE */ /* device for IOCTL and read/write for debuggin purposes */ /* Function prototypes */ static d_open_t wtap_node_open; static d_close_t wtap_node_close; static d_write_t wtap_node_write; static d_ioctl_t wtap_node_ioctl; static struct cdevsw wtap_cdevsw = { .d_version = D_VERSION, .d_flags = 0, .d_open = wtap_node_open, .d_close = wtap_node_close, .d_write = wtap_node_write, .d_ioctl = wtap_node_ioctl, .d_name = "wtapnode", }; static int wtap_node_open(struct cdev *dev, int oflags, int devtype, struct thread *p) { int err = 0; uprintf("Opened device \"echo\" successfully.\n"); return(err); } static int wtap_node_close(struct cdev *dev, int fflag, int devtype, struct thread *p) { uprintf("Closing device \"echo.\"\n"); return(0); } static int wtap_node_write(struct cdev *dev, struct uio *uio, int ioflag) { int err = 0; struct mbuf *m; struct ifnet *ifp; struct wtap_softc *sc; uint8_t buf[1024]; struct epoch_tracker et; int buf_len; uprintf("write device %s \"echo.\"\n", devtoname(dev)); buf_len = MIN(uio->uio_iov->iov_len, 1024); err = copyin(uio->uio_iov->iov_base, buf, buf_len); if (err != 0) { uprintf("Write failed: bad address!\n"); return (err); } MGETHDR(m, M_NOWAIT, MT_DATA); m_copyback(m, 0, buf_len, buf); CURVNET_SET(TD_TO_VNET(curthread)); NET_EPOCH_ENTER(et); CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link) { printf("ifp->if_xname = %s\n", ifp->if_xname); if(strcmp(devtoname(dev), ifp->if_xname) == 0){ printf("found match, correspoding wtap = %s\n", ifp->if_xname); sc = (struct wtap_softc *)ifp->if_softc; printf("wtap id = %d\n", sc->id); wtap_inject(sc, m); } } NET_EPOCH_EXIT(et); CURVNET_RESTORE(); return(err); } int wtap_node_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { int error = 0; switch(cmd) { default: DWTAP_PRINTF("Unknown WTAP IOCTL\n"); error = EINVAL; } return error; } static int wtap_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params); static int wtap_medium_enqueue(struct wtap_vap *avp, struct mbuf *m) { return medium_transmit(avp->av_md, avp->id, m); } static int wtap_media_change(struct ifnet *ifp) { DWTAP_PRINTF("%s\n", __func__); int error = ieee80211_media_change(ifp); /* NB: only the fixed rate can change and that doesn't need a reset */ return (error == ENETRESET ? 0 : error); } /* * Intercept management frames to collect beacon rssi data * and to do ibss merges. */ static void wtap_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype, const struct ieee80211_rx_stats *stats, int rssi, int nf) { struct ieee80211vap *vap = ni->ni_vap; #if 0 DWTAP_PRINTF("[%d] %s\n", myath_id(ni), __func__); #endif WTAP_VAP(vap)->av_recv_mgmt(ni, m, subtype, stats, rssi, nf); } static int wtap_reset_vap(struct ieee80211vap *vap, u_long cmd) { DWTAP_PRINTF("%s\n", __func__); return 0; } static void wtap_beacon_update(struct ieee80211vap *vap, int item) { struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; DWTAP_PRINTF("%s\n", __func__); setbit(bo->bo_flags, item); } /* * Allocate and setup an initial beacon frame. */ static int wtap_beacon_alloc(struct wtap_softc *sc, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct wtap_vap *avp = WTAP_VAP(vap); DWTAP_PRINTF("[%s] %s\n", ether_sprintf(ni->ni_macaddr), __func__); /* * NB: the beacon data buffer must be 32-bit aligned; * we assume the mbuf routines will return us something * with this alignment (perhaps should assert). 
*/ avp->beacon = ieee80211_beacon_alloc(ni); if (avp->beacon == NULL) { printf("%s: cannot get mbuf\n", __func__); return ENOMEM; } callout_init(&avp->av_swba, 0); avp->bf_node = ieee80211_ref_node(ni); return 0; } static void wtap_beacon_config(struct wtap_softc *sc, struct ieee80211vap *vap) { DWTAP_PRINTF("%s\n", __func__); } static void wtap_beacon_intrp(void *arg) { struct wtap_vap *avp = arg; struct ieee80211vap *vap = arg; struct mbuf *m; if (vap->iv_state < IEEE80211_S_RUN) { DWTAP_PRINTF("Skip beacon, not running, state %d", vap->iv_state); return ; } DWTAP_PRINTF("[%d] beacon intrp\n", avp->id); //burst mode /* * Update dynamic beacon contents. If this returns * non-zero then we need to remap the memory because * the beacon frame changed size (probably because * of the TIM bitmap). */ m = m_dup(avp->beacon, M_NOWAIT); if (ieee80211_beacon_update(avp->bf_node, m, 0)) { printf("%s, need to remap the memory because the beacon frame" " changed size.\n",__func__); } if (ieee80211_radiotap_active_vap(vap)) ieee80211_radiotap_tx(vap, m); #if 0 medium_transmit(avp->av_md, avp->id, m); #endif wtap_medium_enqueue(avp, m); callout_schedule(&avp->av_swba, avp->av_bcinterval); } static int wtap_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct ieee80211com *ic = vap->iv_ic; struct wtap_softc *sc = ic->ic_softc; struct wtap_vap *avp = WTAP_VAP(vap); struct ieee80211_node *ni = NULL; int error; DWTAP_PRINTF("%s\n", __func__); ni = ieee80211_ref_node(vap->iv_bss); /* * Invoke the parent method to do net80211 work. */ error = avp->av_newstate(vap, nstate, arg); if (error != 0) goto bad; if (nstate == IEEE80211_S_RUN) { /* NB: collect bss node again, it may have changed */ ieee80211_free_node(ni); ni = ieee80211_ref_node(vap->iv_bss); switch (vap->iv_opmode) { case IEEE80211_M_MBSS: error = wtap_beacon_alloc(sc, ni); if (error != 0) goto bad; wtap_beacon_config(sc, vap); callout_reset(&avp->av_swba, avp->av_bcinterval, wtap_beacon_intrp, vap); break; default: goto bad; } } else if (nstate == IEEE80211_S_INIT) { callout_stop(&avp->av_swba); } ieee80211_free_node(ni); return 0; bad: printf("%s: bad\n", __func__); ieee80211_free_node(ni); return error; } static void wtap_bmiss(struct ieee80211vap *vap) { struct wtap_vap *avp = (struct wtap_vap *)vap; DWTAP_PRINTF("%s\n", __func__); avp->av_bmiss(vap); } static struct ieee80211vap * wtap_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct wtap_softc *sc = ic->ic_softc; struct ieee80211vap *vap; struct wtap_vap *avp; int error; struct ieee80211_node *ni; DWTAP_PRINTF("%s\n", __func__); avp = malloc(sizeof(struct wtap_vap), M_80211_VAP, M_WAITOK | M_ZERO); avp->id = sc->id; avp->av_md = sc->sc_md; avp->av_bcinterval = msecs_to_ticks(BEACON_INTRERVAL + 100*sc->id); vap = (struct ieee80211vap *) avp; error = ieee80211_vap_setup(ic, vap, name, unit, IEEE80211_M_MBSS, flags | IEEE80211_CLONE_NOBEACONS, bssid); if (error) { free(avp, M_80211_VAP); return (NULL); } /* override various methods */ avp->av_recv_mgmt = vap->iv_recv_mgmt; vap->iv_recv_mgmt = wtap_recv_mgmt; vap->iv_reset = wtap_reset_vap; vap->iv_update_beacon = wtap_beacon_update; avp->av_newstate = vap->iv_newstate; vap->iv_newstate = wtap_newstate; avp->av_bmiss = vap->iv_bmiss; vap->iv_bmiss = wtap_bmiss; /* complete setup */ ieee80211_vap_attach(vap, wtap_media_change, ieee80211_media_status, mac); avp->av_dev = 
make_dev(&wtap_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "%s", (const char *)sc->name); /* TODO this is a hack to force it to choose the rate we want */ ni = ieee80211_ref_node(vap->iv_bss); ni->ni_txrate = 130; ieee80211_free_node(ni); return vap; } static void wtap_vap_delete(struct ieee80211vap *vap) { struct wtap_vap *avp = WTAP_VAP(vap); DWTAP_PRINTF("%s\n", __func__); destroy_dev(avp->av_dev); callout_stop(&avp->av_swba); ieee80211_vap_detach(vap); free(avp, M_80211_VAP); } static void wtap_parent(struct ieee80211com *ic) { struct wtap_softc *sc = ic->ic_softc; if (ic->ic_nrunning > 0) { sc->up = 1; ieee80211_start_all(ic); } else sc->up = 0; } static void wtap_scan_start(struct ieee80211com *ic) { #if 0 DWTAP_PRINTF("%s\n", __func__); #endif } static void wtap_scan_end(struct ieee80211com *ic) { #if 0 DWTAP_PRINTF("%s\n", __func__); #endif } static void wtap_set_channel(struct ieee80211com *ic) { #if 0 DWTAP_PRINTF("%s\n", __func__); #endif } static int wtap_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { #if 0 DWTAP_PRINTF("%s, %p\n", __func__, m); #endif struct ieee80211vap *vap = ni->ni_vap; struct wtap_vap *avp = WTAP_VAP(vap); if (ieee80211_radiotap_active_vap(vap)) { ieee80211_radiotap_tx(vap, m); } if (m->m_flags & M_TXCB) ieee80211_process_callback(ni, m, 0); ieee80211_free_node(ni); return wtap_medium_enqueue(avp, m); } void wtap_inject(struct wtap_softc *sc, struct mbuf *m) { struct wtap_buf *bf = (struct wtap_buf *)malloc(sizeof(struct wtap_buf), M_WTAP_RXBUF, M_NOWAIT | M_ZERO); KASSERT(bf != NULL, ("could not allocated a new wtap_buf\n")); bf->m = m; mtx_lock(&sc->sc_mtx); STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list); taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask); mtx_unlock(&sc->sc_mtx); } void wtap_rx_deliver(struct wtap_softc *sc, struct mbuf *m) { struct epoch_tracker et; struct ieee80211com *ic = &sc->sc_ic; struct ieee80211_node *ni; int type; #if 0 DWTAP_PRINTF("%s\n", __func__); #endif DWTAP_PRINTF("[%d] receiving m=%p\n", sc->id, m); if (m == NULL) { /* NB: shouldn't happen */ ic_printf(ic, "%s: no mbuf!\n", __func__); } ieee80211_dump_pkt(ic, mtod(m, caddr_t), 0,0,0); /* * Locate the node for sender, track state, and then * pass the (referenced) node up to the 802.11 layer * for its use. */ ni = ieee80211_find_rxnode_withkey(ic, mtod(m, const struct ieee80211_frame_min *),IEEE80211_KEYIX_NONE); NET_EPOCH_ENTER(et); if (ni != NULL) { /* * Sending station is known, dispatch directly. */ type = ieee80211_input(ni, m, 1<<7, 10); ieee80211_free_node(ni); } else { type = ieee80211_input_all(ic, m, 1<<7, 10); } NET_EPOCH_EXIT(et); } static void wtap_rx_proc(void *arg, int npending) { struct epoch_tracker et; struct wtap_softc *sc = (struct wtap_softc *)arg; struct ieee80211com *ic = &sc->sc_ic; struct mbuf *m; struct ieee80211_node *ni; int type; struct wtap_buf *bf; #if 0 DWTAP_PRINTF("%s\n", __func__); #endif for(;;) { mtx_lock(&sc->sc_mtx); bf = STAILQ_FIRST(&sc->sc_rxbuf); if (bf == NULL) { mtx_unlock(&sc->sc_mtx); return; } STAILQ_REMOVE_HEAD(&sc->sc_rxbuf, bf_list); mtx_unlock(&sc->sc_mtx); KASSERT(bf != NULL, ("wtap_buf is NULL\n")); m = bf->m; DWTAP_PRINTF("[%d] receiving m=%p\n", sc->id, bf->m); if (m == NULL) { /* NB: shouldn't happen */ ic_printf(ic, "%s: no mbuf!\n", __func__); free(bf, M_WTAP_RXBUF); return; } #if 0 ieee80211_dump_pkt(ic, mtod(m, caddr_t), 0,0,0); #endif /* * Locate the node for sender, track state, and then * pass the (referenced) node up to the 802.11 layer * for its use. 
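/*
 * A minimal sketch of the hand-off pattern used by wtap_inject() above and
 * the wtap_rx_proc() task it wakes: the producer links a buffer onto an
 * STAILQ under the softc mutex and enqueues a task, and the task handler
 * drains the queue one element at a time, dropping the lock before it
 * processes each frame.  All foo_* names below are invented for the
 * illustration; this is not part of the wtap driver.
 */
#if 0	/* illustrative sketch, not compiled */
struct foo_buf {
	struct mbuf		*m;
	STAILQ_ENTRY(foo_buf)	 link;
};

struct foo_softc {
	struct mtx		 mtx;
	STAILQ_HEAD(, foo_buf)	 rxq;
	struct taskqueue	*tq;
	struct task		 rxtask;
};

/* Producer: called from the sender's context. */
static void
foo_inject(struct foo_softc *sc, struct foo_buf *bf)
{

	mtx_lock(&sc->mtx);
	STAILQ_INSERT_TAIL(&sc->rxq, bf, link);
	taskqueue_enqueue(sc->tq, &sc->rxtask);
	mtx_unlock(&sc->mtx);
}

/* Consumer: taskqueue handler, drains until the queue is empty. */
static void
foo_rx_proc(void *arg, int npending)
{
	struct foo_softc *sc = arg;
	struct foo_buf *bf;

	for (;;) {
		mtx_lock(&sc->mtx);
		bf = STAILQ_FIRST(&sc->rxq);
		if (bf == NULL) {
			mtx_unlock(&sc->mtx);
			return;
		}
		STAILQ_REMOVE_HEAD(&sc->rxq, link);
		mtx_unlock(&sc->mtx);
		foo_deliver(sc, bf);	/* invented per-frame handler */
	}
}
#endif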
*/ ni = ieee80211_find_rxnode_withkey(ic, mtod(m, const struct ieee80211_frame_min *), IEEE80211_KEYIX_NONE); NET_EPOCH_ENTER(et); if (ni != NULL) { /* * Sending station is known, dispatch directly. */ type = ieee80211_input(ni, m, 1<<7, 10); ieee80211_free_node(ni); } else { type = ieee80211_input_all(ic, m, 1<<7, 10); } NET_EPOCH_EXIT(et); /* The mbufs are freed by the Net80211 stack */ free(bf, M_WTAP_RXBUF); } } static void wtap_newassoc(struct ieee80211_node *ni, int isnew) { DWTAP_PRINTF("%s\n", __func__); } /* * Callback from the 802.11 layer to update WME parameters. */ static int wtap_wme_update(struct ieee80211com *ic) { DWTAP_PRINTF("%s\n", __func__); return 0; } static void wtap_update_mcast(struct ieee80211com *ic) { DWTAP_PRINTF("%s\n", __func__); } static void wtap_update_promisc(struct ieee80211com *ic) { DWTAP_PRINTF("%s\n", __func__); } static int wtap_transmit(struct ieee80211com *ic, struct mbuf *m) { struct ieee80211_node *ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; struct ieee80211vap *vap = ni->ni_vap; struct wtap_vap *avp = WTAP_VAP(vap); if(ni == NULL){ printf("m->m_pkthdr.rcvif is NULL we cant radiotap_tx\n"); }else{ if (ieee80211_radiotap_active_vap(vap)) ieee80211_radiotap_tx(vap, m); } if (m->m_flags & M_TXCB) ieee80211_process_callback(ni, m, 0); ieee80211_free_node(ni); return wtap_medium_enqueue(avp, m); } static struct ieee80211_node * wtap_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) { struct ieee80211_node *ni; DWTAP_PRINTF("%s\n", __func__); ni = malloc(sizeof(struct ieee80211_node), M_80211_NODE, M_NOWAIT|M_ZERO); if (ni == NULL) return (NULL); ni->ni_txrate = 130; return ni; } static void wtap_node_free(struct ieee80211_node *ni) { struct ieee80211com *ic = ni->ni_ic; struct wtap_softc *sc = ic->ic_softc; DWTAP_PRINTF("%s\n", __func__); sc->sc_node_free(ni); } int32_t wtap_attach(struct wtap_softc *sc, const uint8_t *macaddr) { struct ieee80211com *ic = &sc->sc_ic; DWTAP_PRINTF("%s\n", __func__); sc->up = 0; STAILQ_INIT(&sc->sc_rxbuf); sc->sc_tq = taskqueue_create("wtap_taskq", M_NOWAIT | M_ZERO, taskqueue_thread_enqueue, &sc->sc_tq); taskqueue_start_threads(&sc->sc_tq, 1, PI_SOFT, "%s taskQ", sc->name); - TASK_INIT(&sc->sc_rxtask, 0, wtap_rx_proc, sc); + NET_TASK_INIT(&sc->sc_rxtask, 0, wtap_rx_proc, sc); ic->ic_softc = sc; ic->ic_name = sc->name; ic->ic_phytype = IEEE80211_T_DS; ic->ic_opmode = IEEE80211_M_MBSS; ic->ic_caps = IEEE80211_C_MBSS; ic->ic_max_keyix = 128; /* A value read from Atheros ATH_KEYMAX */ ic->ic_regdomain.regdomain = SKU_ETSI; ic->ic_regdomain.country = CTRY_SWEDEN; ic->ic_regdomain.location = 1; /* Indoors */ ic->ic_regdomain.isocc[0] = 'S'; ic->ic_regdomain.isocc[1] = 'E'; ic->ic_nchans = 1; ic->ic_channels[0].ic_flags = IEEE80211_CHAN_B; ic->ic_channels[0].ic_freq = 2412; IEEE80211_ADDR_COPY(ic->ic_macaddr, macaddr); ieee80211_ifattach(ic); /* override default methods */ ic->ic_newassoc = wtap_newassoc; ic->ic_wme.wme_update = wtap_wme_update; ic->ic_vap_create = wtap_vap_create; ic->ic_vap_delete = wtap_vap_delete; ic->ic_raw_xmit = wtap_raw_xmit; ic->ic_update_mcast = wtap_update_mcast; ic->ic_update_promisc = wtap_update_promisc; ic->ic_transmit = wtap_transmit; ic->ic_parent = wtap_parent; sc->sc_node_alloc = ic->ic_node_alloc; ic->ic_node_alloc = wtap_node_alloc; sc->sc_node_free = ic->ic_node_free; ic->ic_node_free = wtap_node_free; ic->ic_scan_start = wtap_scan_start; ic->ic_scan_end = wtap_scan_end; ic->ic_set_channel = wtap_set_channel; ieee80211_radiotap_attach(ic, 
&sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), WTAP_TX_RADIOTAP_PRESENT, &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), WTAP_RX_RADIOTAP_PRESENT); /* Work here, we must find a way to populate the rate table */ #if 0 if(ic->ic_rt == NULL){ printf("no table for ic_curchan\n"); ic->ic_rt = ieee80211_get_ratetable(&ic->ic_channels[0]); } printf("ic->ic_rt =%p\n", ic->ic_rt); printf("rate count %d\n", ic->ic_rt->rateCount); uint8_t code = ic->ic_rt->info[0].dot11Rate; uint8_t cix = ic->ic_rt->info[0].ctlRateIndex; uint8_t ctl_rate = ic->ic_rt->info[cix].dot11Rate; printf("code=%d, cix=%d, ctl_rate=%d\n", code, cix, ctl_rate); uint8_t rix0 = ic->ic_rt->rateCodeToIndex[130]; uint8_t rix1 = ic->ic_rt->rateCodeToIndex[132]; uint8_t rix2 = ic->ic_rt->rateCodeToIndex[139]; uint8_t rix3 = ic->ic_rt->rateCodeToIndex[150]; printf("rix0 %u,rix1 %u,rix2 %u,rix3 %u\n", rix0,rix1,rix2,rix3); printf("lpAckDuration=%u\n", ic->ic_rt->info[0].lpAckDuration); printf("rate=%d\n", ic->ic_rt->info[0].rateKbps); #endif return 0; } int32_t wtap_detach(struct wtap_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; DWTAP_PRINTF("%s\n", __func__); ieee80211_ageq_drain(&ic->ic_stageq); ieee80211_ifdetach(ic); return 0; } void wtap_resume(struct wtap_softc *sc) { DWTAP_PRINTF("%s\n", __func__); } void wtap_suspend(struct wtap_softc *sc) { DWTAP_PRINTF("%s\n", __func__); } void wtap_shutdown(struct wtap_softc *sc) { DWTAP_PRINTF("%s\n", __func__); } void wtap_intr(struct wtap_softc *sc) { DWTAP_PRINTF("%s\n", __func__); } diff --git a/sys/dev/xl/if_xl.c b/sys/dev/xl/if_xl.c index 9daea3dc4fa6..ca6184758815 100644 --- a/sys/dev/xl/if_xl.c +++ b/sys/dev/xl/if_xl.c @@ -1,3299 +1,3299 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 1997, 1998, 1999 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); /* * 3Com 3c90x Etherlink XL PCI NIC driver * * Supports the 3Com "boomerang", "cyclone" and "hurricane" PCI * bus-master chips (3c90x cards and embedded controllers) including * the following: * * 3Com 3c900-TPO 10Mbps/RJ-45 * 3Com 3c900-COMBO 10Mbps/RJ-45,AUI,BNC * 3Com 3c905-TX 10/100Mbps/RJ-45 * 3Com 3c905-T4 10/100Mbps/RJ-45 * 3Com 3c900B-TPO 10Mbps/RJ-45 * 3Com 3c900B-COMBO 10Mbps/RJ-45,AUI,BNC * 3Com 3c900B-TPC 10Mbps/RJ-45,BNC * 3Com 3c900B-FL 10Mbps/Fiber-optic * 3Com 3c905B-COMBO 10/100Mbps/RJ-45,AUI,BNC * 3Com 3c905B-TX 10/100Mbps/RJ-45 * 3Com 3c905B-FL/FX 10/100Mbps/Fiber-optic * 3Com 3c905C-TX 10/100Mbps/RJ-45 (Tornado ASIC) * 3Com 3c980-TX 10/100Mbps server adapter (Hurricane ASIC) * 3Com 3c980C-TX 10/100Mbps server adapter (Tornado ASIC) * 3Com 3cSOHO100-TX 10/100Mbps/RJ-45 (Hurricane ASIC) * 3Com 3c450-TX 10/100Mbps/RJ-45 (Tornado ASIC) * 3Com 3c555 10/100Mbps/RJ-45 (MiniPCI, Laptop Hurricane) * 3Com 3c556 10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC) * 3Com 3c556B 10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC) * 3Com 3c575TX 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC) * 3Com 3c575B 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC) * 3Com 3c575C 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC) * 3Com 3cxfem656 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC) * 3Com 3cxfem656b 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC) * 3Com 3cxfem656c 10/100Mbps/RJ-45 (Cardbus, Tornado ASIC) * Dell Optiplex GX1 on-board 3c918 10/100Mbps/RJ-45 * Dell on-board 3c920 10/100Mbps/RJ-45 * Dell Precision on-board 3c905B 10/100Mbps/RJ-45 * Dell Latitude laptop docking station embedded 3c905-TX * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The 3c90x series chips use a bus-master DMA interface for transferring * packets to and from the controller chip. Some of the "vortex" cards * (3c59x) also supported a bus master mode, however for those chips * you could only DMA packets to/from a contiguous memory buffer. For * transmission this would mean copying the contents of the queued mbuf * chain into an mbuf cluster and then DMAing the cluster. This extra * copy would sort of defeat the purpose of the bus master support for * any packet that doesn't fit into a single mbuf. * * By contrast, the 3c90x cards support a fragment-based bus master * mode where mbuf chains can be encapsulated using TX descriptors. * This is similar to other PCI chips such as the Texas Instruments * ThunderLAN and the Intel 82557/82558. * * The "vortex" driver (if_vx.c) happens to work for the "boomerang" * bus master chips because they maintain the old PIO interface for * backwards compatibility, but starting with the 3c905B and the * "cyclone" chips, the compatibility interface has been dropped. * Since using bus master DMA is a big win, we use this driver to * support the PCI "boomerang" chips even though they work with the * "vortex" driver in order to obtain better performance. */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MODULE_DEPEND(xl, pci, 1, 1, 1); MODULE_DEPEND(xl, ether, 1, 1, 1); MODULE_DEPEND(xl, miibus, 1, 1, 1); /* "device miibus" required. See GENERIC if you get errors here. 
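/*
 * A short sketch of the "fragment-based bus master" mode described in the
 * overview above, assuming the usual busdma idiom: each segment returned by
 * bus_dmamap_load_mbuf_sg(9) becomes one fragment slot in a TX descriptor.
 * The foo_frag layout and FOO_LAST_FRAG flag are invented for illustration;
 * the driver's real encapsulation lives in xl_encap() further down.
 */
#if 0	/* illustrative sketch, not compiled */
struct foo_frag {
	uint32_t	addr;	/* bus address of this fragment */
	uint32_t	len;	/* fragment length, last one flagged */
};

static int
foo_encap_sketch(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf *m,
    struct foo_frag *frags, int maxfrags)
{
	bus_dma_segment_t segs[32];
	int error, i, nseg;

	error = bus_dmamap_load_mbuf_sg(tag, map, m, segs, &nseg,
	    BUS_DMA_NOWAIT);
	if (error != 0)
		return (error);
	if (nseg > maxfrags) {
		bus_dmamap_unload(tag, map);
		return (EFBIG);
	}
	for (i = 0; i < nseg; i++) {
		frags[i].addr = htole32(segs[i].ds_addr);
		frags[i].len = htole32(segs[i].ds_len);
	}
	/* Flag the last fragment so the chip knows where the packet ends. */
	frags[nseg - 1].len |= htole32(FOO_LAST_FRAG);
	return (0);
}
#endif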
*/ #include "miibus_if.h" #include /* * TX Checksumming is disabled by default for two reasons: * - TX Checksumming will occasionally produce corrupt packets * - TX Checksumming seems to reduce performance * * Only 905B/C cards were reported to have this problem, it is possible * that later chips _may_ be immune. */ #define XL905B_TXCSUM_BROKEN 1 #ifdef XL905B_TXCSUM_BROKEN #define XL905B_CSUM_FEATURES 0 #else #define XL905B_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) #endif /* * Various supported device vendors/types and their names. */ static const struct xl_type xl_devs[] = { { TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT, "3Com 3c900-TPO Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT_COMBO, "3Com 3c900-COMBO Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_BOOMERANG_10_100BT, "3Com 3c905-TX Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_BOOMERANG_100BT4, "3Com 3c905-T4 Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT, "3Com 3c900B-TPO Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_COMBO, "3Com 3c900B-COMBO Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_TPC, "3Com 3c900B-TPC Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_CYCLONE_10FL, "3Com 3c900B-FL Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT, "3Com 3c905B-TX Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100BT4, "3Com 3c905B-T4 Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100FX, "3Com 3c905B-FX/SC Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100_COMBO, "3Com 3c905B-COMBO Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT, "3Com 3c905C-TX Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_920B, "3Com 3c920B-EMB Integrated Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_920B_WNM, "3Com 3c920B-EMB-WNM Integrated Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT_SERV, "3Com 3c980 Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_SERV, "3Com 3c980C Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_SOHO100TX, "3Com 3cSOHO100-TX OfficeConnect" }, { TC_VENDORID, TC_DEVICEID_TORNADO_HOMECONNECT, "3Com 3c450-TX HomeConnect" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_555, "3Com 3c555 Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_556, "3Com 3c556 Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_556B, "3Com 3c556B Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_575A, "3Com 3c575TX Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_575B, "3Com 3c575B Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_575C, "3Com 3c575C Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_656, "3Com 3c656 Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_656B, "3Com 3c656B Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_TORNADO_656C, "3Com 3c656C Fast Etherlink XL" }, { 0, 0, NULL } }; static int xl_probe(device_t); static int xl_attach(device_t); static int xl_detach(device_t); static int xl_newbuf(struct xl_softc *, struct xl_chain_onefrag *); static void xl_tick(void *); static void xl_stats_update(struct xl_softc *); static int xl_encap(struct xl_softc *, struct xl_chain *, struct mbuf **); static int xl_rxeof(struct xl_softc *); static void xl_rxeof_task(void *, int); static int xl_rx_resync(struct xl_softc *); static void xl_txeof(struct xl_softc *); static void xl_txeof_90xB(struct xl_softc *); static void xl_txeoc(struct xl_softc *); static void xl_intr(void *); static void xl_start(struct ifnet *); static 
void xl_start_locked(struct ifnet *); static void xl_start_90xB_locked(struct ifnet *); static int xl_ioctl(struct ifnet *, u_long, caddr_t); static void xl_init(void *); static void xl_init_locked(struct xl_softc *); static void xl_stop(struct xl_softc *); static int xl_watchdog(struct xl_softc *); static int xl_shutdown(device_t); static int xl_suspend(device_t); static int xl_resume(device_t); static void xl_setwol(struct xl_softc *); #ifdef DEVICE_POLLING static int xl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count); static int xl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count); #endif static int xl_ifmedia_upd(struct ifnet *); static void xl_ifmedia_sts(struct ifnet *, struct ifmediareq *); static int xl_eeprom_wait(struct xl_softc *); static int xl_read_eeprom(struct xl_softc *, caddr_t, int, int, int); static void xl_rxfilter(struct xl_softc *); static void xl_rxfilter_90x(struct xl_softc *); static void xl_rxfilter_90xB(struct xl_softc *); static void xl_setcfg(struct xl_softc *); static void xl_setmode(struct xl_softc *, int); static void xl_reset(struct xl_softc *); static int xl_list_rx_init(struct xl_softc *); static int xl_list_tx_init(struct xl_softc *); static int xl_list_tx_init_90xB(struct xl_softc *); static void xl_wait(struct xl_softc *); static void xl_mediacheck(struct xl_softc *); static void xl_choose_media(struct xl_softc *sc, int *media); static void xl_choose_xcvr(struct xl_softc *, int); static void xl_dma_map_addr(void *, bus_dma_segment_t *, int, int); #ifdef notdef static void xl_testpacket(struct xl_softc *); #endif static int xl_miibus_readreg(device_t, int, int); static int xl_miibus_writereg(device_t, int, int, int); static void xl_miibus_statchg(device_t); static void xl_miibus_mediainit(device_t); /* * MII bit-bang glue */ static uint32_t xl_mii_bitbang_read(device_t); static void xl_mii_bitbang_write(device_t, uint32_t); static const struct mii_bitbang_ops xl_mii_bitbang_ops = { xl_mii_bitbang_read, xl_mii_bitbang_write, { XL_MII_DATA, /* MII_BIT_MDO */ XL_MII_DATA, /* MII_BIT_MDI */ XL_MII_CLK, /* MII_BIT_MDC */ XL_MII_DIR, /* MII_BIT_DIR_HOST_PHY */ 0, /* MII_BIT_DIR_PHY_HOST */ } }; static device_method_t xl_methods[] = { /* Device interface */ DEVMETHOD(device_probe, xl_probe), DEVMETHOD(device_attach, xl_attach), DEVMETHOD(device_detach, xl_detach), DEVMETHOD(device_shutdown, xl_shutdown), DEVMETHOD(device_suspend, xl_suspend), DEVMETHOD(device_resume, xl_resume), /* MII interface */ DEVMETHOD(miibus_readreg, xl_miibus_readreg), DEVMETHOD(miibus_writereg, xl_miibus_writereg), DEVMETHOD(miibus_statchg, xl_miibus_statchg), DEVMETHOD(miibus_mediainit, xl_miibus_mediainit), DEVMETHOD_END }; static driver_t xl_driver = { "xl", xl_methods, sizeof(struct xl_softc) }; static devclass_t xl_devclass; DRIVER_MODULE_ORDERED(xl, pci, xl_driver, xl_devclass, NULL, NULL, SI_ORDER_ANY); DRIVER_MODULE(miibus, xl, miibus_driver, miibus_devclass, NULL, NULL); MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, xl, xl_devs, nitems(xl_devs) - 1); static void xl_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { u_int32_t *paddr; paddr = arg; *paddr = segs->ds_addr; } /* * Murphy's law says that it's possible the chip can wedge and * the 'command in progress' bit may never clear. Hence, we wait * only a finite amount of time to avoid getting caught in an * infinite loop. Normally this delay routine would be a macro, * but it isn't called during normal operation so we can afford * to make it a function. 
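/*
 * xl_dma_map_addr() just above is the standard bus_dmamap_load(9) callback
 * idiom: the bus address of a mapping is reported through a callback rather
 * than a return value, and the callback simply stores segs[0].ds_addr into
 * the pointer passed as its argument.  A minimal usage sketch (the ring_*
 * names are invented; the driver's real calls are in xl_attach() below):
 */
#if 0	/* illustrative sketch, not compiled */
	u_int32_t ring_paddr;
	int error;

	error = bus_dmamap_load(ring_tag, ring_map, ring_vaddr, ring_size,
	    xl_dma_map_addr, &ring_paddr, BUS_DMA_NOWAIT);
	if (error != 0)
		return (error);
	/* ring_paddr now holds the bus address to program into the chip. */
#endif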
Suppress warning when card gone. */ static void xl_wait(struct xl_softc *sc) { int i; for (i = 0; i < XL_TIMEOUT; i++) { if ((CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY) == 0) break; } if (i == XL_TIMEOUT && bus_child_present(sc->xl_dev)) device_printf(sc->xl_dev, "command never completed!\n"); } /* * MII access routines are provided for adapters with external * PHYs (3c905-TX, 3c905-T4, 3c905B-T4) and those with built-in * autoneg logic that's faked up to look like a PHY (3c905B-TX). * Note: if you don't perform the MDIO operations just right, * it's possible to end up with code that works correctly with * some chips/CPUs/processor speeds/bus speeds/etc but not * with others. */ /* * Read the MII serial port for the MII bit-bang module. */ static uint32_t xl_mii_bitbang_read(device_t dev) { struct xl_softc *sc; uint32_t val; sc = device_get_softc(dev); /* We're already in window 4. */ val = CSR_READ_2(sc, XL_W4_PHY_MGMT); CSR_BARRIER(sc, XL_W4_PHY_MGMT, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); return (val); } /* * Write the MII serial port for the MII bit-bang module. */ static void xl_mii_bitbang_write(device_t dev, uint32_t val) { struct xl_softc *sc; sc = device_get_softc(dev); /* We're already in window 4. */ CSR_WRITE_2(sc, XL_W4_PHY_MGMT, val); CSR_BARRIER(sc, XL_W4_PHY_MGMT, 2, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); } static int xl_miibus_readreg(device_t dev, int phy, int reg) { struct xl_softc *sc; sc = device_get_softc(dev); /* Select the window 4. */ XL_SEL_WIN(4); return (mii_bitbang_readreg(dev, &xl_mii_bitbang_ops, phy, reg)); } static int xl_miibus_writereg(device_t dev, int phy, int reg, int data) { struct xl_softc *sc; sc = device_get_softc(dev); /* Select the window 4. */ XL_SEL_WIN(4); mii_bitbang_writereg(dev, &xl_mii_bitbang_ops, phy, reg, data); return (0); } static void xl_miibus_statchg(device_t dev) { struct xl_softc *sc; struct mii_data *mii; uint8_t macctl; sc = device_get_softc(dev); mii = device_get_softc(sc->xl_miibus); xl_setcfg(sc); /* Set ASIC's duplex mode to match the PHY. */ XL_SEL_WIN(3); macctl = CSR_READ_1(sc, XL_W3_MAC_CTRL); if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { macctl |= XL_MACCTRL_DUPLEX; if (sc->xl_type == XL_TYPE_905B) { if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) macctl |= XL_MACCTRL_FLOW_CONTROL_ENB; else macctl &= ~XL_MACCTRL_FLOW_CONTROL_ENB; } } else { macctl &= ~XL_MACCTRL_DUPLEX; if (sc->xl_type == XL_TYPE_905B) macctl &= ~XL_MACCTRL_FLOW_CONTROL_ENB; } CSR_WRITE_1(sc, XL_W3_MAC_CTRL, macctl); } /* * Special support for the 3c905B-COMBO. This card has 10/100 support * plus BNC and AUI ports. This means we will have both an miibus attached * plus some non-MII media settings. In order to allow this, we have to * add the extra media to the miibus's ifmedia struct, but we can't do * that during xl_attach() because the miibus hasn't been attached yet. * So instead, we wait until the miibus probe/attach is done, at which * point we will get a callback telling is that it's safe to add our * extra media. */ static void xl_miibus_mediainit(device_t dev) { struct xl_softc *sc; struct mii_data *mii; struct ifmedia *ifm; sc = device_get_softc(dev); mii = device_get_softc(sc->xl_miibus); ifm = &mii->mii_media; if (sc->xl_media & (XL_MEDIAOPT_AUI | XL_MEDIAOPT_10FL)) { /* * Check for a 10baseFL board in disguise. 
*/ if (sc->xl_type == XL_TYPE_905B && sc->xl_media == XL_MEDIAOPT_10FL) { if (bootverbose) device_printf(sc->xl_dev, "found 10baseFL\n"); ifmedia_add(ifm, IFM_ETHER | IFM_10_FL, 0, NULL); ifmedia_add(ifm, IFM_ETHER | IFM_10_FL|IFM_HDX, 0, NULL); if (sc->xl_caps & XL_CAPS_FULL_DUPLEX) ifmedia_add(ifm, IFM_ETHER | IFM_10_FL | IFM_FDX, 0, NULL); } else { if (bootverbose) device_printf(sc->xl_dev, "found AUI\n"); ifmedia_add(ifm, IFM_ETHER | IFM_10_5, 0, NULL); } } if (sc->xl_media & XL_MEDIAOPT_BNC) { if (bootverbose) device_printf(sc->xl_dev, "found BNC\n"); ifmedia_add(ifm, IFM_ETHER | IFM_10_2, 0, NULL); } } /* * The EEPROM is slow: give it time to come ready after issuing * it a command. */ static int xl_eeprom_wait(struct xl_softc *sc) { int i; for (i = 0; i < 100; i++) { if (CSR_READ_2(sc, XL_W0_EE_CMD) & XL_EE_BUSY) DELAY(162); else break; } if (i == 100) { device_printf(sc->xl_dev, "eeprom failed to come ready\n"); return (1); } return (0); } /* * Read a sequence of words from the EEPROM. Note that ethernet address * data is stored in the EEPROM in network byte order. */ static int xl_read_eeprom(struct xl_softc *sc, caddr_t dest, int off, int cnt, int swap) { int err = 0, i; u_int16_t word = 0, *ptr; #define EEPROM_5BIT_OFFSET(A) ((((A) << 2) & 0x7F00) | ((A) & 0x003F)) #define EEPROM_8BIT_OFFSET(A) ((A) & 0x003F) /* * XXX: WARNING! DANGER! * It's easy to accidentally overwrite the rom content! * Note: the 3c575 uses 8bit EEPROM offsets. */ XL_SEL_WIN(0); if (xl_eeprom_wait(sc)) return (1); if (sc->xl_flags & XL_FLAG_EEPROM_OFFSET_30) off += 0x30; for (i = 0; i < cnt; i++) { if (sc->xl_flags & XL_FLAG_8BITROM) CSR_WRITE_2(sc, XL_W0_EE_CMD, XL_EE_8BIT_READ | EEPROM_8BIT_OFFSET(off + i)); else CSR_WRITE_2(sc, XL_W0_EE_CMD, XL_EE_READ | EEPROM_5BIT_OFFSET(off + i)); err = xl_eeprom_wait(sc); if (err) break; word = CSR_READ_2(sc, XL_W0_EE_DATA); ptr = (u_int16_t *)(dest + (i * 2)); if (swap) *ptr = ntohs(word); else *ptr = word; } return (err ? 1 : 0); } static void xl_rxfilter(struct xl_softc *sc) { if (sc->xl_type == XL_TYPE_905B) xl_rxfilter_90xB(sc); else xl_rxfilter_90x(sc); } /* * NICs older than the 3c905B have only one multicast option, which * is to enable reception of all multicast frames. */ static u_int xl_check_maddr_90x(void *arg, struct sockaddr_dl *sdl, u_int cnt) { uint8_t *rxfilt = arg; *rxfilt |= XL_RXFILTER_ALLMULTI; return (1); } static void xl_rxfilter_90x(struct xl_softc *sc) { struct ifnet *ifp; u_int8_t rxfilt; XL_LOCK_ASSERT(sc); ifp = sc->xl_ifp; XL_SEL_WIN(5); rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER); rxfilt &= ~(XL_RXFILTER_ALLFRAMES | XL_RXFILTER_ALLMULTI | XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL); /* Set the individual bit to receive frames for this host only. */ rxfilt |= XL_RXFILTER_INDIVIDUAL; /* Set capture broadcast bit to capture broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) rxfilt |= XL_RXFILTER_BROADCAST; /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) { if (ifp->if_flags & IFF_PROMISC) rxfilt |= XL_RXFILTER_ALLFRAMES; if (ifp->if_flags & IFF_ALLMULTI) rxfilt |= XL_RXFILTER_ALLMULTI; } else if_foreach_llmaddr(sc->xl_ifp, xl_check_maddr_90x, &rxfilt); CSR_WRITE_2(sc, XL_COMMAND, rxfilt | XL_CMD_RX_SET_FILT); XL_SEL_WIN(7); } /* * 3c905B adapters have a hash filter that we can program. 
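/*
 * The multicast walks above (xl_check_maddr_90x) and just below
 * (xl_check_maddr_90xB) follow the if_foreach_llmaddr(9) contract: the
 * callback is invoked once per link-level multicast address and the
 * function returns the sum of the callback return values, so returning 1
 * per address lets the caller tell whether anything was programmed.  A
 * hypothetical counting callback (foo_hash_set() is invented):
 */
#if 0	/* illustrative sketch, not compiled */
static u_int
foo_maddr_cb(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct foo_softc *sc = arg;
	uint32_t crc;

	crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN);
	foo_hash_set(sc, crc & 0xFF);	/* program one hash bucket */
	return (1);
}

/*
 * Caller side, mirroring xl_rxfilter_90xB():
 *	if (if_foreach_llmaddr(ifp, foo_maddr_cb, sc) > 0)
 *		rxfilt |= XL_RXFILTER_MULTIHASH;
 */
#endif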
* Note: the 3c905B currently only supports a 64-bit * hash table, which means we really only need 6 bits, * but the manual indicates that future chip revisions * will have a 256-bit hash table, hence the routine * is set up to calculate 8 bits of position info in * case we need it some day. * Note II, The Sequel: _CURRENT_ versions of the * 3c905B have a 256 bit hash table. This means we have * to use all 8 bits regardless. On older cards, the * upper 2 bits will be ignored. Grrrr.... */ static u_int xl_check_maddr_90xB(void *arg, struct sockaddr_dl *sdl, u_int count) { struct xl_softc *sc = arg; uint16_t h; h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) & 0xFF; CSR_WRITE_2(sc, XL_COMMAND, h | XL_CMD_RX_SET_HASH | XL_HASH_SET); return (1); } static void xl_rxfilter_90xB(struct xl_softc *sc) { struct ifnet *ifp; int i; u_int8_t rxfilt; XL_LOCK_ASSERT(sc); ifp = sc->xl_ifp; XL_SEL_WIN(5); rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER); rxfilt &= ~(XL_RXFILTER_ALLFRAMES | XL_RXFILTER_ALLMULTI | XL_RXFILTER_BROADCAST | XL_RXFILTER_INDIVIDUAL | XL_RXFILTER_MULTIHASH); /* Set the individual bit to receive frames for this host only. */ rxfilt |= XL_RXFILTER_INDIVIDUAL; /* Set capture broadcast bit to capture broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) rxfilt |= XL_RXFILTER_BROADCAST; /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) { if (ifp->if_flags & IFF_PROMISC) rxfilt |= XL_RXFILTER_ALLFRAMES; if (ifp->if_flags & IFF_ALLMULTI) rxfilt |= XL_RXFILTER_ALLMULTI; } else { /* First, zot all the existing hash bits. */ for (i = 0; i < XL_HASHFILT_SIZE; i++) CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH | i); /* Now program new ones. */ if (if_foreach_llmaddr(sc->xl_ifp, xl_check_maddr_90xB, sc) > 0) rxfilt |= XL_RXFILTER_MULTIHASH; } CSR_WRITE_2(sc, XL_COMMAND, rxfilt | XL_CMD_RX_SET_FILT); XL_SEL_WIN(7); } static void xl_setcfg(struct xl_softc *sc) { u_int32_t icfg; /*XL_LOCK_ASSERT(sc);*/ XL_SEL_WIN(3); icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG); icfg &= ~XL_ICFG_CONNECTOR_MASK; if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BT4) icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS); if (sc->xl_media & XL_MEDIAOPT_BTX) icfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS); CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP); } static void xl_setmode(struct xl_softc *sc, int media) { u_int32_t icfg; u_int16_t mediastat; char *pmsg = "", *dmsg = ""; XL_LOCK_ASSERT(sc); XL_SEL_WIN(4); mediastat = CSR_READ_2(sc, XL_W4_MEDIA_STATUS); XL_SEL_WIN(3); icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG); if (sc->xl_media & XL_MEDIAOPT_BT) { if (IFM_SUBTYPE(media) == IFM_10_T) { pmsg = "10baseT transceiver"; sc->xl_xcvr = XL_XCVR_10BT; icfg &= ~XL_ICFG_CONNECTOR_MASK; icfg |= (XL_XCVR_10BT << XL_ICFG_CONNECTOR_BITS); mediastat |= XL_MEDIASTAT_LINKBEAT | XL_MEDIASTAT_JABGUARD; mediastat &= ~XL_MEDIASTAT_SQEENB; } } if (sc->xl_media & XL_MEDIAOPT_BFX) { if (IFM_SUBTYPE(media) == IFM_100_FX) { pmsg = "100baseFX port"; sc->xl_xcvr = XL_XCVR_100BFX; icfg &= ~XL_ICFG_CONNECTOR_MASK; icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS); mediastat |= XL_MEDIASTAT_LINKBEAT; mediastat &= ~XL_MEDIASTAT_SQEENB; } } if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) { if (IFM_SUBTYPE(media) == IFM_10_5) { pmsg = "AUI port"; sc->xl_xcvr = XL_XCVR_AUI; icfg &= ~XL_ICFG_CONNECTOR_MASK; icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS); mediastat &= ~(XL_MEDIASTAT_LINKBEAT | XL_MEDIASTAT_JABGUARD); mediastat |= 
~XL_MEDIASTAT_SQEENB; } if (IFM_SUBTYPE(media) == IFM_10_FL) { pmsg = "10baseFL transceiver"; sc->xl_xcvr = XL_XCVR_AUI; icfg &= ~XL_ICFG_CONNECTOR_MASK; icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS); mediastat &= ~(XL_MEDIASTAT_LINKBEAT | XL_MEDIASTAT_JABGUARD); mediastat |= ~XL_MEDIASTAT_SQEENB; } } if (sc->xl_media & XL_MEDIAOPT_BNC) { if (IFM_SUBTYPE(media) == IFM_10_2) { pmsg = "AUI port"; sc->xl_xcvr = XL_XCVR_COAX; icfg &= ~XL_ICFG_CONNECTOR_MASK; icfg |= (XL_XCVR_COAX << XL_ICFG_CONNECTOR_BITS); mediastat &= ~(XL_MEDIASTAT_LINKBEAT | XL_MEDIASTAT_JABGUARD | XL_MEDIASTAT_SQEENB); } } if ((media & IFM_GMASK) == IFM_FDX || IFM_SUBTYPE(media) == IFM_100_FX) { dmsg = "full"; XL_SEL_WIN(3); CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX); } else { dmsg = "half"; XL_SEL_WIN(3); CSR_WRITE_1(sc, XL_W3_MAC_CTRL, (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX)); } if (IFM_SUBTYPE(media) == IFM_10_2) CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START); else CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP); CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg); XL_SEL_WIN(4); CSR_WRITE_2(sc, XL_W4_MEDIA_STATUS, mediastat); DELAY(800); XL_SEL_WIN(7); device_printf(sc->xl_dev, "selecting %s, %s duplex\n", pmsg, dmsg); } static void xl_reset(struct xl_softc *sc) { int i; XL_LOCK_ASSERT(sc); XL_SEL_WIN(0); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET | ((sc->xl_flags & XL_FLAG_WEIRDRESET) ? XL_RESETOPT_DISADVFD:0)); /* * If we're using memory mapped register mode, pause briefly * after issuing the reset command before trying to access any * other registers. With my 3c575C CardBus card, failing to do * this results in the system locking up while trying to poll * the command busy bit in the status register. */ if (sc->xl_flags & XL_FLAG_USE_MMIO) DELAY(100000); for (i = 0; i < XL_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY)) break; } if (i == XL_TIMEOUT) device_printf(sc->xl_dev, "reset didn't complete\n"); /* Reset TX and RX. */ /* Note: the RX reset takes an absurd amount of time * on newer versions of the Tornado chips such as those * on the 3c905CX and newer 3c908C cards. We wait an * extra amount of time so that xl_wait() doesn't complain * and annoy the users. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET); DELAY(100000); xl_wait(sc); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET); xl_wait(sc); if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR || sc->xl_flags & XL_FLAG_INVERT_MII_PWR) { XL_SEL_WIN(2); CSR_WRITE_2(sc, XL_W2_RESET_OPTIONS, CSR_READ_2(sc, XL_W2_RESET_OPTIONS) | ((sc->xl_flags & XL_FLAG_INVERT_LED_PWR) ? XL_RESETOPT_INVERT_LED : 0) | ((sc->xl_flags & XL_FLAG_INVERT_MII_PWR) ? XL_RESETOPT_INVERT_MII : 0)); } /* Wait a little while for the chip to get its brains in order. */ DELAY(100000); } /* * Probe for a 3Com Etherlink XL chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. */ static int xl_probe(device_t dev) { const struct xl_type *t; t = xl_devs; while (t->xl_name != NULL) { if ((pci_get_vendor(dev) == t->xl_vid) && (pci_get_device(dev) == t->xl_did)) { device_set_desc(dev, t->xl_name); return (BUS_PROBE_DEFAULT); } t++; } return (ENXIO); } /* * This routine is a kludge to work around possible hardware faults * or manufacturing defects that can cause the media options register * (or reset options register, as it's called for the first generation * 3c90x adapters) to return an incorrect result. 
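/*
 * xl_probe() above walks the same xl_devs[] table that was passed to
 * MODULE_PNP_INFO() earlier in this file.  The "U16:vendor;U16:device;D:#"
 * descriptor tells devmatch(8) how each entry is laid out (two 16-bit IDs
 * to match on, followed by a description string), and "nitems(xl_devs) - 1"
 * excludes the { 0, 0, NULL } terminator that the probe loop uses as its
 * sentinel.  A stripped-down sketch of the pattern with an invented table:
 */
#if 0	/* illustrative sketch, not compiled */
struct foo_type {
	uint16_t	 vid;
	uint16_t	 did;
	const char	*name;
};

static const struct foo_type foo_devs[] = {
	{ 0x1234, 0x0001, "Example NIC" },
	{ 0, 0, NULL }
};

MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, foo, foo_devs,
    nitems(foo_devs) - 1);

static int
foo_probe(device_t dev)
{
	const struct foo_type *t;

	for (t = foo_devs; t->name != NULL; t++) {
		if (pci_get_vendor(dev) == t->vid &&
		    pci_get_device(dev) == t->did) {
			device_set_desc(dev, t->name);
			return (BUS_PROBE_DEFAULT);
		}
	}
	return (ENXIO);
}
#endif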
I have encountered * one Dell Latitude laptop docking station with an integrated 3c905-TX * which doesn't have any of the 'mediaopt' bits set. This screws up * the attach routine pretty badly because it doesn't know what media * to look for. If we find ourselves in this predicament, this routine * will try to guess the media options values and warn the user of a * possible manufacturing defect with his adapter/system/whatever. */ static void xl_mediacheck(struct xl_softc *sc) { /* * If some of the media options bits are set, assume they are * correct. If not, try to figure it out down below. * XXX I should check for 10baseFL, but I don't have an adapter * to test with. */ if (sc->xl_media & (XL_MEDIAOPT_MASK & ~XL_MEDIAOPT_VCO)) { /* * Check the XCVR value. If it's not in the normal range * of values, we need to fake it up here. */ if (sc->xl_xcvr <= XL_XCVR_AUTO) return; else { device_printf(sc->xl_dev, "bogus xcvr value in EEPROM (%x)\n", sc->xl_xcvr); device_printf(sc->xl_dev, "choosing new default based on card type\n"); } } else { if (sc->xl_type == XL_TYPE_905B && sc->xl_media & XL_MEDIAOPT_10FL) return; device_printf(sc->xl_dev, "WARNING: no media options bits set in the media options register!!\n"); device_printf(sc->xl_dev, "this could be a manufacturing defect in your adapter or system\n"); device_printf(sc->xl_dev, "attempting to guess media type; you should probably consult your vendor\n"); } xl_choose_xcvr(sc, 1); } static void xl_choose_xcvr(struct xl_softc *sc, int verbose) { u_int16_t devid; /* * Read the device ID from the EEPROM. * This is what's loaded into the PCI device ID register, so it has * to be correct otherwise we wouldn't have gotten this far. */ xl_read_eeprom(sc, (caddr_t)&devid, XL_EE_PRODID, 1, 0); switch (devid) { case TC_DEVICEID_BOOMERANG_10BT: /* 3c900-TPO */ case TC_DEVICEID_KRAKATOA_10BT: /* 3c900B-TPO */ sc->xl_media = XL_MEDIAOPT_BT; sc->xl_xcvr = XL_XCVR_10BT; if (verbose) device_printf(sc->xl_dev, "guessing 10BaseT transceiver\n"); break; case TC_DEVICEID_BOOMERANG_10BT_COMBO: /* 3c900-COMBO */ case TC_DEVICEID_KRAKATOA_10BT_COMBO: /* 3c900B-COMBO */ sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI; sc->xl_xcvr = XL_XCVR_10BT; if (verbose) device_printf(sc->xl_dev, "guessing COMBO (AUI/BNC/TP)\n"); break; case TC_DEVICEID_KRAKATOA_10BT_TPC: /* 3c900B-TPC */ sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC; sc->xl_xcvr = XL_XCVR_10BT; if (verbose) device_printf(sc->xl_dev, "guessing TPC (BNC/TP)\n"); break; case TC_DEVICEID_CYCLONE_10FL: /* 3c900B-FL */ sc->xl_media = XL_MEDIAOPT_10FL; sc->xl_xcvr = XL_XCVR_AUI; if (verbose) device_printf(sc->xl_dev, "guessing 10baseFL\n"); break; case TC_DEVICEID_BOOMERANG_10_100BT: /* 3c905-TX */ case TC_DEVICEID_HURRICANE_555: /* 3c555 */ case TC_DEVICEID_HURRICANE_556: /* 3c556 */ case TC_DEVICEID_HURRICANE_556B: /* 3c556B */ case TC_DEVICEID_HURRICANE_575A: /* 3c575TX */ case TC_DEVICEID_HURRICANE_575B: /* 3c575B */ case TC_DEVICEID_HURRICANE_575C: /* 3c575C */ case TC_DEVICEID_HURRICANE_656: /* 3c656 */ case TC_DEVICEID_HURRICANE_656B: /* 3c656B */ case TC_DEVICEID_TORNADO_656C: /* 3c656C */ case TC_DEVICEID_TORNADO_10_100BT_920B: /* 3c920B-EMB */ case TC_DEVICEID_TORNADO_10_100BT_920B_WNM: /* 3c920B-EMB-WNM */ sc->xl_media = XL_MEDIAOPT_MII; sc->xl_xcvr = XL_XCVR_MII; if (verbose) device_printf(sc->xl_dev, "guessing MII\n"); break; case TC_DEVICEID_BOOMERANG_100BT4: /* 3c905-T4 */ case TC_DEVICEID_CYCLONE_10_100BT4: /* 3c905B-T4 */ sc->xl_media = XL_MEDIAOPT_BT4; sc->xl_xcvr = XL_XCVR_MII; if 
(verbose) device_printf(sc->xl_dev, "guessing 100baseT4/MII\n"); break; case TC_DEVICEID_HURRICANE_10_100BT: /* 3c905B-TX */ case TC_DEVICEID_HURRICANE_10_100BT_SERV:/*3c980-TX */ case TC_DEVICEID_TORNADO_10_100BT_SERV: /* 3c980C-TX */ case TC_DEVICEID_HURRICANE_SOHO100TX: /* 3cSOHO100-TX */ case TC_DEVICEID_TORNADO_10_100BT: /* 3c905C-TX */ case TC_DEVICEID_TORNADO_HOMECONNECT: /* 3c450-TX */ sc->xl_media = XL_MEDIAOPT_BTX; sc->xl_xcvr = XL_XCVR_AUTO; if (verbose) device_printf(sc->xl_dev, "guessing 10/100 internal\n"); break; case TC_DEVICEID_CYCLONE_10_100_COMBO: /* 3c905B-COMBO */ sc->xl_media = XL_MEDIAOPT_BTX|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI; sc->xl_xcvr = XL_XCVR_AUTO; if (verbose) device_printf(sc->xl_dev, "guessing 10/100 plus BNC/AUI\n"); break; default: device_printf(sc->xl_dev, "unknown device ID: %x -- defaulting to 10baseT\n", devid); sc->xl_media = XL_MEDIAOPT_BT; break; } } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int xl_attach(device_t dev) { u_char eaddr[ETHER_ADDR_LEN]; u_int16_t sinfo2, xcvr[2]; struct xl_softc *sc; struct ifnet *ifp; int media, pmcap; int error = 0, phy, rid, res, unit; uint16_t did; sc = device_get_softc(dev); sc->xl_dev = dev; unit = device_get_unit(dev); mtx_init(&sc->xl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); ifmedia_init(&sc->ifmedia, 0, xl_ifmedia_upd, xl_ifmedia_sts); did = pci_get_device(dev); sc->xl_flags = 0; if (did == TC_DEVICEID_HURRICANE_555) sc->xl_flags |= XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_PHYOK; if (did == TC_DEVICEID_HURRICANE_556 || did == TC_DEVICEID_HURRICANE_556B) sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK | XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_WEIRDRESET | XL_FLAG_INVERT_LED_PWR | XL_FLAG_INVERT_MII_PWR; if (did == TC_DEVICEID_HURRICANE_555 || did == TC_DEVICEID_HURRICANE_556) sc->xl_flags |= XL_FLAG_8BITROM; if (did == TC_DEVICEID_HURRICANE_556B) sc->xl_flags |= XL_FLAG_NO_XCVR_PWR; if (did == TC_DEVICEID_HURRICANE_575B || did == TC_DEVICEID_HURRICANE_575C || did == TC_DEVICEID_HURRICANE_656B || did == TC_DEVICEID_TORNADO_656C) sc->xl_flags |= XL_FLAG_FUNCREG; if (did == TC_DEVICEID_HURRICANE_575A || did == TC_DEVICEID_HURRICANE_575B || did == TC_DEVICEID_HURRICANE_575C || did == TC_DEVICEID_HURRICANE_656B || did == TC_DEVICEID_TORNADO_656C) sc->xl_flags |= XL_FLAG_PHYOK | XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_8BITROM; if (did == TC_DEVICEID_HURRICANE_656) sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK; if (did == TC_DEVICEID_HURRICANE_575B) sc->xl_flags |= XL_FLAG_INVERT_LED_PWR; if (did == TC_DEVICEID_HURRICANE_575C) sc->xl_flags |= XL_FLAG_INVERT_MII_PWR; if (did == TC_DEVICEID_TORNADO_656C) sc->xl_flags |= XL_FLAG_INVERT_MII_PWR; if (did == TC_DEVICEID_HURRICANE_656 || did == TC_DEVICEID_HURRICANE_656B) sc->xl_flags |= XL_FLAG_INVERT_MII_PWR | XL_FLAG_INVERT_LED_PWR; if (did == TC_DEVICEID_TORNADO_10_100BT_920B || did == TC_DEVICEID_TORNADO_10_100BT_920B_WNM) sc->xl_flags |= XL_FLAG_PHYOK; switch (did) { case TC_DEVICEID_BOOMERANG_10_100BT: /* 3c905-TX */ case TC_DEVICEID_HURRICANE_575A: case TC_DEVICEID_HURRICANE_575B: case TC_DEVICEID_HURRICANE_575C: sc->xl_flags |= XL_FLAG_NO_MMIO; break; default: break; } /* * Map control/status registers. 
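/*
 * The quirk handling above is a per-device-ID flag cascade.  Purely as an
 * illustration of the same information in table form (not how this driver
 * is written), the mapping could also be expressed as a lookup keyed on the
 * PCI device ID; foo_quirks/foo_quirk_lookup are invented names and the two
 * entries simply restate flag combinations taken from the code above.
 */
#if 0	/* illustrative sketch, not compiled */
static const struct {
	uint16_t	did;
	uint32_t	flags;
} foo_quirks[] = {
	{ TC_DEVICEID_HURRICANE_555,
	    XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_PHYOK | XL_FLAG_8BITROM },
	{ TC_DEVICEID_HURRICANE_575C,
	    XL_FLAG_FUNCREG | XL_FLAG_PHYOK | XL_FLAG_EEPROM_OFFSET_30 |
	    XL_FLAG_8BITROM | XL_FLAG_INVERT_MII_PWR | XL_FLAG_NO_MMIO },
	/* ...one entry per quirky device... */
};

static uint32_t
foo_quirk_lookup(uint16_t did)
{
	u_int i;

	for (i = 0; i < nitems(foo_quirks); i++)
		if (foo_quirks[i].did == did)
			return (foo_quirks[i].flags);
	return (0);
}
#endif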
*/ pci_enable_busmaster(dev); if ((sc->xl_flags & XL_FLAG_NO_MMIO) == 0) { rid = XL_PCI_LOMEM; res = SYS_RES_MEMORY; sc->xl_res = bus_alloc_resource_any(dev, res, &rid, RF_ACTIVE); } if (sc->xl_res != NULL) { sc->xl_flags |= XL_FLAG_USE_MMIO; if (bootverbose) device_printf(dev, "using memory mapped I/O\n"); } else { rid = XL_PCI_LOIO; res = SYS_RES_IOPORT; sc->xl_res = bus_alloc_resource_any(dev, res, &rid, RF_ACTIVE); if (sc->xl_res == NULL) { device_printf(dev, "couldn't map ports/memory\n"); error = ENXIO; goto fail; } if (bootverbose) device_printf(dev, "using port I/O\n"); } sc->xl_btag = rman_get_bustag(sc->xl_res); sc->xl_bhandle = rman_get_bushandle(sc->xl_res); if (sc->xl_flags & XL_FLAG_FUNCREG) { rid = XL_PCI_FUNCMEM; sc->xl_fres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->xl_fres == NULL) { device_printf(dev, "couldn't map funcreg memory\n"); error = ENXIO; goto fail; } sc->xl_ftag = rman_get_bustag(sc->xl_fres); sc->xl_fhandle = rman_get_bushandle(sc->xl_fres); } /* Allocate interrupt */ rid = 0; sc->xl_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->xl_irq == NULL) { device_printf(dev, "couldn't map interrupt\n"); error = ENXIO; goto fail; } /* Initialize interface name. */ ifp = sc->xl_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); error = ENOSPC; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); /* Reset the adapter. */ XL_LOCK(sc); xl_reset(sc); XL_UNLOCK(sc); /* * Get station address from the EEPROM. */ if (xl_read_eeprom(sc, (caddr_t)&eaddr, XL_EE_OEM_ADR0, 3, 1)) { device_printf(dev, "failed to read station address\n"); error = ENXIO; goto fail; } callout_init_mtx(&sc->xl_tick_callout, &sc->xl_mtx, 0); - TASK_INIT(&sc->xl_task, 0, xl_rxeof_task, sc); + NET_TASK_INIT(&sc->xl_task, 0, xl_rxeof_task, sc); /* * Now allocate a tag for the DMA descriptor lists and a chunk * of DMA-able memory based on the tag. Also obtain the DMA * addresses of the RX and TX ring, which we'll need later. * All of our lists are allocated as a contiguous block * of memory. 
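/*
 * The TASK_INIT() -> NET_TASK_INIT() hunk above is the point of this change:
 * NET_TASK_INIT() flags the task so the taskqueue enters the network epoch
 * before running the handler, which xl_rxeof_task() needs because it ends up
 * calling if_input().  It provides the same protection that wtap_rx_proc()
 * earlier in this patch obtains with an explicit epoch tracker.  A minimal
 * sketch of that manual pattern (foo_deliver() is an invented stand-in for
 * the code that reaches if_input()):
 */
#if 0	/* illustrative sketch, not compiled */
static void
foo_rx_task(void *arg, int pending)
{
	struct epoch_tracker et;

	NET_EPOCH_ENTER(et);	/* what NET_TASK_INIT() now arranges for us */
	foo_deliver(arg);	/* eventually calls if_input() */
	NET_EPOCH_EXIT(et);
}

/*
 * Registration side: only the _INIT macro changes:
 *	NET_TASK_INIT(&sc->xl_task, 0, xl_rxeof_task, sc);
 */
#endif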
*/ error = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, XL_RX_LIST_SZ, 1, XL_RX_LIST_SZ, 0, NULL, NULL, &sc->xl_ldata.xl_rx_tag); if (error) { device_printf(dev, "failed to allocate rx dma tag\n"); goto fail; } error = bus_dmamem_alloc(sc->xl_ldata.xl_rx_tag, (void **)&sc->xl_ldata.xl_rx_list, BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->xl_ldata.xl_rx_dmamap); if (error) { device_printf(dev, "no memory for rx list buffers!\n"); bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag); sc->xl_ldata.xl_rx_tag = NULL; goto fail; } error = bus_dmamap_load(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap, sc->xl_ldata.xl_rx_list, XL_RX_LIST_SZ, xl_dma_map_addr, &sc->xl_ldata.xl_rx_dmaaddr, BUS_DMA_NOWAIT); if (error) { device_printf(dev, "cannot get dma address of the rx ring!\n"); bus_dmamem_free(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_list, sc->xl_ldata.xl_rx_dmamap); bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag); sc->xl_ldata.xl_rx_tag = NULL; goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, XL_TX_LIST_SZ, 1, XL_TX_LIST_SZ, 0, NULL, NULL, &sc->xl_ldata.xl_tx_tag); if (error) { device_printf(dev, "failed to allocate tx dma tag\n"); goto fail; } error = bus_dmamem_alloc(sc->xl_ldata.xl_tx_tag, (void **)&sc->xl_ldata.xl_tx_list, BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->xl_ldata.xl_tx_dmamap); if (error) { device_printf(dev, "no memory for list buffers!\n"); bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag); sc->xl_ldata.xl_tx_tag = NULL; goto fail; } error = bus_dmamap_load(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap, sc->xl_ldata.xl_tx_list, XL_TX_LIST_SZ, xl_dma_map_addr, &sc->xl_ldata.xl_tx_dmaaddr, BUS_DMA_NOWAIT); if (error) { device_printf(dev, "cannot get dma address of the tx ring!\n"); bus_dmamem_free(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_list, sc->xl_ldata.xl_tx_dmamap); bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag); sc->xl_ldata.xl_tx_tag = NULL; goto fail; } /* * Allocate a DMA tag for the mapping of mbufs. */ error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * XL_MAXFRAGS, XL_MAXFRAGS, MCLBYTES, 0, NULL, NULL, &sc->xl_mtag); if (error) { device_printf(dev, "failed to allocate mbuf dma tag\n"); goto fail; } /* We need a spare DMA map for the RX ring. */ error = bus_dmamap_create(sc->xl_mtag, 0, &sc->xl_tmpmap); if (error) goto fail; /* * Figure out the card type. 3c905B adapters have the * 'supportsNoTxLength' bit set in the capabilities * word in the EEPROM. * Note: my 3c575C CardBus card lies. It returns a value * of 0x1578 for its capabilities word, which is somewhat * nonsensical. Another way to distinguish a 3c90x chip * from a 3c90xB/C chip is to check for the 'supportsLargePackets' * bit. This will only be set for 3c90x boomerage chips. */ xl_read_eeprom(sc, (caddr_t)&sc->xl_caps, XL_EE_CAPS, 1, 0); if (sc->xl_caps & XL_CAPS_NO_TXLENGTH || !(sc->xl_caps & XL_CAPS_LARGE_PKTS)) sc->xl_type = XL_TYPE_905B; else sc->xl_type = XL_TYPE_90X; /* Check availability of WOL. */ if ((sc->xl_caps & XL_CAPS_PWRMGMT) != 0 && pci_find_cap(dev, PCIY_PMG, &pmcap) == 0) { sc->xl_pmcap = pmcap; sc->xl_flags |= XL_FLAG_WOL; sinfo2 = 0; xl_read_eeprom(sc, (caddr_t)&sinfo2, XL_EE_SOFTINFO2, 1, 0); if ((sinfo2 & XL_SINFO2_AUX_WOL_CON) == 0 && bootverbose) device_printf(dev, "No auxiliary remote wakeup connector!\n"); } /* Set the TX start threshold for best performance. 
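/*
 * Each ring set up above goes through the same three-step busdma idiom:
 * create a tag describing the constraints, allocate coherent memory against
 * it, then load the map to learn the bus address, with every failure path
 * unwinding exactly the steps that succeeded.  Condensed sketch of that
 * shape (foo_* names invented, RING_SZ standing in for XL_RX_LIST_SZ or
 * XL_TX_LIST_SZ):
 */
#if 0	/* illustrative sketch, not compiled */
	error = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    RING_SZ, 1, RING_SZ, 0, NULL, NULL, &foo_tag);
	if (error != 0)
		goto fail;
	error = bus_dmamem_alloc(foo_tag, (void **)&foo_ring,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &foo_map);
	if (error != 0) {
		bus_dma_tag_destroy(foo_tag);
		goto fail;
	}
	error = bus_dmamap_load(foo_tag, foo_map, foo_ring, RING_SZ,
	    xl_dma_map_addr, &foo_paddr, BUS_DMA_NOWAIT);
	if (error != 0) {
		bus_dmamem_free(foo_tag, foo_ring, foo_map);
		bus_dma_tag_destroy(foo_tag);
		goto fail;
	}
#endif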
*/ sc->xl_tx_thresh = XL_MIN_FRAMELEN; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = xl_ioctl; ifp->if_capabilities = IFCAP_VLAN_MTU; if (sc->xl_type == XL_TYPE_905B) { ifp->if_hwassist = XL905B_CSUM_FEATURES; #ifdef XL905B_TXCSUM_BROKEN ifp->if_capabilities |= IFCAP_RXCSUM; #else ifp->if_capabilities |= IFCAP_HWCSUM; #endif } if ((sc->xl_flags & XL_FLAG_WOL) != 0) ifp->if_capabilities |= IFCAP_WOL_MAGIC; ifp->if_capenable = ifp->if_capabilities; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif ifp->if_start = xl_start; ifp->if_init = xl_init; IFQ_SET_MAXLEN(&ifp->if_snd, XL_TX_LIST_CNT - 1); ifp->if_snd.ifq_drv_maxlen = XL_TX_LIST_CNT - 1; IFQ_SET_READY(&ifp->if_snd); /* * Now we have to see what sort of media we have. * This includes probing for an MII interace and a * possible PHY. */ XL_SEL_WIN(3); sc->xl_media = CSR_READ_2(sc, XL_W3_MEDIA_OPT); if (bootverbose) device_printf(dev, "media options word: %x\n", sc->xl_media); xl_read_eeprom(sc, (char *)&xcvr, XL_EE_ICFG_0, 2, 0); sc->xl_xcvr = xcvr[0] | xcvr[1] << 16; sc->xl_xcvr &= XL_ICFG_CONNECTOR_MASK; sc->xl_xcvr >>= XL_ICFG_CONNECTOR_BITS; xl_mediacheck(sc); if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX || sc->xl_media & XL_MEDIAOPT_BT4) { if (bootverbose) device_printf(dev, "found MII/AUTO\n"); xl_setcfg(sc); /* * Attach PHYs only at MII address 24 if !XL_FLAG_PHYOK. * This is to guard against problems with certain 3Com ASIC * revisions that incorrectly map the internal transceiver * control registers at all MII addresses. */ phy = MII_PHY_ANY; if ((sc->xl_flags & XL_FLAG_PHYOK) == 0) phy = 24; error = mii_attach(dev, &sc->xl_miibus, ifp, xl_ifmedia_upd, xl_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, sc->xl_type == XL_TYPE_905B ? MIIF_DOPAUSE : 0); if (error != 0) { device_printf(dev, "attaching PHYs failed\n"); goto fail; } goto done; } /* * Sanity check. If the user has selected "auto" and this isn't * a 10/100 card of some kind, we need to force the transceiver * type to something sane. */ if (sc->xl_xcvr == XL_XCVR_AUTO) xl_choose_xcvr(sc, bootverbose); /* * Do ifmedia setup. */ if (sc->xl_media & XL_MEDIAOPT_BT) { if (bootverbose) device_printf(dev, "found 10baseT\n"); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL); if (sc->xl_caps & XL_CAPS_FULL_DUPLEX) ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); } if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) { /* * Check for a 10baseFL board in disguise. 
*/ if (sc->xl_type == XL_TYPE_905B && sc->xl_media == XL_MEDIAOPT_10FL) { if (bootverbose) device_printf(dev, "found 10baseFL\n"); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL|IFM_HDX, 0, NULL); if (sc->xl_caps & XL_CAPS_FULL_DUPLEX) ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL); } else { if (bootverbose) device_printf(dev, "found AUI\n"); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL); } } if (sc->xl_media & XL_MEDIAOPT_BNC) { if (bootverbose) device_printf(dev, "found BNC\n"); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_2, 0, NULL); } if (sc->xl_media & XL_MEDIAOPT_BFX) { if (bootverbose) device_printf(dev, "found 100baseFX\n"); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_FX, 0, NULL); } media = IFM_ETHER|IFM_100_TX|IFM_FDX; xl_choose_media(sc, &media); if (sc->xl_miibus == NULL) ifmedia_set(&sc->ifmedia, media); done: if (sc->xl_flags & XL_FLAG_NO_XCVR_PWR) { XL_SEL_WIN(0); CSR_WRITE_2(sc, XL_W0_MFG_ID, XL_NO_XCVR_PWR_MAGICBITS); } /* * Call MI attach routine. */ ether_ifattach(ifp, eaddr); error = bus_setup_intr(dev, sc->xl_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, xl_intr, sc, &sc->xl_intrhand); if (error) { device_printf(dev, "couldn't set up irq\n"); ether_ifdetach(ifp); goto fail; } fail: if (error) xl_detach(dev); return (error); } /* * Choose a default media. * XXX This is a leaf function only called by xl_attach() and * acquires/releases the non-recursible driver mutex to * satisfy lock assertions. */ static void xl_choose_media(struct xl_softc *sc, int *media) { XL_LOCK(sc); switch (sc->xl_xcvr) { case XL_XCVR_10BT: *media = IFM_ETHER|IFM_10_T; xl_setmode(sc, *media); break; case XL_XCVR_AUI: if (sc->xl_type == XL_TYPE_905B && sc->xl_media == XL_MEDIAOPT_10FL) { *media = IFM_ETHER|IFM_10_FL; xl_setmode(sc, *media); } else { *media = IFM_ETHER|IFM_10_5; xl_setmode(sc, *media); } break; case XL_XCVR_COAX: *media = IFM_ETHER|IFM_10_2; xl_setmode(sc, *media); break; case XL_XCVR_AUTO: case XL_XCVR_100BTX: case XL_XCVR_MII: /* Chosen by miibus */ break; case XL_XCVR_100BFX: *media = IFM_ETHER|IFM_100_FX; break; default: device_printf(sc->xl_dev, "unknown XCVR type: %d\n", sc->xl_xcvr); /* * This will probably be wrong, but it prevents * the ifmedia code from panicking. */ *media = IFM_ETHER|IFM_10_T; break; } XL_UNLOCK(sc); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. 
*/ static int xl_detach(device_t dev) { struct xl_softc *sc; struct ifnet *ifp; int rid, res; sc = device_get_softc(dev); ifp = sc->xl_ifp; KASSERT(mtx_initialized(&sc->xl_mtx), ("xl mutex not initialized")); #ifdef DEVICE_POLLING if (ifp && ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif if (sc->xl_flags & XL_FLAG_USE_MMIO) { rid = XL_PCI_LOMEM; res = SYS_RES_MEMORY; } else { rid = XL_PCI_LOIO; res = SYS_RES_IOPORT; } /* These should only be active if attach succeeded */ if (device_is_attached(dev)) { XL_LOCK(sc); xl_stop(sc); XL_UNLOCK(sc); taskqueue_drain(taskqueue_swi, &sc->xl_task); callout_drain(&sc->xl_tick_callout); ether_ifdetach(ifp); } if (sc->xl_miibus) device_delete_child(dev, sc->xl_miibus); bus_generic_detach(dev); ifmedia_removeall(&sc->ifmedia); if (sc->xl_intrhand) bus_teardown_intr(dev, sc->xl_irq, sc->xl_intrhand); if (sc->xl_irq) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->xl_irq); if (sc->xl_fres != NULL) bus_release_resource(dev, SYS_RES_MEMORY, XL_PCI_FUNCMEM, sc->xl_fres); if (sc->xl_res) bus_release_resource(dev, res, rid, sc->xl_res); if (ifp) if_free(ifp); if (sc->xl_mtag) { bus_dmamap_destroy(sc->xl_mtag, sc->xl_tmpmap); bus_dma_tag_destroy(sc->xl_mtag); } if (sc->xl_ldata.xl_rx_tag) { bus_dmamap_unload(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap); bus_dmamem_free(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_list, sc->xl_ldata.xl_rx_dmamap); bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag); } if (sc->xl_ldata.xl_tx_tag) { bus_dmamap_unload(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap); bus_dmamem_free(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_list, sc->xl_ldata.xl_tx_dmamap); bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag); } mtx_destroy(&sc->xl_mtx); return (0); } /* * Initialize the transmit descriptors. */ static int xl_list_tx_init(struct xl_softc *sc) { struct xl_chain_data *cd; struct xl_list_data *ld; int error, i; XL_LOCK_ASSERT(sc); cd = &sc->xl_cdata; ld = &sc->xl_ldata; for (i = 0; i < XL_TX_LIST_CNT; i++) { cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i]; error = bus_dmamap_create(sc->xl_mtag, 0, &cd->xl_tx_chain[i].xl_map); if (error) return (error); cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr + i * sizeof(struct xl_list); if (i == (XL_TX_LIST_CNT - 1)) cd->xl_tx_chain[i].xl_next = NULL; else cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1]; } cd->xl_tx_free = &cd->xl_tx_chain[0]; cd->xl_tx_tail = cd->xl_tx_head = NULL; bus_dmamap_sync(ld->xl_tx_tag, ld->xl_tx_dmamap, BUS_DMASYNC_PREWRITE); return (0); } /* * Initialize the transmit descriptors. 
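/*
 * The ring-initialization routines above and below keep two views of every
 * descriptor in step: the kernel virtual pointer (&ld->xl_tx_list[i]) that
 * the driver uses, and the bus address programmed into the hardware next
 * pointers, computed from the loaded base address plus the same index.
 * The arithmetic, reduced to a single line:
 */
#if 0	/* illustrative sketch, not compiled */
	/* Bus address of descriptor i, given the base from bus_dmamap_load(). */
	desc_paddr = ring_paddr + i * sizeof(struct xl_list);
#endif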
*/ static int xl_list_tx_init_90xB(struct xl_softc *sc) { struct xl_chain_data *cd; struct xl_list_data *ld; int error, i; XL_LOCK_ASSERT(sc); cd = &sc->xl_cdata; ld = &sc->xl_ldata; for (i = 0; i < XL_TX_LIST_CNT; i++) { cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i]; error = bus_dmamap_create(sc->xl_mtag, 0, &cd->xl_tx_chain[i].xl_map); if (error) return (error); cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr + i * sizeof(struct xl_list); if (i == (XL_TX_LIST_CNT - 1)) cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[0]; else cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1]; if (i == 0) cd->xl_tx_chain[i].xl_prev = &cd->xl_tx_chain[XL_TX_LIST_CNT - 1]; else cd->xl_tx_chain[i].xl_prev = &cd->xl_tx_chain[i - 1]; } bzero(ld->xl_tx_list, XL_TX_LIST_SZ); ld->xl_tx_list[0].xl_status = htole32(XL_TXSTAT_EMPTY); cd->xl_tx_prod = 1; cd->xl_tx_cons = 1; cd->xl_tx_cnt = 0; bus_dmamap_sync(ld->xl_tx_tag, ld->xl_tx_dmamap, BUS_DMASYNC_PREWRITE); return (0); } /* * Initialize the RX descriptors and allocate mbufs for them. Note that * we arrange the descriptors in a closed ring, so that the last descriptor * points back to the first. */ static int xl_list_rx_init(struct xl_softc *sc) { struct xl_chain_data *cd; struct xl_list_data *ld; int error, i, next; u_int32_t nextptr; XL_LOCK_ASSERT(sc); cd = &sc->xl_cdata; ld = &sc->xl_ldata; for (i = 0; i < XL_RX_LIST_CNT; i++) { cd->xl_rx_chain[i].xl_ptr = &ld->xl_rx_list[i]; error = bus_dmamap_create(sc->xl_mtag, 0, &cd->xl_rx_chain[i].xl_map); if (error) return (error); error = xl_newbuf(sc, &cd->xl_rx_chain[i]); if (error) return (error); if (i == (XL_RX_LIST_CNT - 1)) next = 0; else next = i + 1; nextptr = ld->xl_rx_dmaaddr + next * sizeof(struct xl_list_onefrag); cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[next]; ld->xl_rx_list[i].xl_next = htole32(nextptr); } bus_dmamap_sync(ld->xl_rx_tag, ld->xl_rx_dmamap, BUS_DMASYNC_PREWRITE); cd->xl_rx_head = &cd->xl_rx_chain[0]; return (0); } /* * Initialize an RX descriptor and attach an MBUF cluster. * If we fail to do so, we need to leave the old mbuf and * the old DMA map untouched so that it can be reused. */ static int xl_newbuf(struct xl_softc *sc, struct xl_chain_onefrag *c) { struct mbuf *m_new = NULL; bus_dmamap_t map; bus_dma_segment_t segs[1]; int error, nseg; XL_LOCK_ASSERT(sc); m_new = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m_new == NULL) return (ENOBUFS); m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; /* Force longword alignment for packet payload. 
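/*
 * On the m_adj(m_new, ETHER_ALIGN) just below: ETHER_ALIGN is 2 and the
 * Ethernet header is ETHER_HDR_LEN (14) bytes, so starting the received
 * frame two bytes into the cluster puts the IP header that follows the
 * Ethernet header on a 32-bit boundary:
 *
 *	offset:  0     2                       16
 *	         | pad  | 14-byte Ethernet hdr | IP header (4-byte aligned) ...
 */
#if 0	/* illustrative sketch, not compiled */
	_Static_assert(ETHER_ALIGN + ETHER_HDR_LEN == 16,
	    "IP header lands on a 32-bit boundary");
#endif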
*/ m_adj(m_new, ETHER_ALIGN); error = bus_dmamap_load_mbuf_sg(sc->xl_mtag, sc->xl_tmpmap, m_new, segs, &nseg, BUS_DMA_NOWAIT); if (error) { m_freem(m_new); device_printf(sc->xl_dev, "can't map mbuf (error %d)\n", error); return (error); } KASSERT(nseg == 1, ("%s: too many DMA segments (%d)", __func__, nseg)); bus_dmamap_unload(sc->xl_mtag, c->xl_map); map = c->xl_map; c->xl_map = sc->xl_tmpmap; sc->xl_tmpmap = map; c->xl_mbuf = m_new; c->xl_ptr->xl_frag.xl_len = htole32(m_new->m_len | XL_LAST_FRAG); c->xl_ptr->xl_frag.xl_addr = htole32(segs->ds_addr); c->xl_ptr->xl_status = 0; bus_dmamap_sync(sc->xl_mtag, c->xl_map, BUS_DMASYNC_PREREAD); return (0); } static int xl_rx_resync(struct xl_softc *sc) { struct xl_chain_onefrag *pos; int i; XL_LOCK_ASSERT(sc); pos = sc->xl_cdata.xl_rx_head; for (i = 0; i < XL_RX_LIST_CNT; i++) { if (pos->xl_ptr->xl_status) break; pos = pos->xl_next; } if (i == XL_RX_LIST_CNT) return (0); sc->xl_cdata.xl_rx_head = pos; return (EAGAIN); } /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. */ static int xl_rxeof(struct xl_softc *sc) { struct mbuf *m; struct ifnet *ifp = sc->xl_ifp; struct xl_chain_onefrag *cur_rx; int total_len; int rx_npkts = 0; u_int32_t rxstat; XL_LOCK_ASSERT(sc); again: bus_dmamap_sync(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_POSTREAD); while ((rxstat = le32toh(sc->xl_cdata.xl_rx_head->xl_ptr->xl_status))) { #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { if (sc->rxcycles <= 0) break; sc->rxcycles--; } #endif cur_rx = sc->xl_cdata.xl_rx_head; sc->xl_cdata.xl_rx_head = cur_rx->xl_next; total_len = rxstat & XL_RXSTAT_LENMASK; rx_npkts++; /* * Since we have told the chip to allow large frames, * we need to trap giant frame errors in software. We allow * a little more than the normal frame size to account for * frames with VLAN tags. */ if (total_len > XL_MAX_FRAMELEN) rxstat |= (XL_RXSTAT_UP_ERROR|XL_RXSTAT_OVERSIZE); /* * If an error occurs, update stats, clear the * status word and leave the mbuf cluster in place: * it should simply get re-used next time this descriptor * comes up in the ring. */ if (rxstat & XL_RXSTAT_UP_ERROR) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); cur_rx->xl_ptr->xl_status = 0; bus_dmamap_sync(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE); continue; } /* * If the error bit was not set, the upload complete * bit should be set which means we have a valid packet. * If not, something truly strange has happened. */ if (!(rxstat & XL_RXSTAT_UP_CMPLT)) { device_printf(sc->xl_dev, "bad receive status -- packet dropped\n"); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); cur_rx->xl_ptr->xl_status = 0; bus_dmamap_sync(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE); continue; } /* No errors; receive the packet. */ bus_dmamap_sync(sc->xl_mtag, cur_rx->xl_map, BUS_DMASYNC_POSTREAD); m = cur_rx->xl_mbuf; /* * Try to conjure up a new mbuf cluster. If that * fails, it means we have an out of memory condition and * should leave the buffer in place and continue. This will * result in a lost packet, but there's little else we * can do in this situation. 
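 * In outline, the recycle-on-failure pattern used here is
 * (simplified sketch of the code that follows):
 *
 *   if (xl_newbuf(sc, cur_rx) != 0) {
 *           cur_rx->xl_ptr->xl_status = 0;   // recycle the old cluster
 *           continue;                        // this packet is lost
 *   }
 *   // only now is the old mbuf ours to hand to if_input()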
*/ if (xl_newbuf(sc, cur_rx)) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); cur_rx->xl_ptr->xl_status = 0; bus_dmamap_sync(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE); continue; } bus_dmamap_sync(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE); if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = total_len; if (ifp->if_capenable & IFCAP_RXCSUM) { /* Do IP checksum checking. */ if (rxstat & XL_RXSTAT_IPCKOK) m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if (!(rxstat & XL_RXSTAT_IPCKERR)) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; if ((rxstat & XL_RXSTAT_TCPCOK && !(rxstat & XL_RXSTAT_TCPCKERR)) || (rxstat & XL_RXSTAT_UDPCKOK && !(rxstat & XL_RXSTAT_UDPCKERR))) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID|CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } } XL_UNLOCK(sc); (*ifp->if_input)(ifp, m); XL_LOCK(sc); /* * If we are running from the taskqueue, the interface * might have been stopped while we were passing the last * packet up the network stack. */ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) return (rx_npkts); } /* * Handle the 'end of channel' condition. When the upload * engine hits the end of the RX ring, it will stall. This * is our cue to flush the RX ring, reload the uplist pointer * register and unstall the engine. * XXX This is actually a little goofy. With the ThunderLAN * chip, you get an interrupt when the receiver hits the end * of the receive ring, which tells you exactly when you * you need to reload the ring pointer. Here we have to * fake it. I'm mad at myself for not being clever enough * to avoid the use of a goto here. */ if (CSR_READ_4(sc, XL_UPLIST_PTR) == 0 || CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) { CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL); xl_wait(sc); CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr); sc->xl_cdata.xl_rx_head = &sc->xl_cdata.xl_rx_chain[0]; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL); goto again; } return (rx_npkts); } /* * Taskqueue wrapper for xl_rxeof(). */ static void xl_rxeof_task(void *arg, int pending) { struct xl_softc *sc = (struct xl_softc *)arg; XL_LOCK(sc); if (sc->xl_ifp->if_drv_flags & IFF_DRV_RUNNING) xl_rxeof(sc); XL_UNLOCK(sc); } /* * A frame was downloaded to the chip. It's safe for us to clean up * the list buffers. */ static void xl_txeof(struct xl_softc *sc) { struct xl_chain *cur_tx; struct ifnet *ifp = sc->xl_ifp; XL_LOCK_ASSERT(sc); /* * Go through our tx list and free mbufs for those * frames that have been uploaded. Note: the 3c905B * sets a special bit in the status word to let us * know that a frame has been downloaded, but the * original 3c900/3c905 adapters don't do that. * Consequently, we have to use a different test if * xl_type != XL_TYPE_905B. 
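 *
 * Roughly, the two completion tests are:
 *
 *   3c900/3c905:  CSR_READ_4(sc, XL_DOWNLIST_PTR) != 0 means the
 *                 download engine is still working on the current
 *                 head, so stop cleaning;
 *   3c905B:       xl_txeof_90xB() instead checks the
 *                 XL_TXSTAT_DL_COMPLETE bit in the descriptor's
 *                 status word.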
*/ while (sc->xl_cdata.xl_tx_head != NULL) { cur_tx = sc->xl_cdata.xl_tx_head; if (CSR_READ_4(sc, XL_DOWNLIST_PTR)) break; sc->xl_cdata.xl_tx_head = cur_tx->xl_next; bus_dmamap_sync(sc->xl_mtag, cur_tx->xl_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->xl_mtag, cur_tx->xl_map); m_freem(cur_tx->xl_mbuf); cur_tx->xl_mbuf = NULL; if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; cur_tx->xl_next = sc->xl_cdata.xl_tx_free; sc->xl_cdata.xl_tx_free = cur_tx; } if (sc->xl_cdata.xl_tx_head == NULL) { sc->xl_wdog_timer = 0; sc->xl_cdata.xl_tx_tail = NULL; } else { if (CSR_READ_4(sc, XL_DMACTL) & XL_DMACTL_DOWN_STALLED || !CSR_READ_4(sc, XL_DOWNLIST_PTR)) { CSR_WRITE_4(sc, XL_DOWNLIST_PTR, sc->xl_cdata.xl_tx_head->xl_phys); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL); } } } static void xl_txeof_90xB(struct xl_softc *sc) { struct xl_chain *cur_tx = NULL; struct ifnet *ifp = sc->xl_ifp; int idx; XL_LOCK_ASSERT(sc); bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap, BUS_DMASYNC_POSTREAD); idx = sc->xl_cdata.xl_tx_cons; while (idx != sc->xl_cdata.xl_tx_prod) { cur_tx = &sc->xl_cdata.xl_tx_chain[idx]; if (!(le32toh(cur_tx->xl_ptr->xl_status) & XL_TXSTAT_DL_COMPLETE)) break; if (cur_tx->xl_mbuf != NULL) { bus_dmamap_sync(sc->xl_mtag, cur_tx->xl_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->xl_mtag, cur_tx->xl_map); m_freem(cur_tx->xl_mbuf); cur_tx->xl_mbuf = NULL; } if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); sc->xl_cdata.xl_tx_cnt--; XL_INC(idx, XL_TX_LIST_CNT); } if (sc->xl_cdata.xl_tx_cnt == 0) sc->xl_wdog_timer = 0; sc->xl_cdata.xl_tx_cons = idx; if (cur_tx != NULL) ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } /* * TX 'end of channel' interrupt handler. Actually, we should * only get a 'TX complete' interrupt if there's a transmit error, * so this is really TX error handler. */ static void xl_txeoc(struct xl_softc *sc) { u_int8_t txstat; XL_LOCK_ASSERT(sc); while ((txstat = CSR_READ_1(sc, XL_TX_STATUS))) { if (txstat & XL_TXSTATUS_UNDERRUN || txstat & XL_TXSTATUS_JABBER || txstat & XL_TXSTATUS_RECLAIM) { device_printf(sc->xl_dev, "transmission error: 0x%02x\n", txstat); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET); xl_wait(sc); if (sc->xl_type == XL_TYPE_905B) { if (sc->xl_cdata.xl_tx_cnt) { int i; struct xl_chain *c; i = sc->xl_cdata.xl_tx_cons; c = &sc->xl_cdata.xl_tx_chain[i]; CSR_WRITE_4(sc, XL_DOWNLIST_PTR, c->xl_phys); CSR_WRITE_1(sc, XL_DOWN_POLL, 64); sc->xl_wdog_timer = 5; } } else { if (sc->xl_cdata.xl_tx_head != NULL) { CSR_WRITE_4(sc, XL_DOWNLIST_PTR, sc->xl_cdata.xl_tx_head->xl_phys); sc->xl_wdog_timer = 5; } } /* * Remember to set this for the * first generation 3c90X chips. */ CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8); if (txstat & XL_TXSTATUS_UNDERRUN && sc->xl_tx_thresh < XL_PACKET_SIZE) { sc->xl_tx_thresh += XL_MIN_FRAMELEN; device_printf(sc->xl_dev, "tx underrun, increasing tx start threshold to %d bytes\n", sc->xl_tx_thresh); } CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh); if (sc->xl_type == XL_TYPE_905B) { CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4)); } CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL); } else { CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL); } /* * Write an arbitrary byte to the TX_STATUS register * to clear this interrupt/error and advance to the next. 
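 *
 * In effect the enclosing loop pops a small status FIFO
 * (condensed sketch of the surrounding code):
 *
 *   while ((txstat = CSR_READ_1(sc, XL_TX_STATUS)) != 0) {
 *           ... handle the error bits in txstat ...
 *           CSR_WRITE_1(sc, XL_TX_STATUS, 0x01);   // pop this entry
 *   }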
*/ CSR_WRITE_1(sc, XL_TX_STATUS, 0x01); } } static void xl_intr(void *arg) { struct xl_softc *sc = arg; struct ifnet *ifp = sc->xl_ifp; u_int16_t status; XL_LOCK(sc); #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { XL_UNLOCK(sc); return; } #endif for (;;) { status = CSR_READ_2(sc, XL_STATUS); if ((status & XL_INTRS) == 0 || status == 0xFFFF) break; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|(status & XL_INTRS)); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) break; if (status & XL_STAT_UP_COMPLETE) { if (xl_rxeof(sc) == 0) { while (xl_rx_resync(sc)) xl_rxeof(sc); } } if (status & XL_STAT_DOWN_COMPLETE) { if (sc->xl_type == XL_TYPE_905B) xl_txeof_90xB(sc); else xl_txeof(sc); } if (status & XL_STAT_TX_COMPLETE) { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); xl_txeoc(sc); } if (status & XL_STAT_ADFAIL) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; xl_init_locked(sc); break; } if (status & XL_STAT_STATSOFLOW) xl_stats_update(sc); } if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd) && ifp->if_drv_flags & IFF_DRV_RUNNING) { if (sc->xl_type == XL_TYPE_905B) xl_start_90xB_locked(ifp); else xl_start_locked(ifp); } XL_UNLOCK(sc); } #ifdef DEVICE_POLLING static int xl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct xl_softc *sc = ifp->if_softc; int rx_npkts = 0; XL_LOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) rx_npkts = xl_poll_locked(ifp, cmd, count); XL_UNLOCK(sc); return (rx_npkts); } static int xl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct xl_softc *sc = ifp->if_softc; int rx_npkts; XL_LOCK_ASSERT(sc); sc->rxcycles = count; rx_npkts = xl_rxeof(sc); if (sc->xl_type == XL_TYPE_905B) xl_txeof_90xB(sc); else xl_txeof(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { if (sc->xl_type == XL_TYPE_905B) xl_start_90xB_locked(ifp); else xl_start_locked(ifp); } if (cmd == POLL_AND_CHECK_STATUS) { u_int16_t status; status = CSR_READ_2(sc, XL_STATUS); if (status & XL_INTRS && status != 0xFFFF) { CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|(status & XL_INTRS)); if (status & XL_STAT_TX_COMPLETE) { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); xl_txeoc(sc); } if (status & XL_STAT_ADFAIL) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; xl_init_locked(sc); } if (status & XL_STAT_STATSOFLOW) xl_stats_update(sc); } } return (rx_npkts); } #endif /* DEVICE_POLLING */ static void xl_tick(void *xsc) { struct xl_softc *sc = xsc; struct mii_data *mii; XL_LOCK_ASSERT(sc); if (sc->xl_miibus != NULL) { mii = device_get_softc(sc->xl_miibus); mii_tick(mii); } xl_stats_update(sc); if (xl_watchdog(sc) == EJUSTRETURN) return; callout_reset(&sc->xl_tick_callout, hz, xl_tick, sc); } static void xl_stats_update(struct xl_softc *sc) { struct ifnet *ifp = sc->xl_ifp; struct xl_stats xl_stats; u_int8_t *p; int i; XL_LOCK_ASSERT(sc); bzero((char *)&xl_stats, sizeof(struct xl_stats)); p = (u_int8_t *)&xl_stats; /* Read all the stats registers. */ XL_SEL_WIN(6); for (i = 0; i < 16; i++) *p++ = CSR_READ_1(sc, XL_W6_CARRIER_LOST + i); if_inc_counter(ifp, IFCOUNTER_IERRORS, xl_stats.xl_rx_overrun); if_inc_counter(ifp, IFCOUNTER_COLLISIONS, xl_stats.xl_tx_multi_collision + xl_stats.xl_tx_single_collision + xl_stats.xl_tx_late_collision); /* * Boomerang and cyclone chips have an extra stats counter * in window 4 (BadSSD). We have to read this too in order * to clear out all the stats registers and avoid a statsoflow * interrupt. */ XL_SEL_WIN(4); CSR_READ_1(sc, XL_W4_BADSSD); XL_SEL_WIN(7); } /* * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data * pointers to the fragment pointers. 
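 *
 * For example, a chain that bus_dmamap_load_mbuf_sg() maps into
 * three DMA segments (hypothetical lengths) is described to the
 * chip as:
 *
 *   xl_frag[0] = { xl_addr = seg[0].ds_addr, xl_len = 1024 }
 *   xl_frag[1] = { xl_addr = seg[1].ds_addr, xl_len = 512 }
 *   xl_frag[2] = { xl_addr = seg[2].ds_addr, xl_len = 60 | XL_LAST_FRAG }
 *
 * with XL_LAST_FRAG marking the final segment, exactly as done in
 * the loop below.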
*/ static int xl_encap(struct xl_softc *sc, struct xl_chain *c, struct mbuf **m_head) { struct mbuf *m_new; struct ifnet *ifp = sc->xl_ifp; int error, i, nseg, total_len; u_int32_t status; XL_LOCK_ASSERT(sc); error = bus_dmamap_load_mbuf_sg(sc->xl_mtag, c->xl_map, *m_head, sc->xl_cdata.xl_tx_segs, &nseg, BUS_DMA_NOWAIT); if (error && error != EFBIG) { if_printf(ifp, "can't map mbuf (error %d)\n", error); return (error); } /* * Handle special case: we used up all 63 fragments, * but we have more mbufs left in the chain. Copy the * data into an mbuf cluster. Note that we don't * bother clearing the values in the other fragment * pointers/counters; it wouldn't gain us anything, * and would waste cycles. */ if (error) { m_new = m_collapse(*m_head, M_NOWAIT, XL_MAXFRAGS); if (m_new == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } *m_head = m_new; error = bus_dmamap_load_mbuf_sg(sc->xl_mtag, c->xl_map, *m_head, sc->xl_cdata.xl_tx_segs, &nseg, BUS_DMA_NOWAIT); if (error) { m_freem(*m_head); *m_head = NULL; if_printf(ifp, "can't map mbuf (error %d)\n", error); return (error); } } KASSERT(nseg <= XL_MAXFRAGS, ("%s: too many DMA segments (%d)", __func__, nseg)); if (nseg == 0) { m_freem(*m_head); *m_head = NULL; return (EIO); } bus_dmamap_sync(sc->xl_mtag, c->xl_map, BUS_DMASYNC_PREWRITE); total_len = 0; for (i = 0; i < nseg; i++) { KASSERT(sc->xl_cdata.xl_tx_segs[i].ds_len <= MCLBYTES, ("segment size too large")); c->xl_ptr->xl_frag[i].xl_addr = htole32(sc->xl_cdata.xl_tx_segs[i].ds_addr); c->xl_ptr->xl_frag[i].xl_len = htole32(sc->xl_cdata.xl_tx_segs[i].ds_len); total_len += sc->xl_cdata.xl_tx_segs[i].ds_len; } c->xl_ptr->xl_frag[nseg - 1].xl_len |= htole32(XL_LAST_FRAG); if (sc->xl_type == XL_TYPE_905B) { status = XL_TXSTAT_RND_DEFEAT; #ifndef XL905B_TXCSUM_BROKEN if ((*m_head)->m_pkthdr.csum_flags) { if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) status |= XL_TXSTAT_IPCKSUM; if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP) status |= XL_TXSTAT_TCPCKSUM; if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP) status |= XL_TXSTAT_UDPCKSUM; } #endif } else status = total_len; c->xl_ptr->xl_status = htole32(status); c->xl_ptr->xl_next = 0; c->xl_mbuf = *m_head; return (0); } /* * Main transmit routine. To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit lists. We also save a * copy of the pointers since the transmit list fragment pointers are * physical addresses. */ static void xl_start(struct ifnet *ifp) { struct xl_softc *sc = ifp->if_softc; XL_LOCK(sc); if (sc->xl_type == XL_TYPE_905B) xl_start_90xB_locked(ifp); else xl_start_locked(ifp); XL_UNLOCK(sc); } static void xl_start_locked(struct ifnet *ifp) { struct xl_softc *sc = ifp->if_softc; struct mbuf *m_head; struct xl_chain *prev = NULL, *cur_tx = NULL, *start_tx; struct xl_chain *prev_tx; int error; XL_LOCK_ASSERT(sc); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; /* * Check for an available queue slot. If there are none, * punt. */ if (sc->xl_cdata.xl_tx_free == NULL) { xl_txeoc(sc); xl_txeof(sc); if (sc->xl_cdata.xl_tx_free == NULL) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; return; } } start_tx = sc->xl_cdata.xl_tx_free; for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->xl_cdata.xl_tx_free != NULL;) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* Pick a descriptor off the free list. */ prev_tx = cur_tx; cur_tx = sc->xl_cdata.xl_tx_free; /* Pack the data into the descriptor. 
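 * If the encapsulation below fails but the mbuf survives, the
 * packet is pushed back onto the send queue and the interface is
 * marked OACTIVE so a later transmit completion can restart
 * things; simplified:
 *
 *   if (xl_encap(sc, cur_tx, &m_head) != 0 && m_head != NULL) {
 *           ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 *           IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
 *           break;
 *   }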
*/ error = xl_encap(sc, cur_tx, &m_head); if (error) { cur_tx = prev_tx; if (m_head == NULL) break; ifp->if_drv_flags |= IFF_DRV_OACTIVE; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); break; } sc->xl_cdata.xl_tx_free = cur_tx->xl_next; cur_tx->xl_next = NULL; /* Chain it together. */ if (prev != NULL) { prev->xl_next = cur_tx; prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys); } prev = cur_tx; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ BPF_MTAP(ifp, cur_tx->xl_mbuf); } /* * If there are no packets queued, bail. */ if (cur_tx == NULL) return; /* * Place the request for the upload interrupt * in the last descriptor in the chain. This way, if * we're chaining several packets at once, we'll only * get an interrupt once for the whole chain rather than * once for each packet. */ cur_tx->xl_ptr->xl_status |= htole32(XL_TXSTAT_DL_INTR); /* * Queue the packets. If the TX channel is clear, update * the downlist pointer register. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL); xl_wait(sc); if (sc->xl_cdata.xl_tx_head != NULL) { sc->xl_cdata.xl_tx_tail->xl_next = start_tx; sc->xl_cdata.xl_tx_tail->xl_ptr->xl_next = htole32(start_tx->xl_phys); sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status &= htole32(~XL_TXSTAT_DL_INTR); sc->xl_cdata.xl_tx_tail = cur_tx; } else { sc->xl_cdata.xl_tx_head = start_tx; sc->xl_cdata.xl_tx_tail = cur_tx; } bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap, BUS_DMASYNC_PREWRITE); if (!CSR_READ_4(sc, XL_DOWNLIST_PTR)) CSR_WRITE_4(sc, XL_DOWNLIST_PTR, start_tx->xl_phys); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL); XL_SEL_WIN(7); /* * Set a timeout in case the chip goes out to lunch. */ sc->xl_wdog_timer = 5; /* * XXX Under certain conditions, usually on slower machines * where interrupts may be dropped, it's possible for the * adapter to chew up all the buffers in the receive ring * and stall, without us being able to do anything about it. * To guard against this, we need to make a pass over the * RX queue to make sure there aren't any packets pending. * Doing it here means we can flush the receive ring at the * same time the chip is DMAing the transmit descriptors we * just gave it. * * 3Com goes to some lengths to emphasize the Parallel Tasking (tm) * nature of their chips in all their marketing literature; * we may as well take advantage of it. :) */ taskqueue_enqueue(taskqueue_swi, &sc->xl_task); } static void xl_start_90xB_locked(struct ifnet *ifp) { struct xl_softc *sc = ifp->if_softc; struct mbuf *m_head; struct xl_chain *prev = NULL, *cur_tx = NULL, *start_tx; struct xl_chain *prev_tx; int error, idx; XL_LOCK_ASSERT(sc); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; idx = sc->xl_cdata.xl_tx_prod; start_tx = &sc->xl_cdata.xl_tx_chain[idx]; for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->xl_cdata.xl_tx_chain[idx].xl_mbuf == NULL;) { if ((XL_TX_LIST_CNT - sc->xl_cdata.xl_tx_cnt) < 3) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; prev_tx = cur_tx; cur_tx = &sc->xl_cdata.xl_tx_chain[idx]; /* Pack the data into the descriptor. */ error = xl_encap(sc, cur_tx, &m_head); if (error) { cur_tx = prev_tx; if (m_head == NULL) break; ifp->if_drv_flags |= IFF_DRV_OACTIVE; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); break; } /* Chain it together. */ if (prev != NULL) prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys); prev = cur_tx; /* * If there's a BPF listener, bounce a copy of this frame * to him. 
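 *
 * BPF_MTAP() first checks whether any listeners are attached and
 * only then copies the frame, so this is essentially free unless
 * something like
 *
 *   # tcpdump -i xl0
 *
 * has opened a tap on the interface (example invocation only).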
*/ BPF_MTAP(ifp, cur_tx->xl_mbuf); XL_INC(idx, XL_TX_LIST_CNT); sc->xl_cdata.xl_tx_cnt++; } /* * If there are no packets queued, bail. */ if (cur_tx == NULL) return; /* * Place the request for the upload interrupt * in the last descriptor in the chain. This way, if * we're chaining several packets at once, we'll only * get an interrupt once for the whole chain rather than * once for each packet. */ cur_tx->xl_ptr->xl_status |= htole32(XL_TXSTAT_DL_INTR); /* Start transmission */ sc->xl_cdata.xl_tx_prod = idx; start_tx->xl_prev->xl_ptr->xl_next = htole32(start_tx->xl_phys); bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap, BUS_DMASYNC_PREWRITE); /* * Set a timeout in case the chip goes out to lunch. */ sc->xl_wdog_timer = 5; } static void xl_init(void *xsc) { struct xl_softc *sc = xsc; XL_LOCK(sc); xl_init_locked(sc); XL_UNLOCK(sc); } static void xl_init_locked(struct xl_softc *sc) { struct ifnet *ifp = sc->xl_ifp; int error, i; struct mii_data *mii = NULL; XL_LOCK_ASSERT(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) return; /* * Cancel pending I/O and free all RX/TX buffers. */ xl_stop(sc); /* Reset the chip to a known state. */ xl_reset(sc); if (sc->xl_miibus == NULL) { CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET); xl_wait(sc); } CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET); xl_wait(sc); DELAY(10000); if (sc->xl_miibus != NULL) mii = device_get_softc(sc->xl_miibus); /* * Clear WOL status and disable all WOL feature as WOL * would interfere Rx operation under normal environments. */ if ((sc->xl_flags & XL_FLAG_WOL) != 0) { XL_SEL_WIN(7); CSR_READ_2(sc, XL_W7_BM_PME); CSR_WRITE_2(sc, XL_W7_BM_PME, 0); } /* Init our MAC address */ XL_SEL_WIN(2); for (i = 0; i < ETHER_ADDR_LEN; i++) { CSR_WRITE_1(sc, XL_W2_STATION_ADDR_LO + i, IF_LLADDR(sc->xl_ifp)[i]); } /* Clear the station mask. */ for (i = 0; i < 3; i++) CSR_WRITE_2(sc, XL_W2_STATION_MASK_LO + (i * 2), 0); #ifdef notdef /* Reset TX and RX. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET); xl_wait(sc); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET); xl_wait(sc); #endif /* Init circular RX list. */ error = xl_list_rx_init(sc); if (error) { device_printf(sc->xl_dev, "initialization of the rx ring failed (%d)\n", error); xl_stop(sc); return; } /* Init TX descriptors. */ if (sc->xl_type == XL_TYPE_905B) error = xl_list_tx_init_90xB(sc); else error = xl_list_tx_init(sc); if (error) { device_printf(sc->xl_dev, "initialization of the tx ring failed (%d)\n", error); xl_stop(sc); return; } /* * Set the TX freethresh value. * Note that this has no effect on 3c905B "cyclone" * cards but is required for 3c900/3c905 "boomerang" * cards in order to enable the download engine. */ CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8); /* Set the TX start threshold for best performance. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh); /* * If this is a 3c905B, also set the tx reclaim threshold. * This helps cut down on the number of tx reclaim errors * that could happen on a busy network. The chip multiplies * the register value by 16 to obtain the actual threshold * in bytes, so we divide by 16 when setting the value here. * The existing threshold value can be examined by reading * the register at offset 9 in window 5. */ if (sc->xl_type == XL_TYPE_905B) { CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4)); } /* Set RX filter bits. */ xl_rxfilter(sc); /* * Load the address of the RX list. 
We have to * stall the upload engine before we can manipulate * the uplist pointer register, then unstall it when * we're finished. We also have to wait for the * stall command to complete before proceeding. * Note that we have to do this after any RX resets * have completed since the uplist register is cleared * by a reset. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL); xl_wait(sc); CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL); xl_wait(sc); if (sc->xl_type == XL_TYPE_905B) { /* Set polling interval */ CSR_WRITE_1(sc, XL_DOWN_POLL, 64); /* Load the address of the TX list */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL); xl_wait(sc); CSR_WRITE_4(sc, XL_DOWNLIST_PTR, sc->xl_cdata.xl_tx_chain[0].xl_phys); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL); xl_wait(sc); } /* * If the coax transceiver is on, make sure to enable * the DC-DC converter. */ XL_SEL_WIN(3); if (sc->xl_xcvr == XL_XCVR_COAX) CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START); else CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP); /* * increase packet size to allow reception of 802.1q or ISL packets. * For the 3c90x chip, set the 'allow large packets' bit in the MAC * control register. For 3c90xB/C chips, use the RX packet size * register. */ if (sc->xl_type == XL_TYPE_905B) CSR_WRITE_2(sc, XL_W3_MAXPKTSIZE, XL_PACKET_SIZE); else { u_int8_t macctl; macctl = CSR_READ_1(sc, XL_W3_MAC_CTRL); macctl |= XL_MACCTRL_ALLOW_LARGE_PACK; CSR_WRITE_1(sc, XL_W3_MAC_CTRL, macctl); } /* Clear out the stats counters. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE); xl_stats_update(sc); XL_SEL_WIN(4); CSR_WRITE_2(sc, XL_W4_NET_DIAG, XL_NETDIAG_UPPER_BYTES_ENABLE); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_ENABLE); /* * Enable interrupts. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|XL_INTRS); #ifdef DEVICE_POLLING /* Disable interrupts if we are polling. */ if (ifp->if_capenable & IFCAP_POLLING) CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0); else #endif CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS); if (sc->xl_flags & XL_FLAG_FUNCREG) bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000); /* Set the RX early threshold */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_THRESH|(XL_PACKET_SIZE >>2)); CSR_WRITE_4(sc, XL_DMACTL, XL_DMACTL_UP_RX_EARLY); /* Enable receiver and transmitter. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE); xl_wait(sc); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE); xl_wait(sc); /* XXX Downcall to miibus. */ if (mii != NULL) mii_mediachg(mii); /* Select window 7 for normal operations. */ XL_SEL_WIN(7); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->xl_wdog_timer = 0; callout_reset(&sc->xl_tick_callout, hz, xl_tick, sc); } /* * Set media options. */ static int xl_ifmedia_upd(struct ifnet *ifp) { struct xl_softc *sc = ifp->if_softc; struct ifmedia *ifm = NULL; struct mii_data *mii = NULL; XL_LOCK(sc); if (sc->xl_miibus != NULL) mii = device_get_softc(sc->xl_miibus); if (mii == NULL) ifm = &sc->ifmedia; else ifm = &mii->mii_media; switch (IFM_SUBTYPE(ifm->ifm_media)) { case IFM_100_FX: case IFM_10_FL: case IFM_10_2: case IFM_10_5: xl_setmode(sc, ifm->ifm_media); XL_UNLOCK(sc); return (0); } if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX || sc->xl_media & XL_MEDIAOPT_BT4) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; xl_init_locked(sc); } else { xl_setmode(sc, ifm->ifm_media); } XL_UNLOCK(sc); return (0); } /* * Report current media status. 
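 *
 * The result of this routine is what ifconfig(8) prints on its
 * "media:" line, e.g. (illustrative output only):
 *
 *   media: Ethernet autoselect (100baseTX <full-duplex>)
 *   status: active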
*/ static void xl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct xl_softc *sc = ifp->if_softc; u_int32_t icfg; u_int16_t status = 0; struct mii_data *mii = NULL; XL_LOCK(sc); if (sc->xl_miibus != NULL) mii = device_get_softc(sc->xl_miibus); XL_SEL_WIN(4); status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS); XL_SEL_WIN(3); icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG) & XL_ICFG_CONNECTOR_MASK; icfg >>= XL_ICFG_CONNECTOR_BITS; ifmr->ifm_active = IFM_ETHER; ifmr->ifm_status = IFM_AVALID; if ((status & XL_MEDIASTAT_CARRIER) == 0) ifmr->ifm_status |= IFM_ACTIVE; switch (icfg) { case XL_XCVR_10BT: ifmr->ifm_active = IFM_ETHER|IFM_10_T; if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; break; case XL_XCVR_AUI: if (sc->xl_type == XL_TYPE_905B && sc->xl_media == XL_MEDIAOPT_10FL) { ifmr->ifm_active = IFM_ETHER|IFM_10_FL; if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; } else ifmr->ifm_active = IFM_ETHER|IFM_10_5; break; case XL_XCVR_COAX: ifmr->ifm_active = IFM_ETHER|IFM_10_2; break; /* * XXX MII and BTX/AUTO should be separate cases. */ case XL_XCVR_100BTX: case XL_XCVR_AUTO: case XL_XCVR_MII: if (mii != NULL) { mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } break; case XL_XCVR_100BFX: ifmr->ifm_active = IFM_ETHER|IFM_100_FX; break; default: if_printf(ifp, "unknown XCVR type: %d\n", icfg); break; } XL_UNLOCK(sc); } static int xl_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct xl_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; int error = 0, mask; struct mii_data *mii = NULL; switch (command) { case SIOCSIFFLAGS: XL_LOCK(sc); if (ifp->if_flags & IFF_UP) { if (ifp->if_drv_flags & IFF_DRV_RUNNING && (ifp->if_flags ^ sc->xl_if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) xl_rxfilter(sc); else xl_init_locked(sc); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) xl_stop(sc); } sc->xl_if_flags = ifp->if_flags; XL_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: /* XXX Downcall from if_addmulti() possibly with locks held. */ XL_LOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) xl_rxfilter(sc); XL_UNLOCK(sc); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: if (sc->xl_miibus != NULL) mii = device_get_softc(sc->xl_miibus); if (mii == NULL) error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); else error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; case SIOCSIFCAP: mask = ifr->ifr_reqcap ^ ifp->if_capenable; #ifdef DEVICE_POLLING if ((mask & IFCAP_POLLING) != 0 && (ifp->if_capabilities & IFCAP_POLLING) != 0) { ifp->if_capenable ^= IFCAP_POLLING; if ((ifp->if_capenable & IFCAP_POLLING) != 0) { error = ether_poll_register(xl_poll, ifp); if (error) break; XL_LOCK(sc); /* Disable interrupts */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0); ifp->if_capenable |= IFCAP_POLLING; XL_UNLOCK(sc); } else { error = ether_poll_deregister(ifp); /* Enable interrupts. 
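 * This undoes the mask written when polling was turned on a few
 * lines above; the two halves are symmetric:
 *
 *   enter polling:  CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB | 0);
 *   leave polling:  CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK | 0xFF);
 *                   CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB | XL_INTRS);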
*/ XL_LOCK(sc); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK | 0xFF); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB | XL_INTRS); if (sc->xl_flags & XL_FLAG_FUNCREG) bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000); XL_UNLOCK(sc); } } #endif /* DEVICE_POLLING */ XL_LOCK(sc); if ((mask & IFCAP_TXCSUM) != 0 && (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { ifp->if_capenable ^= IFCAP_TXCSUM; if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) ifp->if_hwassist |= XL905B_CSUM_FEATURES; else ifp->if_hwassist &= ~XL905B_CSUM_FEATURES; } if ((mask & IFCAP_RXCSUM) != 0 && (ifp->if_capabilities & IFCAP_RXCSUM) != 0) ifp->if_capenable ^= IFCAP_RXCSUM; if ((mask & IFCAP_WOL_MAGIC) != 0 && (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0) ifp->if_capenable ^= IFCAP_WOL_MAGIC; XL_UNLOCK(sc); break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static int xl_watchdog(struct xl_softc *sc) { struct ifnet *ifp = sc->xl_ifp; u_int16_t status = 0; int misintr; XL_LOCK_ASSERT(sc); if (sc->xl_wdog_timer == 0 || --sc->xl_wdog_timer != 0) return (0); xl_rxeof(sc); xl_txeoc(sc); misintr = 0; if (sc->xl_type == XL_TYPE_905B) { xl_txeof_90xB(sc); if (sc->xl_cdata.xl_tx_cnt == 0) misintr++; } else { xl_txeof(sc); if (sc->xl_cdata.xl_tx_head == NULL) misintr++; } if (misintr != 0) { device_printf(sc->xl_dev, "watchdog timeout (missed Tx interrupts) -- recovering\n"); return (0); } if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); XL_SEL_WIN(4); status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS); device_printf(sc->xl_dev, "watchdog timeout\n"); if (status & XL_MEDIASTAT_CARRIER) device_printf(sc->xl_dev, "no carrier - transceiver cable problem?\n"); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; xl_init_locked(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { if (sc->xl_type == XL_TYPE_905B) xl_start_90xB_locked(ifp); else xl_start_locked(ifp); } return (EJUSTRETURN); } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void xl_stop(struct xl_softc *sc) { int i; struct ifnet *ifp = sc->xl_ifp; XL_LOCK_ASSERT(sc); sc->xl_wdog_timer = 0; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISCARD); xl_wait(sc); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_DISABLE); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP); DELAY(800); #ifdef foo CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET); xl_wait(sc); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET); xl_wait(sc); #endif CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|XL_STAT_INTLATCH); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|0); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0); if (sc->xl_flags & XL_FLAG_FUNCREG) bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000); /* Stop the stats updater. */ callout_stop(&sc->xl_tick_callout); /* * Free data in the RX lists. */ for (i = 0; i < XL_RX_LIST_CNT; i++) { if (sc->xl_cdata.xl_rx_chain[i].xl_mbuf != NULL) { bus_dmamap_unload(sc->xl_mtag, sc->xl_cdata.xl_rx_chain[i].xl_map); bus_dmamap_destroy(sc->xl_mtag, sc->xl_cdata.xl_rx_chain[i].xl_map); m_freem(sc->xl_cdata.xl_rx_chain[i].xl_mbuf); sc->xl_cdata.xl_rx_chain[i].xl_mbuf = NULL; } } if (sc->xl_ldata.xl_rx_list != NULL) bzero(sc->xl_ldata.xl_rx_list, XL_RX_LIST_SZ); /* * Free the TX list buffers. 
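 *
 * Each populated entry gets the same three-step teardown as the
 * RX loop above (sketch):
 *
 *   bus_dmamap_unload(sc->xl_mtag, map);    // detach the DMA mapping
 *   bus_dmamap_destroy(sc->xl_mtag, map);   // release the map itself
 *   m_freem(mbuf);                          // free the mbuf (chain)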
*/ for (i = 0; i < XL_TX_LIST_CNT; i++) { if (sc->xl_cdata.xl_tx_chain[i].xl_mbuf != NULL) { bus_dmamap_unload(sc->xl_mtag, sc->xl_cdata.xl_tx_chain[i].xl_map); bus_dmamap_destroy(sc->xl_mtag, sc->xl_cdata.xl_tx_chain[i].xl_map); m_freem(sc->xl_cdata.xl_tx_chain[i].xl_mbuf); sc->xl_cdata.xl_tx_chain[i].xl_mbuf = NULL; } } if (sc->xl_ldata.xl_tx_list != NULL) bzero(sc->xl_ldata.xl_tx_list, XL_TX_LIST_SZ); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static int xl_shutdown(device_t dev) { return (xl_suspend(dev)); } static int xl_suspend(device_t dev) { struct xl_softc *sc; sc = device_get_softc(dev); XL_LOCK(sc); xl_stop(sc); xl_setwol(sc); XL_UNLOCK(sc); return (0); } static int xl_resume(device_t dev) { struct xl_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->xl_ifp; XL_LOCK(sc); if (ifp->if_flags & IFF_UP) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; xl_init_locked(sc); } XL_UNLOCK(sc); return (0); } static void xl_setwol(struct xl_softc *sc) { struct ifnet *ifp; u_int16_t cfg, pmstat; if ((sc->xl_flags & XL_FLAG_WOL) == 0) return; ifp = sc->xl_ifp; XL_SEL_WIN(7); /* Clear any pending PME events. */ CSR_READ_2(sc, XL_W7_BM_PME); cfg = 0; if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) cfg |= XL_BM_PME_MAGIC; CSR_WRITE_2(sc, XL_W7_BM_PME, cfg); /* Enable RX. */ if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE); /* Request PME. */ pmstat = pci_read_config(sc->xl_dev, sc->xl_pmcap + PCIR_POWER_STATUS, 2); if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) pmstat |= PCIM_PSTAT_PMEENABLE; else pmstat &= ~PCIM_PSTAT_PMEENABLE; pci_write_config(sc->xl_dev, sc->xl_pmcap + PCIR_POWER_STATUS, pmstat, 2); } diff --git a/sys/net/iflib.c b/sys/net/iflib.c index 26c5b0974048..a9fc8090c48a 100644 --- a/sys/net/iflib.c +++ b/sys/net/iflib.c @@ -1,6841 +1,6838 @@ /*- * Copyright (c) 2014-2018, Matthew Macy * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Neither the name of Matthew Macy nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_inet6.h" #include "opt_acpi.h" #include "opt_sched.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ifdi_if.h" #ifdef PCI_IOV #include #endif #include /* * enable accounting of every mbuf as it comes in to and goes out of * iflib's software descriptor references */ #define MEMORY_LOGGING 0 /* * Enable mbuf vectors for compressing long mbuf chains */ /* * NB: * - Prefetching in tx cleaning should perhaps be a tunable. The distance ahead * we prefetch needs to be determined by the time spent in m_free vis a vis * the cost of a prefetch. This will of course vary based on the workload: * - NFLX's m_free path is dominated by vm-based M_EXT manipulation which * is quite expensive, thus suggesting very little prefetch. * - small packet forwarding which is just returning a single mbuf to * UMA will typically be very fast vis a vis the cost of a memory * access. */ /* * File organization: * - private structures * - iflib private utility functions * - ifnet functions * - vlan registry and other exported functions * - iflib public core functions * * */ MALLOC_DEFINE(M_IFLIB, "iflib", "ifnet library"); struct iflib_txq; typedef struct iflib_txq *iflib_txq_t; struct iflib_rxq; typedef struct iflib_rxq *iflib_rxq_t; struct iflib_fl; typedef struct iflib_fl *iflib_fl_t; struct iflib_ctx; static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid); static void iflib_timer(void *arg); typedef struct iflib_filter_info { driver_filter_t *ifi_filter; void *ifi_filter_arg; struct grouptask *ifi_task; void *ifi_ctx; } *iflib_filter_info_t; struct iflib_ctx { KOBJ_FIELDS; /* * Pointer to hardware driver's softc */ void *ifc_softc; device_t ifc_dev; if_t ifc_ifp; cpuset_t ifc_cpus; if_shared_ctx_t ifc_sctx; struct if_softc_ctx ifc_softc_ctx; struct sx ifc_ctx_sx; struct mtx ifc_state_mtx; iflib_txq_t ifc_txqs; iflib_rxq_t ifc_rxqs; uint32_t ifc_if_flags; uint32_t ifc_flags; uint32_t ifc_max_fl_buf_size; uint32_t ifc_rx_mbuf_sz; int ifc_link_state; int ifc_watchdog_events; struct cdev *ifc_led_dev; struct resource *ifc_msix_mem; struct if_irq ifc_legacy_irq; struct grouptask ifc_admin_task; struct grouptask ifc_vflr_task; struct iflib_filter_info ifc_filter_info; struct ifmedia ifc_media; struct ifmedia *ifc_mediap; struct sysctl_oid *ifc_sysctl_node; uint16_t ifc_sysctl_ntxqs; uint16_t ifc_sysctl_nrxqs; uint16_t ifc_sysctl_qs_eq_override; uint16_t ifc_sysctl_rx_budget; uint16_t ifc_sysctl_tx_abdicate; uint16_t ifc_sysctl_core_offset; #define CORE_OFFSET_UNSPECIFIED 0xffff uint8_t ifc_sysctl_separate_txrx; qidx_t ifc_sysctl_ntxds[8]; qidx_t ifc_sysctl_nrxds[8]; struct if_txrx ifc_txrx; #define isc_txd_encap ifc_txrx.ift_txd_encap #define isc_txd_flush ifc_txrx.ift_txd_flush #define isc_txd_credits_update ifc_txrx.ift_txd_credits_update #define isc_rxd_available ifc_txrx.ift_rxd_available #define isc_rxd_pkt_get ifc_txrx.ift_rxd_pkt_get #define isc_rxd_refill ifc_txrx.ift_rxd_refill #define isc_rxd_flush ifc_txrx.ift_rxd_flush #define isc_rxd_refill ifc_txrx.ift_rxd_refill #define isc_rxd_refill ifc_txrx.ift_rxd_refill #define isc_legacy_intr 
ifc_txrx.ift_legacy_intr eventhandler_tag ifc_vlan_attach_event; eventhandler_tag ifc_vlan_detach_event; struct ether_addr ifc_mac; }; void * iflib_get_softc(if_ctx_t ctx) { return (ctx->ifc_softc); } device_t iflib_get_dev(if_ctx_t ctx) { return (ctx->ifc_dev); } if_t iflib_get_ifp(if_ctx_t ctx) { return (ctx->ifc_ifp); } struct ifmedia * iflib_get_media(if_ctx_t ctx) { return (ctx->ifc_mediap); } uint32_t iflib_get_flags(if_ctx_t ctx) { return (ctx->ifc_flags); } void iflib_set_mac(if_ctx_t ctx, uint8_t mac[ETHER_ADDR_LEN]) { bcopy(mac, ctx->ifc_mac.octet, ETHER_ADDR_LEN); } if_softc_ctx_t iflib_get_softc_ctx(if_ctx_t ctx) { return (&ctx->ifc_softc_ctx); } if_shared_ctx_t iflib_get_sctx(if_ctx_t ctx) { return (ctx->ifc_sctx); } #define IP_ALIGNED(m) ((((uintptr_t)(m)->m_data) & 0x3) == 0x2) #define CACHE_PTR_INCREMENT (CACHE_LINE_SIZE/sizeof(void*)) #define CACHE_PTR_NEXT(ptr) ((void *)(((uintptr_t)(ptr)+CACHE_LINE_SIZE-1) & (CACHE_LINE_SIZE-1))) #define LINK_ACTIVE(ctx) ((ctx)->ifc_link_state == LINK_STATE_UP) #define CTX_IS_VF(ctx) ((ctx)->ifc_sctx->isc_flags & IFLIB_IS_VF) typedef struct iflib_sw_rx_desc_array { bus_dmamap_t *ifsd_map; /* bus_dma maps for packet */ struct mbuf **ifsd_m; /* pkthdr mbufs */ caddr_t *ifsd_cl; /* direct cluster pointer for rx */ bus_addr_t *ifsd_ba; /* bus addr of cluster for rx */ } iflib_rxsd_array_t; typedef struct iflib_sw_tx_desc_array { bus_dmamap_t *ifsd_map; /* bus_dma maps for packet */ bus_dmamap_t *ifsd_tso_map; /* bus_dma maps for TSO packet */ struct mbuf **ifsd_m; /* pkthdr mbufs */ } if_txsd_vec_t; /* magic number that should be high enough for any hardware */ #define IFLIB_MAX_TX_SEGS 128 #define IFLIB_RX_COPY_THRESH 128 #define IFLIB_MAX_RX_REFRESH 32 /* The minimum descriptors per second before we start coalescing */ #define IFLIB_MIN_DESC_SEC 16384 #define IFLIB_DEFAULT_TX_UPDATE_FREQ 16 #define IFLIB_QUEUE_IDLE 0 #define IFLIB_QUEUE_HUNG 1 #define IFLIB_QUEUE_WORKING 2 /* maximum number of txqs that can share an rx interrupt */ #define IFLIB_MAX_TX_SHARED_INTR 4 /* this should really scale with ring size - this is a fairly arbitrary value */ #define TX_BATCH_SIZE 32 #define IFLIB_RESTART_BUDGET 8 #define CSUM_OFFLOAD (CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \ CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \ CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP) struct iflib_txq { qidx_t ift_in_use; qidx_t ift_cidx; qidx_t ift_cidx_processed; qidx_t ift_pidx; uint8_t ift_gen; uint8_t ift_br_offset; uint16_t ift_npending; uint16_t ift_db_pending; uint16_t ift_rs_pending; /* implicit pad */ uint8_t ift_txd_size[8]; uint64_t ift_processed; uint64_t ift_cleaned; uint64_t ift_cleaned_prev; #if MEMORY_LOGGING uint64_t ift_enqueued; uint64_t ift_dequeued; #endif uint64_t ift_no_tx_dma_setup; uint64_t ift_no_desc_avail; uint64_t ift_mbuf_defrag_failed; uint64_t ift_mbuf_defrag; uint64_t ift_map_failed; uint64_t ift_txd_encap_efbig; uint64_t ift_pullups; uint64_t ift_last_timer_tick; struct mtx ift_mtx; struct mtx ift_db_mtx; /* constant values */ if_ctx_t ift_ctx; struct ifmp_ring *ift_br; struct grouptask ift_task; qidx_t ift_size; uint16_t ift_id; struct callout ift_timer; if_txsd_vec_t ift_sds; uint8_t ift_qstatus; uint8_t ift_closed; uint8_t ift_update_freq; struct iflib_filter_info ift_filter_info; bus_dma_tag_t ift_buf_tag; bus_dma_tag_t ift_tso_buf_tag; iflib_dma_info_t ift_ifdi; #define MTX_NAME_LEN 16 char ift_mtx_name[MTX_NAME_LEN]; bus_dma_segment_t ift_segs[IFLIB_MAX_TX_SEGS] __aligned(CACHE_LINE_SIZE); #ifdef IFLIB_DIAGNOSTICS uint64_t ift_cpu_exec_count[256]; 
#endif } __aligned(CACHE_LINE_SIZE); struct iflib_fl { qidx_t ifl_cidx; qidx_t ifl_pidx; qidx_t ifl_credits; uint8_t ifl_gen; uint8_t ifl_rxd_size; #if MEMORY_LOGGING uint64_t ifl_m_enqueued; uint64_t ifl_m_dequeued; uint64_t ifl_cl_enqueued; uint64_t ifl_cl_dequeued; #endif /* implicit pad */ bitstr_t *ifl_rx_bitmap; qidx_t ifl_fragidx; /* constant */ qidx_t ifl_size; uint16_t ifl_buf_size; uint16_t ifl_cltype; uma_zone_t ifl_zone; iflib_rxsd_array_t ifl_sds; iflib_rxq_t ifl_rxq; uint8_t ifl_id; bus_dma_tag_t ifl_buf_tag; iflib_dma_info_t ifl_ifdi; uint64_t ifl_bus_addrs[IFLIB_MAX_RX_REFRESH] __aligned(CACHE_LINE_SIZE); caddr_t ifl_vm_addrs[IFLIB_MAX_RX_REFRESH]; qidx_t ifl_rxd_idxs[IFLIB_MAX_RX_REFRESH]; } __aligned(CACHE_LINE_SIZE); static inline qidx_t get_inuse(int size, qidx_t cidx, qidx_t pidx, uint8_t gen) { qidx_t used; if (pidx > cidx) used = pidx - cidx; else if (pidx < cidx) used = size - cidx + pidx; else if (gen == 0 && pidx == cidx) used = 0; else if (gen == 1 && pidx == cidx) used = size; else panic("bad state"); return (used); } #define TXQ_AVAIL(txq) (txq->ift_size - get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx, txq->ift_gen)) #define IDXDIFF(head, tail, wrap) \ ((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head)) struct iflib_rxq { if_ctx_t ifr_ctx; iflib_fl_t ifr_fl; uint64_t ifr_rx_irq; struct pfil_head *pfil; /* * If there is a separate completion queue (IFLIB_HAS_RXCQ), this is * the command queue consumer index. Otherwise it's unused. */ qidx_t ifr_cq_cidx; uint16_t ifr_id; uint8_t ifr_nfl; uint8_t ifr_ntxqirq; uint8_t ifr_txqid[IFLIB_MAX_TX_SHARED_INTR]; uint8_t ifr_fl_offset; struct lro_ctrl ifr_lc; struct grouptask ifr_task; struct iflib_filter_info ifr_filter_info; iflib_dma_info_t ifr_ifdi; /* dynamically allocate if any drivers need a value substantially larger than this */ struct if_rxd_frag ifr_frags[IFLIB_MAX_RX_SEGS] __aligned(CACHE_LINE_SIZE); #ifdef IFLIB_DIAGNOSTICS uint64_t ifr_cpu_exec_count[256]; #endif } __aligned(CACHE_LINE_SIZE); typedef struct if_rxsd { caddr_t *ifsd_cl; iflib_fl_t ifsd_fl; qidx_t ifsd_cidx; } *if_rxsd_t; /* multiple of word size */ #ifdef __LP64__ #define PKT_INFO_SIZE 6 #define RXD_INFO_SIZE 5 #define PKT_TYPE uint64_t #else #define PKT_INFO_SIZE 11 #define RXD_INFO_SIZE 8 #define PKT_TYPE uint32_t #endif #define PKT_LOOP_BOUND ((PKT_INFO_SIZE/3)*3) #define RXD_LOOP_BOUND ((RXD_INFO_SIZE/4)*4) typedef struct if_pkt_info_pad { PKT_TYPE pkt_val[PKT_INFO_SIZE]; } *if_pkt_info_pad_t; typedef struct if_rxd_info_pad { PKT_TYPE rxd_val[RXD_INFO_SIZE]; } *if_rxd_info_pad_t; CTASSERT(sizeof(struct if_pkt_info_pad) == sizeof(struct if_pkt_info)); CTASSERT(sizeof(struct if_rxd_info_pad) == sizeof(struct if_rxd_info)); static inline void pkt_info_zero(if_pkt_info_t pi) { if_pkt_info_pad_t pi_pad; pi_pad = (if_pkt_info_pad_t)pi; pi_pad->pkt_val[0] = 0; pi_pad->pkt_val[1] = 0; pi_pad->pkt_val[2] = 0; pi_pad->pkt_val[3] = 0; pi_pad->pkt_val[4] = 0; pi_pad->pkt_val[5] = 0; #ifndef __LP64__ pi_pad->pkt_val[6] = 0; pi_pad->pkt_val[7] = 0; pi_pad->pkt_val[8] = 0; pi_pad->pkt_val[9] = 0; pi_pad->pkt_val[10] = 0; #endif } static device_method_t iflib_pseudo_methods[] = { DEVMETHOD(device_attach, noop_attach), DEVMETHOD(device_detach, iflib_pseudo_detach), DEVMETHOD_END }; driver_t iflib_pseudodriver = { "iflib_pseudo", iflib_pseudo_methods, sizeof(struct iflib_ctx), }; static inline void rxd_info_zero(if_rxd_info_t ri) { if_rxd_info_pad_t ri_pad; int i; ri_pad = (if_rxd_info_pad_t)ri; for (i = 0; i < RXD_LOOP_BOUND; i += 4) { 
ri_pad->rxd_val[i] = 0; ri_pad->rxd_val[i+1] = 0; ri_pad->rxd_val[i+2] = 0; ri_pad->rxd_val[i+3] = 0; } #ifdef __LP64__ ri_pad->rxd_val[RXD_INFO_SIZE-1] = 0; #endif } /* * Only allow a single packet to take up most 1/nth of the tx ring */ #define MAX_SINGLE_PACKET_FRACTION 12 #define IF_BAD_DMA (bus_addr_t)-1 #define CTX_ACTIVE(ctx) ((if_getdrvflags((ctx)->ifc_ifp) & IFF_DRV_RUNNING)) #define CTX_LOCK_INIT(_sc) sx_init(&(_sc)->ifc_ctx_sx, "iflib ctx lock") #define CTX_LOCK(ctx) sx_xlock(&(ctx)->ifc_ctx_sx) #define CTX_UNLOCK(ctx) sx_xunlock(&(ctx)->ifc_ctx_sx) #define CTX_LOCK_DESTROY(ctx) sx_destroy(&(ctx)->ifc_ctx_sx) #define STATE_LOCK_INIT(_sc, _name) mtx_init(&(_sc)->ifc_state_mtx, _name, "iflib state lock", MTX_DEF) #define STATE_LOCK(ctx) mtx_lock(&(ctx)->ifc_state_mtx) #define STATE_UNLOCK(ctx) mtx_unlock(&(ctx)->ifc_state_mtx) #define STATE_LOCK_DESTROY(ctx) mtx_destroy(&(ctx)->ifc_state_mtx) #define CALLOUT_LOCK(txq) mtx_lock(&txq->ift_mtx) #define CALLOUT_UNLOCK(txq) mtx_unlock(&txq->ift_mtx) void iflib_set_detach(if_ctx_t ctx) { STATE_LOCK(ctx); ctx->ifc_flags |= IFC_IN_DETACH; STATE_UNLOCK(ctx); } /* Our boot-time initialization hook */ static int iflib_module_event_handler(module_t, int, void *); static moduledata_t iflib_moduledata = { "iflib", iflib_module_event_handler, NULL }; DECLARE_MODULE(iflib, iflib_moduledata, SI_SUB_INIT_IF, SI_ORDER_ANY); MODULE_VERSION(iflib, 1); MODULE_DEPEND(iflib, pci, 1, 1, 1); MODULE_DEPEND(iflib, ether, 1, 1, 1); TASKQGROUP_DEFINE(if_io_tqg, mp_ncpus, 1); TASKQGROUP_DEFINE(if_config_tqg, 1, 1); #ifndef IFLIB_DEBUG_COUNTERS #ifdef INVARIANTS #define IFLIB_DEBUG_COUNTERS 1 #else #define IFLIB_DEBUG_COUNTERS 0 #endif /* !INVARIANTS */ #endif static SYSCTL_NODE(_net, OID_AUTO, iflib, CTLFLAG_RD, 0, "iflib driver parameters"); /* * XXX need to ensure that this can't accidentally cause the head to be moved backwards */ static int iflib_min_tx_latency = 0; SYSCTL_INT(_net_iflib, OID_AUTO, min_tx_latency, CTLFLAG_RW, &iflib_min_tx_latency, 0, "minimize transmit latency at the possible expense of throughput"); static int iflib_no_tx_batch = 0; SYSCTL_INT(_net_iflib, OID_AUTO, no_tx_batch, CTLFLAG_RW, &iflib_no_tx_batch, 0, "minimize transmit latency at the possible expense of throughput"); #if IFLIB_DEBUG_COUNTERS static int iflib_tx_seen; static int iflib_tx_sent; static int iflib_tx_encap; static int iflib_rx_allocs; static int iflib_fl_refills; static int iflib_fl_refills_large; static int iflib_tx_frees; SYSCTL_INT(_net_iflib, OID_AUTO, tx_seen, CTLFLAG_RD, &iflib_tx_seen, 0, "# TX mbufs seen"); SYSCTL_INT(_net_iflib, OID_AUTO, tx_sent, CTLFLAG_RD, &iflib_tx_sent, 0, "# TX mbufs sent"); SYSCTL_INT(_net_iflib, OID_AUTO, tx_encap, CTLFLAG_RD, &iflib_tx_encap, 0, "# TX mbufs encapped"); SYSCTL_INT(_net_iflib, OID_AUTO, tx_frees, CTLFLAG_RD, &iflib_tx_frees, 0, "# TX frees"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_allocs, CTLFLAG_RD, &iflib_rx_allocs, 0, "# RX allocations"); SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills, CTLFLAG_RD, &iflib_fl_refills, 0, "# refills"); SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills_large, CTLFLAG_RD, &iflib_fl_refills_large, 0, "# large refills"); static int iflib_txq_drain_flushing; static int iflib_txq_drain_oactive; static int iflib_txq_drain_notready; SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_flushing, CTLFLAG_RD, &iflib_txq_drain_flushing, 0, "# drain flushes"); SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_oactive, CTLFLAG_RD, &iflib_txq_drain_oactive, 0, "# drain oactives"); SYSCTL_INT(_net_iflib, OID_AUTO, 
txq_drain_notready, CTLFLAG_RD, &iflib_txq_drain_notready, 0, "# drain notready"); static int iflib_encap_load_mbuf_fail; static int iflib_encap_pad_mbuf_fail; static int iflib_encap_txq_avail_fail; static int iflib_encap_txd_encap_fail; SYSCTL_INT(_net_iflib, OID_AUTO, encap_load_mbuf_fail, CTLFLAG_RD, &iflib_encap_load_mbuf_fail, 0, "# busdma load failures"); SYSCTL_INT(_net_iflib, OID_AUTO, encap_pad_mbuf_fail, CTLFLAG_RD, &iflib_encap_pad_mbuf_fail, 0, "# runt frame pad failures"); SYSCTL_INT(_net_iflib, OID_AUTO, encap_txq_avail_fail, CTLFLAG_RD, &iflib_encap_txq_avail_fail, 0, "# txq avail failures"); SYSCTL_INT(_net_iflib, OID_AUTO, encap_txd_encap_fail, CTLFLAG_RD, &iflib_encap_txd_encap_fail, 0, "# driver encap failures"); static int iflib_task_fn_rxs; static int iflib_rx_intr_enables; static int iflib_fast_intrs; static int iflib_rx_unavail; static int iflib_rx_ctx_inactive; static int iflib_rx_if_input; static int iflib_rxd_flush; static int iflib_verbose_debug; SYSCTL_INT(_net_iflib, OID_AUTO, task_fn_rx, CTLFLAG_RD, &iflib_task_fn_rxs, 0, "# task_fn_rx calls"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_intr_enables, CTLFLAG_RD, &iflib_rx_intr_enables, 0, "# RX intr enables"); SYSCTL_INT(_net_iflib, OID_AUTO, fast_intrs, CTLFLAG_RD, &iflib_fast_intrs, 0, "# fast_intr calls"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_unavail, CTLFLAG_RD, &iflib_rx_unavail, 0, "# times rxeof called with no available data"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_ctx_inactive, CTLFLAG_RD, &iflib_rx_ctx_inactive, 0, "# times rxeof called with inactive context"); SYSCTL_INT(_net_iflib, OID_AUTO, rx_if_input, CTLFLAG_RD, &iflib_rx_if_input, 0, "# times rxeof called if_input"); SYSCTL_INT(_net_iflib, OID_AUTO, rxd_flush, CTLFLAG_RD, &iflib_rxd_flush, 0, "# times rxd_flush called"); SYSCTL_INT(_net_iflib, OID_AUTO, verbose_debug, CTLFLAG_RW, &iflib_verbose_debug, 0, "enable verbose debugging"); #define DBG_COUNTER_INC(name) atomic_add_int(&(iflib_ ## name), 1) static void iflib_debug_reset(void) { iflib_tx_seen = iflib_tx_sent = iflib_tx_encap = iflib_rx_allocs = iflib_fl_refills = iflib_fl_refills_large = iflib_tx_frees = iflib_txq_drain_flushing = iflib_txq_drain_oactive = iflib_txq_drain_notready = iflib_encap_load_mbuf_fail = iflib_encap_pad_mbuf_fail = iflib_encap_txq_avail_fail = iflib_encap_txd_encap_fail = iflib_task_fn_rxs = iflib_rx_intr_enables = iflib_fast_intrs = iflib_rx_unavail = iflib_rx_ctx_inactive = iflib_rx_if_input = iflib_rxd_flush = 0; } #else #define DBG_COUNTER_INC(name) static void iflib_debug_reset(void) {} #endif #define IFLIB_DEBUG 0 static void iflib_tx_structures_free(if_ctx_t ctx); static void iflib_rx_structures_free(if_ctx_t ctx); static int iflib_queues_alloc(if_ctx_t ctx); static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq); static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget); static int iflib_qset_structures_setup(if_ctx_t ctx); static int iflib_msix_init(if_ctx_t ctx); static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filterarg, int *rid, const char *str); static void iflib_txq_check_drain(iflib_txq_t txq, int budget); static uint32_t iflib_txq_can_drain(struct ifmp_ring *); #ifdef ALTQ static void iflib_altq_if_start(if_t ifp); static int iflib_altq_if_transmit(if_t ifp, struct mbuf *m); #endif static int iflib_register(if_ctx_t); static void iflib_deregister(if_ctx_t); static void iflib_unregister_vlan_handlers(if_ctx_t ctx); static void iflib_init_locked(if_ctx_t ctx); static void 
iflib_add_device_sysctl_pre(if_ctx_t ctx); static void iflib_add_device_sysctl_post(if_ctx_t ctx); static void iflib_ifmp_purge(iflib_txq_t txq); static void _iflib_pre_assert(if_softc_ctx_t scctx); static void iflib_if_init_locked(if_ctx_t ctx); static void iflib_free_intr_mem(if_ctx_t ctx); #ifndef __NO_STRICT_ALIGNMENT static struct mbuf * iflib_fixup_rx(struct mbuf *m); #endif static SLIST_HEAD(cpu_offset_list, cpu_offset) cpu_offsets = SLIST_HEAD_INITIALIZER(cpu_offsets); struct cpu_offset { SLIST_ENTRY(cpu_offset) entries; cpuset_t set; unsigned int refcount; uint16_t offset; }; static struct mtx cpu_offset_mtx; MTX_SYSINIT(iflib_cpu_offset, &cpu_offset_mtx, "iflib_cpu_offset lock", MTX_DEF); DEBUGNET_DEFINE(iflib); #ifdef DEV_NETMAP #include #include #include MODULE_DEPEND(iflib, netmap, 1, 1, 1); static int netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init); /* * device-specific sysctl variables: * * iflib_crcstrip: 0: keep CRC in rx frames (default), 1: strip it. * During regular operations the CRC is stripped, but on some * hardware reception of frames not multiple of 64 is slower, * so using crcstrip=0 helps in benchmarks. * * iflib_rx_miss, iflib_rx_miss_bufs: * count packets that might be missed due to lost interrupts. */ SYSCTL_DECL(_dev_netmap); /* * The xl driver by default strips CRCs and we do not override it. */ int iflib_crcstrip = 1; SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_crcstrip, CTLFLAG_RW, &iflib_crcstrip, 1, "strip CRC on RX frames"); int iflib_rx_miss, iflib_rx_miss_bufs; SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss, CTLFLAG_RW, &iflib_rx_miss, 0, "potentially missed RX intr"); SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss_bufs, CTLFLAG_RW, &iflib_rx_miss_bufs, 0, "potentially missed RX intr bufs"); /* * Register/unregister. We are already under netmap lock. * Only called on the first register or the last unregister. */ static int iflib_netmap_register(struct netmap_adapter *na, int onoff) { if_t ifp = na->ifp; if_ctx_t ctx = ifp->if_softc; int status; CTX_LOCK(ctx); IFDI_INTR_DISABLE(ctx); /* Tell the stack that the interface is no longer active */ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); if (!CTX_IS_VF(ctx)) IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); /* enable or disable flags and callbacks in na and ifp */ if (onoff) { nm_set_native_flags(na); } else { nm_clear_native_flags(na); } iflib_stop(ctx); iflib_init_locked(ctx); IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); // XXX why twice ? status = ifp->if_drv_flags & IFF_DRV_RUNNING ? 
0 : 1; if (status) nm_clear_native_flags(na); CTX_UNLOCK(ctx); return (status); } static int netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init) { struct netmap_adapter *na = kring->na; u_int const lim = kring->nkr_num_slots - 1; u_int head = kring->rhead; struct netmap_ring *ring = kring->ring; bus_dmamap_t *map; struct if_rxd_update iru; if_ctx_t ctx = rxq->ifr_ctx; iflib_fl_t fl = &rxq->ifr_fl[0]; uint32_t refill_pidx, nic_i; #if IFLIB_DEBUG_COUNTERS int rf_count = 0; #endif if (nm_i == head && __predict_true(!init)) return 0; iru_init(&iru, rxq, 0 /* flid */); map = fl->ifl_sds.ifsd_map; refill_pidx = netmap_idx_k2n(kring, nm_i); /* * IMPORTANT: we must leave one free slot in the ring, * so move head back by one unit */ head = nm_prev(head, lim); nic_i = UINT_MAX; DBG_COUNTER_INC(fl_refills); while (nm_i != head) { #if IFLIB_DEBUG_COUNTERS if (++rf_count == 9) DBG_COUNTER_INC(fl_refills_large); #endif for (int tmp_pidx = 0; tmp_pidx < IFLIB_MAX_RX_REFRESH && nm_i != head; tmp_pidx++) { struct netmap_slot *slot = &ring->slot[nm_i]; void *addr = PNMB(na, slot, &fl->ifl_bus_addrs[tmp_pidx]); uint32_t nic_i_dma = refill_pidx; nic_i = netmap_idx_k2n(kring, nm_i); MPASS(tmp_pidx < IFLIB_MAX_RX_REFRESH); if (addr == NETMAP_BUF_BASE(na)) /* bad buf */ return netmap_ring_reinit(kring); fl->ifl_vm_addrs[tmp_pidx] = addr; if (__predict_false(init)) { netmap_load_map(na, fl->ifl_buf_tag, map[nic_i], addr); } else if (slot->flags & NS_BUF_CHANGED) { /* buffer has changed, reload map */ netmap_reload_map(na, fl->ifl_buf_tag, map[nic_i], addr); } slot->flags &= ~NS_BUF_CHANGED; nm_i = nm_next(nm_i, lim); fl->ifl_rxd_idxs[tmp_pidx] = nic_i = nm_next(nic_i, lim); if (nm_i != head && tmp_pidx < IFLIB_MAX_RX_REFRESH-1) continue; iru.iru_pidx = refill_pidx; iru.iru_count = tmp_pidx+1; ctx->isc_rxd_refill(ctx->ifc_softc, &iru); refill_pidx = nic_i; for (int n = 0; n < iru.iru_count; n++) { bus_dmamap_sync(fl->ifl_buf_tag, map[nic_i_dma], BUS_DMASYNC_PREREAD); /* XXX - change this to not use the netmap func*/ nic_i_dma = nm_next(nic_i_dma, lim); } } } kring->nr_hwcur = head; bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); if (__predict_true(nic_i != UINT_MAX)) { ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nic_i); DBG_COUNTER_INC(rxd_flush); } return (0); } /* * Reconcile kernel and user view of the transmit ring. * * All information is in the kring. * Userspace wants to send packets up to the one before kring->rhead, * kernel knows kring->nr_hwcur is the first unsent packet. * * Here we push packets out (as many as possible), and possibly * reclaim buffers from previously completed transmission. * * The caller (netmap) guarantees that there is only one instance * running at any time. Any interference with other driver * methods should be handled by the individual drivers. 
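 *
 * Illustrative sketch (not driver code) of the modular walk the transmit
 * loop below performs, with lim == kring->nkr_num_slots - 1:
 *
 *	nm_i  = kring->nr_hwcur;
 *	nic_i = netmap_idx_k2n(kring, nm_i);
 *	while (nm_i != kring->rhead) {
 *		encap ring->slot[nm_i] into NIC descriptor nic_i;
 *		nm_i  = nm_next(nm_i, lim);
 *		nic_i = nm_next(nic_i, lim);
 *	}
 *	kring->nr_hwcur = nm_i;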
*/ static int iflib_netmap_txsync(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; if_t ifp = na->ifp; struct netmap_ring *ring = kring->ring; u_int nm_i; /* index into the netmap kring */ u_int nic_i; /* index into the NIC ring */ u_int n; u_int const lim = kring->nkr_num_slots - 1; u_int const head = kring->rhead; struct if_pkt_info pi; /* * interrupts on every tx packet are expensive so request * them every half ring, or where NS_REPORT is set */ u_int report_frequency = kring->nkr_num_slots >> 1; /* device-specific */ if_ctx_t ctx = ifp->if_softc; iflib_txq_t txq = &ctx->ifc_txqs[kring->ring_id]; bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); /* * First part: process new packets to send. * nm_i is the current index in the netmap kring, * nic_i is the corresponding index in the NIC ring. * * If we have packets to send (nm_i != head) * iterate over the netmap ring, fetch length and update * the corresponding slot in the NIC ring. Some drivers also * need to update the buffer's physical address in the NIC slot * even NS_BUF_CHANGED is not set (PNMB computes the addresses). * * The netmap_reload_map() calls is especially expensive, * even when (as in this case) the tag is 0, so do only * when the buffer has actually changed. * * If possible do not set the report/intr bit on all slots, * but only a few times per ring or when NS_REPORT is set. * * Finally, on 10G and faster drivers, it might be useful * to prefetch the next slot and txr entry. */ nm_i = kring->nr_hwcur; if (nm_i != head) { /* we have new packets to send */ pkt_info_zero(&pi); pi.ipi_segs = txq->ift_segs; pi.ipi_qsidx = kring->ring_id; nic_i = netmap_idx_k2n(kring, nm_i); __builtin_prefetch(&ring->slot[nm_i]); __builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i]); __builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i]); for (n = 0; nm_i != head; n++) { struct netmap_slot *slot = &ring->slot[nm_i]; u_int len = slot->len; uint64_t paddr; void *addr = PNMB(na, slot, &paddr); int flags = (slot->flags & NS_REPORT || nic_i == 0 || nic_i == report_frequency) ? IPI_TX_INTR : 0; /* device-specific */ pi.ipi_len = len; pi.ipi_segs[0].ds_addr = paddr; pi.ipi_segs[0].ds_len = len; pi.ipi_nsegs = 1; pi.ipi_ndescs = 0; pi.ipi_pidx = nic_i; pi.ipi_flags = flags; /* Fill the slot in the NIC ring. */ ctx->isc_txd_encap(ctx->ifc_softc, &pi); DBG_COUNTER_INC(tx_encap); /* prefetch for next round */ __builtin_prefetch(&ring->slot[nm_i + 1]); __builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i + 1]); __builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i + 1]); NM_CHECK_ADDR_LEN(na, addr, len); if (slot->flags & NS_BUF_CHANGED) { /* buffer has changed, reload map */ netmap_reload_map(na, txq->ift_buf_tag, txq->ift_sds.ifsd_map[nic_i], addr); } /* make sure changes to the buffer are synced */ bus_dmamap_sync(txq->ift_buf_tag, txq->ift_sds.ifsd_map[nic_i], BUS_DMASYNC_PREWRITE); slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED); nm_i = nm_next(nm_i, lim); nic_i = nm_next(nic_i, lim); } kring->nr_hwcur = nm_i; /* synchronize the NIC ring */ bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* (re)start the tx unit up to slot nic_i (excluded) */ ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, nic_i); } /* * Second part: reclaim buffers for completed transmissions. * * If there are unclaimed buffers, attempt to reclaim them. 
* If none are reclaimed, and TX IRQs are not in use, do an initial * minimal delay, then trigger the tx handler which will spin in the * group task queue. */ if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) { if (iflib_tx_credits_update(ctx, txq)) { /* some tx completed, increment avail */ nic_i = txq->ift_cidx_processed; kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim); } } if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ)) if (kring->nr_hwtail != nm_prev(kring->nr_hwcur, lim)) { callout_reset_on(&txq->ift_timer, hz < 2000 ? 1 : hz / 1000, iflib_timer, txq, txq->ift_timer.c_cpu); } return (0); } /* * Reconcile kernel and user view of the receive ring. * Same as for the txsync, this routine must be efficient. * The caller guarantees a single invocations, but races against * the rest of the driver should be handled here. * * On call, kring->rhead is the first packet that userspace wants * to keep, and kring->rcur is the wakeup point. * The kernel has previously reported packets up to kring->rtail. * * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective * of whether or not we received an interrupt. */ static int iflib_netmap_rxsync(struct netmap_kring *kring, int flags) { struct netmap_adapter *na = kring->na; struct netmap_ring *ring = kring->ring; if_t ifp = na->ifp; iflib_fl_t fl; uint32_t nm_i; /* index into the netmap ring */ uint32_t nic_i; /* index into the NIC ring */ u_int i, n; u_int const lim = kring->nkr_num_slots - 1; u_int const head = kring->rhead; int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR; struct if_rxd_info ri; if_ctx_t ctx = ifp->if_softc; iflib_rxq_t rxq = &ctx->ifc_rxqs[kring->ring_id]; if (head > lim) return netmap_ring_reinit(kring); /* * XXX netmap_fl_refill() only ever (re)fills free list 0 so far. */ for (i = 0, fl = rxq->ifr_fl; i < rxq->ifr_nfl; i++, fl++) { bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); } /* * First part: import newly received packets. * * nm_i is the index of the next free slot in the netmap ring, * nic_i is the index of the next received packet in the NIC ring, * and they may differ in case if_init() has been called while * in netmap mode. For the receive ring we have * * nic_i = rxr->next_check; * nm_i = kring->nr_hwtail (previous) * and * nm_i == (nic_i + kring->nkr_hwofs) % ring_size * * rxr->next_check is set to 0 on a ring reinit */ if (netmap_no_pendintr || force_update) { int crclen = iflib_crcstrip ? 0 : 4; int error, avail; for (i = 0; i < rxq->ifr_nfl; i++) { fl = &rxq->ifr_fl[i]; nic_i = fl->ifl_cidx; nm_i = netmap_idx_n2k(kring, nic_i); avail = ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, nic_i, USHRT_MAX); for (n = 0; avail > 0; n++, avail--) { rxd_info_zero(&ri); ri.iri_frags = rxq->ifr_frags; ri.iri_qsidx = kring->ring_id; ri.iri_ifp = ctx->ifc_ifp; ri.iri_cidx = nic_i; error = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri); ring->slot[nm_i].len = error ? 0 : ri.iri_len - crclen; ring->slot[nm_i].flags = 0; bus_dmamap_sync(fl->ifl_buf_tag, fl->ifl_sds.ifsd_map[nic_i], BUS_DMASYNC_POSTREAD); nm_i = nm_next(nm_i, lim); nic_i = nm_next(nic_i, lim); } if (n) { /* update the state variables */ if (netmap_no_pendintr && !force_update) { /* diagnostics */ iflib_rx_miss ++; iflib_rx_miss_bufs += n; } fl->ifl_cidx = nic_i; kring->nr_hwtail = nm_i; } kring->nr_kflags &= ~NKR_PENDINTR; } } /* * Second part: skip past packets that userspace has released. 
* (kring->nr_hwcur to head excluded), * and make the buffers available for reception. * As usual nm_i is the index in the netmap ring, * nic_i is the index in the NIC ring, and * nm_i == (nic_i + kring->nkr_hwofs) % ring_size */ /* XXX not sure how this will work with multiple free lists */ nm_i = kring->nr_hwcur; return (netmap_fl_refill(rxq, kring, nm_i, false)); } static void iflib_netmap_intr(struct netmap_adapter *na, int onoff) { if_ctx_t ctx = na->ifp->if_softc; CTX_LOCK(ctx); if (onoff) { IFDI_INTR_ENABLE(ctx); } else { IFDI_INTR_DISABLE(ctx); } CTX_UNLOCK(ctx); } static int iflib_netmap_attach(if_ctx_t ctx) { struct netmap_adapter na; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; bzero(&na, sizeof(na)); na.ifp = ctx->ifc_ifp; na.na_flags = NAF_BDG_MAYSLEEP; MPASS(ctx->ifc_softc_ctx.isc_ntxqsets); MPASS(ctx->ifc_softc_ctx.isc_nrxqsets); na.num_tx_desc = scctx->isc_ntxd[0]; na.num_rx_desc = scctx->isc_nrxd[0]; na.nm_txsync = iflib_netmap_txsync; na.nm_rxsync = iflib_netmap_rxsync; na.nm_register = iflib_netmap_register; na.nm_intr = iflib_netmap_intr; na.num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets; na.num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets; return (netmap_attach(&na)); } static void iflib_netmap_txq_init(if_ctx_t ctx, iflib_txq_t txq) { struct netmap_adapter *na = NA(ctx->ifc_ifp); struct netmap_slot *slot; slot = netmap_reset(na, NR_TX, txq->ift_id, 0); if (slot == NULL) return; for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxd[0]; i++) { /* * In netmap mode, set the map for the packet buffer. * NOTE: Some drivers (not this one) also need to set * the physical buffer address in the NIC ring. * netmap_idx_n2k() maps a nic index, i, into the corresponding * netmap slot index, si */ int si = netmap_idx_n2k(na->tx_rings[txq->ift_id], i); netmap_load_map(na, txq->ift_buf_tag, txq->ift_sds.ifsd_map[i], NMB(na, slot + si)); } } static void iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq) { struct netmap_adapter *na = NA(ctx->ifc_ifp); struct netmap_kring *kring = na->rx_rings[rxq->ifr_id]; struct netmap_slot *slot; uint32_t nm_i; slot = netmap_reset(na, NR_RX, rxq->ifr_id, 0); if (slot == NULL) return; nm_i = netmap_idx_n2k(kring, 0); netmap_fl_refill(rxq, kring, nm_i, true); } static void iflib_netmap_timer_adjust(if_ctx_t ctx, iflib_txq_t txq, uint32_t *reset_on) { struct netmap_kring *kring; uint16_t txqid; txqid = txq->ift_id; kring = NA(ctx->ifc_ifp)->tx_rings[txqid]; if (kring->nr_hwcur != nm_next(kring->nr_hwtail, kring->nkr_num_slots - 1)) { bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, BUS_DMASYNC_POSTREAD); if (ctx->isc_txd_credits_update(ctx->ifc_softc, txqid, false)) netmap_tx_irq(ctx->ifc_ifp, txqid); if (!(ctx->ifc_flags & IFC_NETMAP_TX_IRQ)) { if (hz < 2000) *reset_on = 1; else *reset_on = hz / 1000; } } } #define iflib_netmap_detach(ifp) netmap_detach(ifp) #else #define iflib_netmap_txq_init(ctx, txq) #define iflib_netmap_rxq_init(ctx, rxq) #define iflib_netmap_detach(ifp) #define iflib_netmap_attach(ctx) (0) #define netmap_rx_irq(ifp, qid, budget) (0) #define netmap_tx_irq(ifp, qid) do {} while (0) #define iflib_netmap_timer_adjust(ctx, txq, reset_on) #endif #if defined(__i386__) || defined(__amd64__) static __inline void prefetch(void *x) { __asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x)); } static __inline void prefetch2cachelines(void *x) { __asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x)); #if (CACHE_LINE_SIZE < 128) __asm volatile("prefetcht0 %0" :: "m" (*(((unsigned long *)x)+CACHE_LINE_SIZE/(sizeof(unsigned 
long))))); #endif } #else #define prefetch(x) #define prefetch2cachelines(x) #endif static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid) { iflib_fl_t fl; fl = &rxq->ifr_fl[flid]; iru->iru_paddrs = fl->ifl_bus_addrs; iru->iru_vaddrs = &fl->ifl_vm_addrs[0]; iru->iru_idxs = fl->ifl_rxd_idxs; iru->iru_qsidx = rxq->ifr_id; iru->iru_buf_size = fl->ifl_buf_size; iru->iru_flidx = fl->ifl_id; } static void _iflib_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err) { if (err) return; *(bus_addr_t *) arg = segs[0].ds_addr; } int iflib_dma_alloc_align(if_ctx_t ctx, int size, int align, iflib_dma_info_t dma, int mapflags) { int err; device_t dev = ctx->ifc_dev; err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ align, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ size, /* maxsize */ 1, /* nsegments */ size, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &dma->idi_tag); if (err) { device_printf(dev, "%s: bus_dma_tag_create failed: %d\n", __func__, err); goto fail_0; } err = bus_dmamem_alloc(dma->idi_tag, (void**) &dma->idi_vaddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->idi_map); if (err) { device_printf(dev, "%s: bus_dmamem_alloc(%ju) failed: %d\n", __func__, (uintmax_t)size, err); goto fail_1; } dma->idi_paddr = IF_BAD_DMA; err = bus_dmamap_load(dma->idi_tag, dma->idi_map, dma->idi_vaddr, size, _iflib_dmamap_cb, &dma->idi_paddr, mapflags | BUS_DMA_NOWAIT); if (err || dma->idi_paddr == IF_BAD_DMA) { device_printf(dev, "%s: bus_dmamap_load failed: %d\n", __func__, err); goto fail_2; } dma->idi_size = size; return (0); fail_2: bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map); fail_1: bus_dma_tag_destroy(dma->idi_tag); fail_0: dma->idi_tag = NULL; return (err); } int iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags) { if_shared_ctx_t sctx = ctx->ifc_sctx; KASSERT(sctx->isc_q_align != 0, ("alignment value not initialized")); return (iflib_dma_alloc_align(ctx, size, sctx->isc_q_align, dma, mapflags)); } int iflib_dma_alloc_multi(if_ctx_t ctx, int *sizes, iflib_dma_info_t *dmalist, int mapflags, int count) { int i, err; iflib_dma_info_t *dmaiter; dmaiter = dmalist; for (i = 0; i < count; i++, dmaiter++) { if ((err = iflib_dma_alloc(ctx, sizes[i], *dmaiter, mapflags)) != 0) break; } if (err) iflib_dma_free_multi(dmalist, i); return (err); } void iflib_dma_free(iflib_dma_info_t dma) { if (dma->idi_tag == NULL) return; if (dma->idi_paddr != IF_BAD_DMA) { bus_dmamap_sync(dma->idi_tag, dma->idi_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(dma->idi_tag, dma->idi_map); dma->idi_paddr = IF_BAD_DMA; } if (dma->idi_vaddr != NULL) { bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map); dma->idi_vaddr = NULL; } bus_dma_tag_destroy(dma->idi_tag); dma->idi_tag = NULL; } void iflib_dma_free_multi(iflib_dma_info_t *dmalist, int count) { int i; iflib_dma_info_t *dmaiter = dmalist; for (i = 0; i < count; i++, dmaiter++) iflib_dma_free(*dmaiter); } #ifdef EARLY_AP_STARTUP static const int iflib_started = 1; #else /* * We used to abuse the smp_started flag to decide if the queues have been * fully initialized (by late taskqgroup_adjust() calls in a SYSINIT()). * That gave bad races, since the SYSINIT() runs strictly after smp_started * is set. Run a SYSINIT() strictly after that to just set a usable * completion flag. 
*/ static int iflib_started; static void iflib_record_started(void *arg) { iflib_started = 1; } SYSINIT(iflib_record_started, SI_SUB_SMP + 1, SI_ORDER_FIRST, iflib_record_started, NULL); #endif static int iflib_fast_intr(void *arg) { iflib_filter_info_t info = arg; struct grouptask *gtask = info->ifi_task; int result; if (!iflib_started) return (FILTER_STRAY); DBG_COUNTER_INC(fast_intrs); if (info->ifi_filter != NULL) { result = info->ifi_filter(info->ifi_filter_arg); if ((result & FILTER_SCHEDULE_THREAD) == 0) return (result); } GROUPTASK_ENQUEUE(gtask); return (FILTER_HANDLED); } static int iflib_fast_intr_rxtx(void *arg) { iflib_filter_info_t info = arg; struct grouptask *gtask = info->ifi_task; if_ctx_t ctx; iflib_rxq_t rxq = (iflib_rxq_t)info->ifi_ctx; iflib_txq_t txq; void *sc; int i, cidx, result; qidx_t txqid; bool intr_enable, intr_legacy; if (!iflib_started) return (FILTER_STRAY); DBG_COUNTER_INC(fast_intrs); if (info->ifi_filter != NULL) { result = info->ifi_filter(info->ifi_filter_arg); if ((result & FILTER_SCHEDULE_THREAD) == 0) return (result); } ctx = rxq->ifr_ctx; sc = ctx->ifc_softc; intr_enable = false; intr_legacy = !!(ctx->ifc_flags & IFC_LEGACY); MPASS(rxq->ifr_ntxqirq); for (i = 0; i < rxq->ifr_ntxqirq; i++) { txqid = rxq->ifr_txqid[i]; txq = &ctx->ifc_txqs[txqid]; bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, BUS_DMASYNC_POSTREAD); if (!ctx->isc_txd_credits_update(sc, txqid, false)) { if (intr_legacy) intr_enable = true; else IFDI_TX_QUEUE_INTR_ENABLE(ctx, txqid); continue; } GROUPTASK_ENQUEUE(&txq->ift_task); } if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_RXCQ) cidx = rxq->ifr_cq_cidx; else cidx = rxq->ifr_fl[0].ifl_cidx; if (iflib_rxd_avail(ctx, rxq, cidx, 1)) GROUPTASK_ENQUEUE(gtask); else { if (intr_legacy) intr_enable = true; else IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id); DBG_COUNTER_INC(rx_intr_enables); } if (intr_enable) IFDI_INTR_ENABLE(ctx); return (FILTER_HANDLED); } static int iflib_fast_intr_ctx(void *arg) { iflib_filter_info_t info = arg; struct grouptask *gtask = info->ifi_task; int result; if (!iflib_started) return (FILTER_STRAY); DBG_COUNTER_INC(fast_intrs); if (info->ifi_filter != NULL) { result = info->ifi_filter(info->ifi_filter_arg); if ((result & FILTER_SCHEDULE_THREAD) == 0) return (result); } GROUPTASK_ENQUEUE(gtask); return (FILTER_HANDLED); } static int _iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid, driver_filter_t filter, driver_intr_t handler, void *arg, const char *name) { struct resource *res; void *tag = NULL; device_t dev = ctx->ifc_dev; int flags, i, rc; flags = RF_ACTIVE; if (ctx->ifc_flags & IFC_LEGACY) flags |= RF_SHAREABLE; MPASS(rid < 512); i = rid; res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &i, flags); if (res == NULL) { device_printf(dev, "failed to allocate IRQ for rid %d, name %s.\n", rid, name); return (ENOMEM); } irq->ii_res = res; KASSERT(filter == NULL || handler == NULL, ("filter and handler can't both be non-NULL")); rc = bus_setup_intr(dev, res, INTR_MPSAFE | INTR_TYPE_NET, filter, handler, arg, &tag); if (rc != 0) { device_printf(dev, "failed to setup interrupt for rid %d, name %s: %d\n", rid, name ? name : "unknown", rc); return (rc); } else if (name) bus_describe_intr(dev, res, tag, "%s", name); irq->ii_tag = tag; return (0); } /********************************************************************* * * Allocate DMA resources for TX buffers as well as memory for the TX * mbuf map. 
TX DMA maps (non-TSO/TSO) and TX mbuf map are kept in a * iflib_sw_tx_desc_array structure, storing all the information that * is needed to transmit a packet on the wire. This is called only * once at attach, setup is done every reset. * **********************************************************************/ static int iflib_txsd_alloc(iflib_txq_t txq) { if_ctx_t ctx = txq->ift_ctx; if_shared_ctx_t sctx = ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; device_t dev = ctx->ifc_dev; bus_size_t tsomaxsize; int err, nsegments, ntsosegments; bool tso; nsegments = scctx->isc_tx_nsegments; ntsosegments = scctx->isc_tx_tso_segments_max; tsomaxsize = scctx->isc_tx_tso_size_max; if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_VLAN_MTU) tsomaxsize += sizeof(struct ether_vlan_header); MPASS(scctx->isc_ntxd[0] > 0); MPASS(scctx->isc_ntxd[txq->ift_br_offset] > 0); MPASS(nsegments > 0); if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) { MPASS(ntsosegments > 0); MPASS(sctx->isc_tso_maxsize >= tsomaxsize); } /* * Set up DMA tags for TX buffers. */ if ((err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sctx->isc_tx_maxsize, /* maxsize */ nsegments, /* nsegments */ sctx->isc_tx_maxsegsize, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &txq->ift_buf_tag))) { device_printf(dev,"Unable to allocate TX DMA tag: %d\n", err); device_printf(dev,"maxsize: %ju nsegments: %d maxsegsize: %ju\n", (uintmax_t)sctx->isc_tx_maxsize, nsegments, (uintmax_t)sctx->isc_tx_maxsegsize); goto fail; } tso = (if_getcapabilities(ctx->ifc_ifp) & IFCAP_TSO) != 0; if (tso && (err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ tsomaxsize, /* maxsize */ ntsosegments, /* nsegments */ sctx->isc_tso_maxsegsize,/* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &txq->ift_tso_buf_tag))) { device_printf(dev, "Unable to allocate TSO TX DMA tag: %d\n", err); goto fail; } /* Allocate memory for the TX mbuf map. */ if (!(txq->ift_sds.ifsd_m = (struct mbuf **) malloc(sizeof(struct mbuf *) * scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate TX mbuf map memory\n"); err = ENOMEM; goto fail; } /* * Create the DMA maps for TX buffers. 
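 * One map is created per descriptor slot (scctx->isc_ntxd[] of them per
 * queue), plus a second map per slot against the TSO tag when TSO is
 * enabled, so a large TSO chain can be loaded without disturbing the
 * regular per-packet map.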
*/ if ((txq->ift_sds.ifsd_map = (bus_dmamap_t *)malloc( sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) { device_printf(dev, "Unable to allocate TX buffer DMA map memory\n"); err = ENOMEM; goto fail; } if (tso && (txq->ift_sds.ifsd_tso_map = (bus_dmamap_t *)malloc( sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) { device_printf(dev, "Unable to allocate TSO TX buffer map memory\n"); err = ENOMEM; goto fail; } for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) { err = bus_dmamap_create(txq->ift_buf_tag, 0, &txq->ift_sds.ifsd_map[i]); if (err != 0) { device_printf(dev, "Unable to create TX DMA map\n"); goto fail; } if (!tso) continue; err = bus_dmamap_create(txq->ift_tso_buf_tag, 0, &txq->ift_sds.ifsd_tso_map[i]); if (err != 0) { device_printf(dev, "Unable to create TSO TX DMA map\n"); goto fail; } } return (0); fail: /* We free all, it handles case where we are in the middle */ iflib_tx_structures_free(ctx); return (err); } static void iflib_txsd_destroy(if_ctx_t ctx, iflib_txq_t txq, int i) { bus_dmamap_t map; if (txq->ift_sds.ifsd_map != NULL) { map = txq->ift_sds.ifsd_map[i]; bus_dmamap_sync(txq->ift_buf_tag, map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txq->ift_buf_tag, map); bus_dmamap_destroy(txq->ift_buf_tag, map); txq->ift_sds.ifsd_map[i] = NULL; } if (txq->ift_sds.ifsd_tso_map != NULL) { map = txq->ift_sds.ifsd_tso_map[i]; bus_dmamap_sync(txq->ift_tso_buf_tag, map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txq->ift_tso_buf_tag, map); bus_dmamap_destroy(txq->ift_tso_buf_tag, map); txq->ift_sds.ifsd_tso_map[i] = NULL; } } static void iflib_txq_destroy(iflib_txq_t txq) { if_ctx_t ctx = txq->ift_ctx; for (int i = 0; i < txq->ift_size; i++) iflib_txsd_destroy(ctx, txq, i); if (txq->ift_br != NULL) { ifmp_ring_free(txq->ift_br); txq->ift_br = NULL; } mtx_destroy(&txq->ift_mtx); if (txq->ift_sds.ifsd_map != NULL) { free(txq->ift_sds.ifsd_map, M_IFLIB); txq->ift_sds.ifsd_map = NULL; } if (txq->ift_sds.ifsd_tso_map != NULL) { free(txq->ift_sds.ifsd_tso_map, M_IFLIB); txq->ift_sds.ifsd_tso_map = NULL; } if (txq->ift_sds.ifsd_m != NULL) { free(txq->ift_sds.ifsd_m, M_IFLIB); txq->ift_sds.ifsd_m = NULL; } if (txq->ift_buf_tag != NULL) { bus_dma_tag_destroy(txq->ift_buf_tag); txq->ift_buf_tag = NULL; } if (txq->ift_tso_buf_tag != NULL) { bus_dma_tag_destroy(txq->ift_tso_buf_tag); txq->ift_tso_buf_tag = NULL; } if (txq->ift_ifdi != NULL) { free(txq->ift_ifdi, M_IFLIB); } } static void iflib_txsd_free(if_ctx_t ctx, iflib_txq_t txq, int i) { struct mbuf **mp; mp = &txq->ift_sds.ifsd_m[i]; if (*mp == NULL) return; if (txq->ift_sds.ifsd_map != NULL) { bus_dmamap_sync(txq->ift_buf_tag, txq->ift_sds.ifsd_map[i], BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[i]); } if (txq->ift_sds.ifsd_tso_map != NULL) { bus_dmamap_sync(txq->ift_tso_buf_tag, txq->ift_sds.ifsd_tso_map[i], BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txq->ift_tso_buf_tag, txq->ift_sds.ifsd_tso_map[i]); } m_free(*mp); DBG_COUNTER_INC(tx_frees); *mp = NULL; } static int iflib_txq_setup(iflib_txq_t txq) { if_ctx_t ctx = txq->ift_ctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; if_shared_ctx_t sctx = ctx->ifc_sctx; iflib_dma_info_t di; int i; /* Set number of descriptors available */ txq->ift_qstatus = IFLIB_QUEUE_IDLE; /* XXX make configurable */ txq->ift_update_freq = IFLIB_DEFAULT_TX_UPDATE_FREQ; /* Reset indices */ txq->ift_cidx_processed = 0; txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 
0; txq->ift_size = scctx->isc_ntxd[txq->ift_br_offset]; for (i = 0, di = txq->ift_ifdi; i < sctx->isc_ntxqs; i++, di++) bzero((void *)di->idi_vaddr, di->idi_size); IFDI_TXQ_SETUP(ctx, txq->ift_id); for (i = 0, di = txq->ift_ifdi; i < sctx->isc_ntxqs; i++, di++) bus_dmamap_sync(di->idi_tag, di->idi_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } /********************************************************************* * * Allocate DMA resources for RX buffers as well as memory for the RX * mbuf map, direct RX cluster pointer map and RX cluster bus address * map. RX DMA map, RX mbuf map, direct RX cluster pointer map and * RX cluster map are kept in a iflib_sw_rx_desc_array structure. * Since we use use one entry in iflib_sw_rx_desc_array per received * packet, the maximum number of entries we'll need is equal to the * number of hardware receive descriptors that we've allocated. * **********************************************************************/ static int iflib_rxsd_alloc(iflib_rxq_t rxq) { if_ctx_t ctx = rxq->ifr_ctx; if_shared_ctx_t sctx = ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; device_t dev = ctx->ifc_dev; iflib_fl_t fl; int err; MPASS(scctx->isc_nrxd[0] > 0); MPASS(scctx->isc_nrxd[rxq->ifr_fl_offset] > 0); fl = rxq->ifr_fl; for (int i = 0; i < rxq->ifr_nfl; i++, fl++) { fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* this isn't necessarily the same */ /* Set up DMA tag for RX buffers. */ err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sctx->isc_rx_maxsize, /* maxsize */ sctx->isc_rx_nsegments, /* nsegments */ sctx->isc_rx_maxsegsize, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &fl->ifl_buf_tag); if (err) { device_printf(dev, "Unable to allocate RX DMA tag: %d\n", err); goto fail; } /* Allocate memory for the RX mbuf map. */ if (!(fl->ifl_sds.ifsd_m = (struct mbuf **) malloc(sizeof(struct mbuf *) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate RX mbuf map memory\n"); err = ENOMEM; goto fail; } /* Allocate memory for the direct RX cluster pointer map. */ if (!(fl->ifl_sds.ifsd_cl = (caddr_t *) malloc(sizeof(caddr_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate RX cluster map memory\n"); err = ENOMEM; goto fail; } /* Allocate memory for the RX cluster bus address map. */ if (!(fl->ifl_sds.ifsd_ba = (bus_addr_t *) malloc(sizeof(bus_addr_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate RX bus address map memory\n"); err = ENOMEM; goto fail; } /* * Create the DMA maps for RX buffers. 
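 * As on the TX side, one map is created per receive descriptor in each
 * free list, paralleling the mbuf (ifsd_m), cluster (ifsd_cl) and
 * bus-address (ifsd_ba) arrays allocated just above.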
*/ if (!(fl->ifl_sds.ifsd_map = (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate RX buffer DMA map memory\n"); err = ENOMEM; goto fail; } for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++) { err = bus_dmamap_create(fl->ifl_buf_tag, 0, &fl->ifl_sds.ifsd_map[i]); if (err != 0) { device_printf(dev, "Unable to create RX buffer DMA map\n"); goto fail; } } } return (0); fail: iflib_rx_structures_free(ctx); return (err); } /* * Internal service routines */ struct rxq_refill_cb_arg { int error; bus_dma_segment_t seg; int nseg; }; static void _rxq_refill_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct rxq_refill_cb_arg *cb_arg = arg; cb_arg->error = error; cb_arg->seg = segs[0]; cb_arg->nseg = nseg; } /** * _iflib_fl_refill - refill an rxq free-buffer list * @ctx: the iflib context * @fl: the free list to refill * @count: the number of new buffers to allocate * * (Re)populate an rxq free-buffer list with up to @count new packet buffers. * The caller must assure that @count does not exceed the queue's capacity. */ static void _iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count) { struct if_rxd_update iru; struct rxq_refill_cb_arg cb_arg; struct mbuf *m; caddr_t cl, *sd_cl; struct mbuf **sd_m; bus_dmamap_t *sd_map; bus_addr_t bus_addr, *sd_ba; int err, frag_idx, i, idx, n, pidx; qidx_t credits; sd_m = fl->ifl_sds.ifsd_m; sd_map = fl->ifl_sds.ifsd_map; sd_cl = fl->ifl_sds.ifsd_cl; sd_ba = fl->ifl_sds.ifsd_ba; pidx = fl->ifl_pidx; idx = pidx; frag_idx = fl->ifl_fragidx; credits = fl->ifl_credits; i = 0; n = count; MPASS(n > 0); MPASS(credits + n <= fl->ifl_size); if (pidx < fl->ifl_cidx) MPASS(pidx + n <= fl->ifl_cidx); if (pidx == fl->ifl_cidx && (credits < fl->ifl_size)) MPASS(fl->ifl_gen == 0); if (pidx > fl->ifl_cidx) MPASS(n <= fl->ifl_size - pidx + fl->ifl_cidx); DBG_COUNTER_INC(fl_refills); if (n > 8) DBG_COUNTER_INC(fl_refills_large); iru_init(&iru, fl->ifl_rxq, fl->ifl_id); while (n--) { /* * We allocate an uninitialized mbuf + cluster, mbuf is * initialized after rx. * * If the cluster is still set then we know a minimum sized packet was received */ bit_ffc_at(fl->ifl_rx_bitmap, frag_idx, fl->ifl_size, &frag_idx); if (frag_idx < 0) bit_ffc(fl->ifl_rx_bitmap, fl->ifl_size, &frag_idx); MPASS(frag_idx >= 0); if ((cl = sd_cl[frag_idx]) == NULL) { if ((cl = m_cljget(NULL, M_NOWAIT, fl->ifl_buf_size)) == NULL) break; cb_arg.error = 0; MPASS(sd_map != NULL); err = bus_dmamap_load(fl->ifl_buf_tag, sd_map[frag_idx], cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg, BUS_DMA_NOWAIT); if (err != 0 || cb_arg.error) { /* * !zone_pack ? 
*/ if (fl->ifl_zone == zone_pack) uma_zfree(fl->ifl_zone, cl); break; } sd_ba[frag_idx] = bus_addr = cb_arg.seg.ds_addr; sd_cl[frag_idx] = cl; #if MEMORY_LOGGING fl->ifl_cl_enqueued++; #endif } else { bus_addr = sd_ba[frag_idx]; } bus_dmamap_sync(fl->ifl_buf_tag, sd_map[frag_idx], BUS_DMASYNC_PREREAD); if (sd_m[frag_idx] == NULL) { if ((m = m_gethdr(M_NOWAIT, MT_NOINIT)) == NULL) { break; } sd_m[frag_idx] = m; } bit_set(fl->ifl_rx_bitmap, frag_idx); #if MEMORY_LOGGING fl->ifl_m_enqueued++; #endif DBG_COUNTER_INC(rx_allocs); fl->ifl_rxd_idxs[i] = frag_idx; fl->ifl_bus_addrs[i] = bus_addr; fl->ifl_vm_addrs[i] = cl; credits++; i++; MPASS(credits <= fl->ifl_size); if (++idx == fl->ifl_size) { fl->ifl_gen = 1; idx = 0; } if (n == 0 || i == IFLIB_MAX_RX_REFRESH) { iru.iru_pidx = pidx; iru.iru_count = i; ctx->isc_rxd_refill(ctx->ifc_softc, &iru); i = 0; pidx = idx; fl->ifl_pidx = idx; fl->ifl_credits = credits; } } if (i) { iru.iru_pidx = pidx; iru.iru_count = i; ctx->isc_rxd_refill(ctx->ifc_softc, &iru); fl->ifl_pidx = idx; fl->ifl_credits = credits; } DBG_COUNTER_INC(rxd_flush); if (fl->ifl_pidx == 0) pidx = fl->ifl_size - 1; else pidx = fl->ifl_pidx - 1; bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); ctx->isc_rxd_flush(ctx->ifc_softc, fl->ifl_rxq->ifr_id, fl->ifl_id, pidx); fl->ifl_fragidx = frag_idx; } static __inline void __iflib_fl_refill_lt(if_ctx_t ctx, iflib_fl_t fl, int max) { /* we avoid allowing pidx to catch up with cidx as it confuses ixl */ int32_t reclaimable = fl->ifl_size - fl->ifl_credits - 1; #ifdef INVARIANTS int32_t delta = fl->ifl_size - get_inuse(fl->ifl_size, fl->ifl_cidx, fl->ifl_pidx, fl->ifl_gen) - 1; #endif MPASS(fl->ifl_credits <= fl->ifl_size); MPASS(reclaimable == delta); if (reclaimable > 0) _iflib_fl_refill(ctx, fl, min(max, reclaimable)); } uint8_t iflib_in_detach(if_ctx_t ctx) { bool in_detach; STATE_LOCK(ctx); in_detach = !!(ctx->ifc_flags & IFC_IN_DETACH); STATE_UNLOCK(ctx); return (in_detach); } static void iflib_fl_bufs_free(iflib_fl_t fl) { iflib_dma_info_t idi = fl->ifl_ifdi; bus_dmamap_t sd_map; uint32_t i; for (i = 0; i < fl->ifl_size; i++) { struct mbuf **sd_m = &fl->ifl_sds.ifsd_m[i]; caddr_t *sd_cl = &fl->ifl_sds.ifsd_cl[i]; if (*sd_cl != NULL) { sd_map = fl->ifl_sds.ifsd_map[i]; bus_dmamap_sync(fl->ifl_buf_tag, sd_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(fl->ifl_buf_tag, sd_map); if (*sd_cl != NULL) uma_zfree(fl->ifl_zone, *sd_cl); if (*sd_m != NULL) { m_init(*sd_m, M_NOWAIT, MT_DATA, 0); uma_zfree(zone_mbuf, *sd_m); } } else { MPASS(*sd_cl == NULL); MPASS(*sd_m == NULL); } #if MEMORY_LOGGING fl->ifl_m_dequeued++; fl->ifl_cl_dequeued++; #endif *sd_cl = NULL; *sd_m = NULL; } #ifdef INVARIANTS for (i = 0; i < fl->ifl_size; i++) { MPASS(fl->ifl_sds.ifsd_cl[i] == NULL); MPASS(fl->ifl_sds.ifsd_m[i] == NULL); } #endif /* * Reset free list values */ fl->ifl_credits = fl->ifl_cidx = fl->ifl_pidx = fl->ifl_gen = fl->ifl_fragidx = 0; bzero(idi->idi_vaddr, idi->idi_size); } /********************************************************************* * * Initialize a free list and its buffers. 
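 *
 * In outline: free whatever is still on the list, size clusters from the
 * previously computed ctx->ifc_rx_mbuf_sz, then prime the list with at
 * most 128 buffers via _iflib_fl_refill() so an idle ring does not pin a
 * full complement of clusters.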
* **********************************************************************/ static int iflib_fl_setup(iflib_fl_t fl) { iflib_rxq_t rxq = fl->ifl_rxq; if_ctx_t ctx = rxq->ifr_ctx; bit_nclear(fl->ifl_rx_bitmap, 0, fl->ifl_size - 1); /* ** Free current RX buffer structs and their mbufs */ iflib_fl_bufs_free(fl); /* Now replenish the mbufs */ MPASS(fl->ifl_credits == 0); fl->ifl_buf_size = ctx->ifc_rx_mbuf_sz; if (fl->ifl_buf_size > ctx->ifc_max_fl_buf_size) ctx->ifc_max_fl_buf_size = fl->ifl_buf_size; fl->ifl_cltype = m_gettype(fl->ifl_buf_size); fl->ifl_zone = m_getzone(fl->ifl_buf_size); /* avoid pre-allocating zillions of clusters to an idle card * potentially speeding up attach */ _iflib_fl_refill(ctx, fl, min(128, fl->ifl_size)); MPASS(min(128, fl->ifl_size) == fl->ifl_credits); if (min(128, fl->ifl_size) != fl->ifl_credits) return (ENOBUFS); /* * handle failure */ MPASS(rxq != NULL); MPASS(fl->ifl_ifdi != NULL); bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } /********************************************************************* * * Free receive ring data structures * **********************************************************************/ static void iflib_rx_sds_free(iflib_rxq_t rxq) { iflib_fl_t fl; int i, j; if (rxq->ifr_fl != NULL) { for (i = 0; i < rxq->ifr_nfl; i++) { fl = &rxq->ifr_fl[i]; if (fl->ifl_buf_tag != NULL) { if (fl->ifl_sds.ifsd_map != NULL) { for (j = 0; j < fl->ifl_size; j++) { bus_dmamap_sync( fl->ifl_buf_tag, fl->ifl_sds.ifsd_map[j], BUS_DMASYNC_POSTREAD); bus_dmamap_unload( fl->ifl_buf_tag, fl->ifl_sds.ifsd_map[j]); bus_dmamap_destroy( fl->ifl_buf_tag, fl->ifl_sds.ifsd_map[j]); } } bus_dma_tag_destroy(fl->ifl_buf_tag); fl->ifl_buf_tag = NULL; } free(fl->ifl_sds.ifsd_m, M_IFLIB); free(fl->ifl_sds.ifsd_cl, M_IFLIB); free(fl->ifl_sds.ifsd_ba, M_IFLIB); free(fl->ifl_sds.ifsd_map, M_IFLIB); fl->ifl_sds.ifsd_m = NULL; fl->ifl_sds.ifsd_cl = NULL; fl->ifl_sds.ifsd_ba = NULL; fl->ifl_sds.ifsd_map = NULL; } free(rxq->ifr_fl, M_IFLIB); rxq->ifr_fl = NULL; free(rxq->ifr_ifdi, M_IFLIB); rxq->ifr_ifdi = NULL; rxq->ifr_cq_cidx = 0; } } /* * Timer routine */ static void iflib_timer(void *arg) { iflib_txq_t txq = arg; if_ctx_t ctx = txq->ift_ctx; if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; uint64_t this_tick = ticks; uint32_t reset_on = hz / 2; if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)) return; /* ** Check on the state of the TX queue(s), this ** can be done without the lock because its RO ** and the HUNG state will be static if set. 
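 ** Roughly: every hz/2 ticks the queue is sampled, a stalled ring is
 ** marked HUNG, and a queue that is still HUNG on the next pass is
 ** reset below unless it both made cleaning progress and saw pause
 ** frames in the meantime.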
*/ if (this_tick - txq->ift_last_timer_tick >= hz / 2) { txq->ift_last_timer_tick = this_tick; IFDI_TIMER(ctx, txq->ift_id); if ((txq->ift_qstatus == IFLIB_QUEUE_HUNG) && ((txq->ift_cleaned_prev == txq->ift_cleaned) || (sctx->isc_pause_frames == 0))) goto hung; if (txq->ift_qstatus != IFLIB_QUEUE_IDLE && ifmp_ring_is_stalled(txq->ift_br)) { KASSERT(ctx->ifc_link_state == LINK_STATE_UP, ("queue can't be marked as hung if interface is down")); txq->ift_qstatus = IFLIB_QUEUE_HUNG; } txq->ift_cleaned_prev = txq->ift_cleaned; } #ifdef DEV_NETMAP if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP) iflib_netmap_timer_adjust(ctx, txq, &reset_on); #endif /* handle any laggards */ if (txq->ift_db_pending) GROUPTASK_ENQUEUE(&txq->ift_task); sctx->isc_pause_frames = 0; if (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING) callout_reset_on(&txq->ift_timer, reset_on, iflib_timer, txq, txq->ift_timer.c_cpu); return; hung: device_printf(ctx->ifc_dev, "Watchdog timeout (TX: %d desc avail: %d pidx: %d) -- resetting\n", txq->ift_id, TXQ_AVAIL(txq), txq->ift_pidx); STATE_LOCK(ctx); if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); ctx->ifc_flags |= (IFC_DO_WATCHDOG|IFC_DO_RESET); iflib_admin_intr_deferred(ctx); STATE_UNLOCK(ctx); } static void iflib_calc_rx_mbuf_sz(if_ctx_t ctx) { if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; /* * XXX don't set the max_frame_size to larger * than the hardware can handle */ if (sctx->isc_max_frame_size <= MCLBYTES) ctx->ifc_rx_mbuf_sz = MCLBYTES; else ctx->ifc_rx_mbuf_sz = MJUMPAGESIZE; } uint32_t iflib_get_rx_mbuf_sz(if_ctx_t ctx) { return (ctx->ifc_rx_mbuf_sz); } static void iflib_init_locked(if_ctx_t ctx) { if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; if_t ifp = ctx->ifc_ifp; iflib_fl_t fl; iflib_txq_t txq; iflib_rxq_t rxq; int i, j, tx_ip_csum_flags, tx_ip6_csum_flags; if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); IFDI_INTR_DISABLE(ctx); tx_ip_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP); tx_ip6_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_SCTP); /* Set hardware offload abilities */ if_clearhwassist(ifp); if (if_getcapenable(ifp) & IFCAP_TXCSUM) if_sethwassistbits(ifp, tx_ip_csum_flags, 0); if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6) if_sethwassistbits(ifp, tx_ip6_csum_flags, 0); if (if_getcapenable(ifp) & IFCAP_TSO4) if_sethwassistbits(ifp, CSUM_IP_TSO, 0); if (if_getcapenable(ifp) & IFCAP_TSO6) if_sethwassistbits(ifp, CSUM_IP6_TSO, 0); for (i = 0, txq = ctx->ifc_txqs; i < sctx->isc_ntxqsets; i++, txq++) { CALLOUT_LOCK(txq); callout_stop(&txq->ift_timer); CALLOUT_UNLOCK(txq); iflib_netmap_txq_init(ctx, txq); } /* * Calculate a suitable Rx mbuf size prior to calling IFDI_INIT, so * that drivers can use the value when setting up the hardware receive * buffers. 
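 * As seen above, iflib_calc_rx_mbuf_sz() selects MCLBYTES when
 * isc_max_frame_size fits in a standard cluster and MJUMPAGESIZE
 * otherwise.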
*/ iflib_calc_rx_mbuf_sz(ctx); #ifdef INVARIANTS i = if_getdrvflags(ifp); #endif IFDI_INIT(ctx); MPASS(if_getdrvflags(ifp) == i); for (i = 0, rxq = ctx->ifc_rxqs; i < sctx->isc_nrxqsets; i++, rxq++) { /* XXX this should really be done on a per-queue basis */ if (if_getcapenable(ifp) & IFCAP_NETMAP) { MPASS(rxq->ifr_id == i); iflib_netmap_rxq_init(ctx, rxq); continue; } for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) { if (iflib_fl_setup(fl)) { device_printf(ctx->ifc_dev, "setting up free list %d failed - " "check cluster settings\n", j); goto done; } } } done: if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); IFDI_INTR_ENABLE(ctx); txq = ctx->ifc_txqs; for (i = 0; i < sctx->isc_ntxqsets; i++, txq++) callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq, txq->ift_timer.c_cpu); } static int iflib_media_change(if_t ifp) { if_ctx_t ctx = if_getsoftc(ifp); int err; CTX_LOCK(ctx); if ((err = IFDI_MEDIA_CHANGE(ctx)) == 0) iflib_init_locked(ctx); CTX_UNLOCK(ctx); return (err); } static void iflib_media_status(if_t ifp, struct ifmediareq *ifmr) { if_ctx_t ctx = if_getsoftc(ifp); CTX_LOCK(ctx); IFDI_UPDATE_ADMIN_STATUS(ctx); IFDI_MEDIA_STATUS(ctx, ifmr); CTX_UNLOCK(ctx); } void iflib_stop(if_ctx_t ctx) { iflib_txq_t txq = ctx->ifc_txqs; iflib_rxq_t rxq = ctx->ifc_rxqs; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; if_shared_ctx_t sctx = ctx->ifc_sctx; iflib_dma_info_t di; iflib_fl_t fl; int i, j; /* Tell the stack that the interface is no longer active */ if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); IFDI_INTR_DISABLE(ctx); DELAY(1000); IFDI_STOP(ctx); DELAY(1000); iflib_debug_reset(); /* Wait for current tx queue users to exit to disarm watchdog timer. */ for (i = 0; i < scctx->isc_ntxqsets; i++, txq++) { /* make sure all transmitters have completed before proceeding XXX */ CALLOUT_LOCK(txq); callout_stop(&txq->ift_timer); CALLOUT_UNLOCK(txq); /* clean any enqueued buffers */ iflib_ifmp_purge(txq); /* Free any existing tx buffers. */ for (j = 0; j < txq->ift_size; j++) { iflib_txsd_free(ctx, txq, j); } txq->ift_processed = txq->ift_cleaned = txq->ift_cidx_processed = 0; txq->ift_in_use = txq->ift_gen = txq->ift_cidx = txq->ift_pidx = txq->ift_no_desc_avail = 0; txq->ift_closed = txq->ift_mbuf_defrag = txq->ift_mbuf_defrag_failed = 0; txq->ift_no_tx_dma_setup = txq->ift_txd_encap_efbig = txq->ift_map_failed = 0; txq->ift_pullups = 0; ifmp_ring_reset_stats(txq->ift_br); for (j = 0, di = txq->ift_ifdi; j < sctx->isc_ntxqs; j++, di++) bzero((void *)di->idi_vaddr, di->idi_size); } for (i = 0; i < scctx->isc_nrxqsets; i++, rxq++) { /* make sure all transmitters have completed before proceeding XXX */ rxq->ifr_cq_cidx = 0; for (j = 0, di = rxq->ifr_ifdi; j < sctx->isc_nrxqs; j++, di++) bzero((void *)di->idi_vaddr, di->idi_size); /* also resets the free lists pidx/cidx */ for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) iflib_fl_bufs_free(fl); } } static inline caddr_t calc_next_rxd(iflib_fl_t fl, int cidx) { qidx_t size; int nrxd; caddr_t start, end, cur, next; nrxd = fl->ifl_size; size = fl->ifl_rxd_size; start = fl->ifl_ifdi->idi_vaddr; if (__predict_false(size == 0)) return (start); cur = start + size*cidx; end = start + size*nrxd; next = CACHE_PTR_NEXT(cur); return (next < end ? 
next : start); } static inline void prefetch_pkts(iflib_fl_t fl, int cidx) { int nextptr; int nrxd = fl->ifl_size; caddr_t next_rxd; nextptr = (cidx + CACHE_PTR_INCREMENT) & (nrxd-1); prefetch(&fl->ifl_sds.ifsd_m[nextptr]); prefetch(&fl->ifl_sds.ifsd_cl[nextptr]); next_rxd = calc_next_rxd(fl, cidx); prefetch(next_rxd); prefetch(fl->ifl_sds.ifsd_m[(cidx + 1) & (nrxd-1)]); prefetch(fl->ifl_sds.ifsd_m[(cidx + 2) & (nrxd-1)]); prefetch(fl->ifl_sds.ifsd_m[(cidx + 3) & (nrxd-1)]); prefetch(fl->ifl_sds.ifsd_m[(cidx + 4) & (nrxd-1)]); prefetch(fl->ifl_sds.ifsd_cl[(cidx + 1) & (nrxd-1)]); prefetch(fl->ifl_sds.ifsd_cl[(cidx + 2) & (nrxd-1)]); prefetch(fl->ifl_sds.ifsd_cl[(cidx + 3) & (nrxd-1)]); prefetch(fl->ifl_sds.ifsd_cl[(cidx + 4) & (nrxd-1)]); } static struct mbuf * rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, bool unload, if_rxsd_t sd, int *pf_rv, if_rxd_info_t ri) { bus_dmamap_t map; iflib_fl_t fl; caddr_t payload; struct mbuf *m; int flid, cidx, len, next; map = NULL; flid = irf->irf_flid; cidx = irf->irf_idx; fl = &rxq->ifr_fl[flid]; sd->ifsd_fl = fl; sd->ifsd_cidx = cidx; m = fl->ifl_sds.ifsd_m[cidx]; sd->ifsd_cl = &fl->ifl_sds.ifsd_cl[cidx]; fl->ifl_credits--; #if MEMORY_LOGGING fl->ifl_m_dequeued++; #endif if (rxq->ifr_ctx->ifc_flags & IFC_PREFETCH) prefetch_pkts(fl, cidx); next = (cidx + CACHE_PTR_INCREMENT) & (fl->ifl_size-1); prefetch(&fl->ifl_sds.ifsd_map[next]); map = fl->ifl_sds.ifsd_map[cidx]; next = (cidx + CACHE_LINE_SIZE) & (fl->ifl_size-1); /* not valid assert if bxe really does SGE from non-contiguous elements */ MPASS(fl->ifl_cidx == cidx); bus_dmamap_sync(fl->ifl_buf_tag, map, BUS_DMASYNC_POSTREAD); if (rxq->pfil != NULL && PFIL_HOOKED_IN(rxq->pfil) && pf_rv != NULL) { payload = *sd->ifsd_cl; payload += ri->iri_pad; len = ri->iri_len - ri->iri_pad; *pf_rv = pfil_run_hooks(rxq->pfil, payload, ri->iri_ifp, len | PFIL_MEMPTR | PFIL_IN, NULL); switch (*pf_rv) { case PFIL_DROPPED: case PFIL_CONSUMED: /* * The filter ate it. Everything is recycled. */ m = NULL; unload = 0; break; case PFIL_REALLOCED: /* * The filter copied it. Everything is recycled. 
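			 * Only the filter's copy is handed up; the
			 * driver's own mbuf and cluster stay on the free
			 * list (unload is left clear) for reuse on a
			 * later refill.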
*/ m = pfil_mem2mbuf(payload); unload = 0; break; case PFIL_PASS: /* * Filter said it was OK, so receive like * normal */ fl->ifl_sds.ifsd_m[cidx] = NULL; break; default: MPASS(0); } } else { fl->ifl_sds.ifsd_m[cidx] = NULL; *pf_rv = PFIL_PASS; } if (unload) bus_dmamap_unload(fl->ifl_buf_tag, map); fl->ifl_cidx = (fl->ifl_cidx + 1) & (fl->ifl_size-1); if (__predict_false(fl->ifl_cidx == 0)) fl->ifl_gen = 0; bit_clear(fl->ifl_rx_bitmap, cidx); return (m); } static struct mbuf * assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri, if_rxsd_t sd, int *pf_rv) { struct mbuf *m, *mh, *mt; caddr_t cl; int *pf_rv_ptr, flags, i, padlen; bool consumed; i = 0; mh = NULL; consumed = false; *pf_rv = PFIL_PASS; pf_rv_ptr = pf_rv; do { m = rxd_frag_to_sd(rxq, &ri->iri_frags[i], !consumed, sd, pf_rv_ptr, ri); MPASS(*sd->ifsd_cl != NULL); /* * Exclude zero-length frags & frags from * packets the filter has consumed or dropped */ if (ri->iri_frags[i].irf_len == 0 || consumed || *pf_rv == PFIL_CONSUMED || *pf_rv == PFIL_DROPPED) { if (mh == NULL) { /* everything saved here */ consumed = true; pf_rv_ptr = NULL; continue; } /* XXX we can save the cluster here, but not the mbuf */ m_init(m, M_NOWAIT, MT_DATA, 0); m_free(m); continue; } if (mh == NULL) { flags = M_PKTHDR|M_EXT; mh = mt = m; padlen = ri->iri_pad; } else { flags = M_EXT; mt->m_next = m; mt = m; /* assuming padding is only on the first fragment */ padlen = 0; } cl = *sd->ifsd_cl; *sd->ifsd_cl = NULL; /* Can these two be made one ? */ m_init(m, M_NOWAIT, MT_DATA, flags); m_cljset(m, cl, sd->ifsd_fl->ifl_cltype); /* * These must follow m_init and m_cljset */ m->m_data += padlen; ri->iri_len -= padlen; m->m_len = ri->iri_frags[i].irf_len; } while (++i < ri->iri_nfrags); return (mh); } /* * Process one software descriptor */ static struct mbuf * iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri) { struct if_rxsd sd; struct mbuf *m; int pf_rv; /* should I merge this back in now that the two paths are basically duplicated? */ if (ri->iri_nfrags == 1 && ri->iri_frags[0].irf_len <= MIN(IFLIB_RX_COPY_THRESH, MHLEN)) { m = rxd_frag_to_sd(rxq, &ri->iri_frags[0], false, &sd, &pf_rv, ri); if (pf_rv != PFIL_PASS && pf_rv != PFIL_REALLOCED) return (m); if (pf_rv == PFIL_PASS) { m_init(m, M_NOWAIT, MT_DATA, M_PKTHDR); #ifndef __NO_STRICT_ALIGNMENT if (!IP_ALIGNED(m)) m->m_data += 2; #endif memcpy(m->m_data, *sd.ifsd_cl, ri->iri_len); m->m_len = ri->iri_frags[0].irf_len; } } else { m = assemble_segments(rxq, ri, &sd, &pf_rv); if (pf_rv != PFIL_PASS && pf_rv != PFIL_REALLOCED) return (m); } m->m_pkthdr.len = ri->iri_len; m->m_pkthdr.rcvif = ri->iri_ifp; m->m_flags |= ri->iri_flags; m->m_pkthdr.ether_vtag = ri->iri_vtag; m->m_pkthdr.flowid = ri->iri_flowid; M_HASHTYPE_SET(m, ri->iri_rsstype); m->m_pkthdr.csum_flags = ri->iri_csum_flags; m->m_pkthdr.csum_data = ri->iri_csum_data; return (m); } #if defined(INET6) || defined(INET) static void iflib_get_ip_forwarding(struct lro_ctrl *lc, bool *v4, bool *v6) { CURVNET_SET(lc->ifp->if_vnet); #if defined(INET6) *v6 = V_ip6_forwarding; #endif #if defined(INET) *v4 = V_ipforwarding; #endif CURVNET_RESTORE(); } /* * Returns true if it's possible this packet could be LROed. * if it returns false, it is guaranteed that tcp_lro_rx() * would not return zero. 
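 * In practice this is just an outer-ethertype check: IPv4 and IPv6
 * frames qualify only when the corresponding forwarding sysctl is off,
 * since LRO must not be applied to traffic that may be forwarded;
 * everything else returns false.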
*/ static bool iflib_check_lro_possible(struct mbuf *m, bool v4_forwarding, bool v6_forwarding) { struct ether_header *eh; eh = mtod(m, struct ether_header *); switch (eh->ether_type) { #if defined(INET6) case htons(ETHERTYPE_IPV6): return (!v6_forwarding); #endif #if defined (INET) case htons(ETHERTYPE_IP): return (!v4_forwarding); #endif } return false; } #else static void iflib_get_ip_forwarding(struct lro_ctrl *lc __unused, bool *v4 __unused, bool *v6 __unused) { } #endif static bool iflib_rxeof(iflib_rxq_t rxq, qidx_t budget) { if_t ifp; if_ctx_t ctx = rxq->ifr_ctx; if_shared_ctx_t sctx = ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; int avail, i; qidx_t *cidxp; struct if_rxd_info ri; int err, budget_left, rx_bytes, rx_pkts; iflib_fl_t fl; int lro_enabled; bool v4_forwarding, v6_forwarding, lro_possible; /* * XXX early demux data packets so that if_input processing only handles * acks in interrupt context */ struct mbuf *m, *mh, *mt, *mf; NET_EPOCH_ASSERT(); lro_possible = v4_forwarding = v6_forwarding = false; ifp = ctx->ifc_ifp; mh = mt = NULL; MPASS(budget > 0); rx_pkts = rx_bytes = 0; if (sctx->isc_flags & IFLIB_HAS_RXCQ) cidxp = &rxq->ifr_cq_cidx; else cidxp = &rxq->ifr_fl[0].ifl_cidx; if ((avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget)) == 0) { for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++) __iflib_fl_refill_lt(ctx, fl, budget + 8); DBG_COUNTER_INC(rx_unavail); return (false); } /* pfil needs the vnet to be set */ CURVNET_SET_QUIET(ifp->if_vnet); for (budget_left = budget; budget_left > 0 && avail > 0;) { if (__predict_false(!CTX_ACTIVE(ctx))) { DBG_COUNTER_INC(rx_ctx_inactive); break; } /* * Reset client set fields to their default values */ rxd_info_zero(&ri); ri.iri_qsidx = rxq->ifr_id; ri.iri_cidx = *cidxp; ri.iri_ifp = ifp; ri.iri_frags = rxq->ifr_frags; err = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri); if (err) goto err; rx_pkts += 1; rx_bytes += ri.iri_len; if (sctx->isc_flags & IFLIB_HAS_RXCQ) { *cidxp = ri.iri_cidx; /* Update our consumer index */ /* XXX NB: shurd - check if this is still safe */ while (rxq->ifr_cq_cidx >= scctx->isc_nrxd[0]) rxq->ifr_cq_cidx -= scctx->isc_nrxd[0]; /* was this only a completion queue message? 
*/ if (__predict_false(ri.iri_nfrags == 0)) continue; } MPASS(ri.iri_nfrags != 0); MPASS(ri.iri_len != 0); /* will advance the cidx on the corresponding free lists */ m = iflib_rxd_pkt_get(rxq, &ri); avail--; budget_left--; if (avail == 0 && budget_left) avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget_left); if (__predict_false(m == NULL)) continue; /* imm_pkt: -- cxgb */ if (mh == NULL) mh = mt = m; else { mt->m_nextpkt = m; mt = m; } } CURVNET_RESTORE(); /* make sure that we can refill faster than drain */ for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++) __iflib_fl_refill_lt(ctx, fl, budget + 8); lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO); if (lro_enabled) iflib_get_ip_forwarding(&rxq->ifr_lc, &v4_forwarding, &v6_forwarding); mt = mf = NULL; while (mh != NULL) { m = mh; mh = mh->m_nextpkt; m->m_nextpkt = NULL; #ifndef __NO_STRICT_ALIGNMENT if (!IP_ALIGNED(m) && (m = iflib_fixup_rx(m)) == NULL) continue; #endif rx_bytes += m->m_pkthdr.len; rx_pkts++; #if defined(INET6) || defined(INET) if (lro_enabled) { if (!lro_possible) { lro_possible = iflib_check_lro_possible(m, v4_forwarding, v6_forwarding); if (lro_possible && mf != NULL) { ifp->if_input(ifp, mf); DBG_COUNTER_INC(rx_if_input); mt = mf = NULL; } } if ((m->m_pkthdr.csum_flags & (CSUM_L4_CALC|CSUM_L4_VALID)) == (CSUM_L4_CALC|CSUM_L4_VALID)) { if (lro_possible && tcp_lro_rx(&rxq->ifr_lc, m, 0) == 0) continue; } } #endif if (lro_possible) { ifp->if_input(ifp, m); DBG_COUNTER_INC(rx_if_input); continue; } if (mf == NULL) mf = m; if (mt != NULL) mt->m_nextpkt = m; mt = m; } if (mf != NULL) { ifp->if_input(ifp, mf); DBG_COUNTER_INC(rx_if_input); } if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes); if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts); /* * Flush any outstanding LRO work */ #if defined(INET6) || defined(INET) tcp_lro_flush_all(&rxq->ifr_lc); #endif if (avail) return true; return (iflib_rxd_avail(ctx, rxq, *cidxp, 1)); err: STATE_LOCK(ctx); ctx->ifc_flags |= IFC_DO_RESET; iflib_admin_intr_deferred(ctx); STATE_UNLOCK(ctx); return (false); } #define TXD_NOTIFY_COUNT(txq) (((txq)->ift_size / (txq)->ift_update_freq)-1) static inline qidx_t txq_max_db_deferred(iflib_txq_t txq, qidx_t in_use) { qidx_t notify_count = TXD_NOTIFY_COUNT(txq); qidx_t minthresh = txq->ift_size / 8; if (in_use > 4*minthresh) return (notify_count); if (in_use > 2*minthresh) return (notify_count >> 1); if (in_use > minthresh) return (notify_count >> 3); return (0); } static inline qidx_t txq_max_rs_deferred(iflib_txq_t txq) { qidx_t notify_count = TXD_NOTIFY_COUNT(txq); qidx_t minthresh = txq->ift_size / 8; if (txq->ift_in_use > 4*minthresh) return (notify_count); if (txq->ift_in_use > 2*minthresh) return (notify_count >> 1); if (txq->ift_in_use > minthresh) return (notify_count >> 2); return (2); } #define M_CSUM_FLAGS(m) ((m)->m_pkthdr.csum_flags) #define M_HAS_VLANTAG(m) (m->m_flags & M_VLANTAG) #define TXQ_MAX_DB_DEFERRED(txq, in_use) txq_max_db_deferred((txq), (in_use)) #define TXQ_MAX_RS_DEFERRED(txq) txq_max_rs_deferred(txq) #define TXQ_MAX_DB_CONSUMED(size) (size >> 4) /* forward compatibility for cxgb */ #define FIRST_QSET(ctx) 0 #define NTXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_ntxqsets) #define NRXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_nrxqsets) #define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx)) #define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments)) /* XXX we should be setting this to something 
other than zero */ #define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh) #define MAX_TX_DESC(ctx) max((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max, \ (ctx)->ifc_softc_ctx.isc_tx_nsegments) static inline bool iflib_txd_db_check(if_ctx_t ctx, iflib_txq_t txq, int ring, qidx_t in_use) { qidx_t dbval, max; bool rang; rang = false; max = TXQ_MAX_DB_DEFERRED(txq, in_use); if (ring || txq->ift_db_pending >= max) { dbval = txq->ift_npending ? txq->ift_npending : txq->ift_pidx; bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, dbval); txq->ift_db_pending = txq->ift_npending = 0; rang = true; } return (rang); } #ifdef PKT_DEBUG static void print_pkt(if_pkt_info_t pi) { printf("pi len: %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n", pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx); printf("pi new_pidx: %d csum_flags: %lx tso_segsz: %d mflags: %x vtag: %d\n", pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_tso_segsz, pi->ipi_mflags, pi->ipi_vtag); printf("pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n", pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto); } #endif #define IS_TSO4(pi) ((pi)->ipi_csum_flags & CSUM_IP_TSO) #define IS_TX_OFFLOAD4(pi) ((pi)->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP_TSO)) #define IS_TSO6(pi) ((pi)->ipi_csum_flags & CSUM_IP6_TSO) #define IS_TX_OFFLOAD6(pi) ((pi)->ipi_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_TSO)) static int iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp) { if_shared_ctx_t sctx = txq->ift_ctx->ifc_sctx; struct ether_vlan_header *eh; struct mbuf *m; m = *mp; if ((sctx->isc_flags & IFLIB_NEED_SCRATCH) && M_WRITABLE(m) == 0) { if ((m = m_dup(m, M_NOWAIT)) == NULL) { return (ENOMEM); } else { m_freem(*mp); DBG_COUNTER_INC(tx_frees); *mp = m; } } /* * Determine where frame payload starts. * Jump over vlan headers if already present, * helpful for QinQ too. 
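 * For example, a single 802.1Q tag moves the real ethertype into
 * evl_proto and grows the header from ETHER_HDR_LEN (14) to
 * ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN (18) bytes, which is what the
 * ehdrlen computation below accounts for.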
*/ if (__predict_false(m->m_len < sizeof(*eh))) { txq->ift_pullups++; if (__predict_false((m = m_pullup(m, sizeof(*eh))) == NULL)) return (ENOMEM); } eh = mtod(m, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { pi->ipi_etype = ntohs(eh->evl_proto); pi->ipi_ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; } else { pi->ipi_etype = ntohs(eh->evl_encap_proto); pi->ipi_ehdrlen = ETHER_HDR_LEN; } switch (pi->ipi_etype) { #ifdef INET case ETHERTYPE_IP: { struct mbuf *n; struct ip *ip = NULL; struct tcphdr *th = NULL; int minthlen; minthlen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip) + sizeof(*th)); if (__predict_false(m->m_len < minthlen)) { /* * if this code bloat is causing too much of a hit * move it to a separate function and mark it noinline */ if (m->m_len == pi->ipi_ehdrlen) { n = m->m_next; MPASS(n); if (n->m_len >= sizeof(*ip)) { ip = (struct ip *)n->m_data; if (n->m_len >= (ip->ip_hl << 2) + sizeof(*th)) th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); } else { txq->ift_pullups++; if (__predict_false((m = m_pullup(m, minthlen)) == NULL)) return (ENOMEM); ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); } } else { txq->ift_pullups++; if (__predict_false((m = m_pullup(m, minthlen)) == NULL)) return (ENOMEM); ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th)) th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); } } else { ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th)) th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); } pi->ipi_ip_hlen = ip->ip_hl << 2; pi->ipi_ipproto = ip->ip_p; pi->ipi_flags |= IPI_TX_IPV4; /* TCP checksum offload may require TCP header length */ if (IS_TX_OFFLOAD4(pi)) { if (__predict_true(pi->ipi_ipproto == IPPROTO_TCP)) { if (__predict_false(th == NULL)) { txq->ift_pullups++; if (__predict_false((m = m_pullup(m, (ip->ip_hl << 2) + sizeof(*th))) == NULL)) return (ENOMEM); th = (struct tcphdr *)((caddr_t)ip + pi->ipi_ip_hlen); } pi->ipi_tcp_hflags = th->th_flags; pi->ipi_tcp_hlen = th->th_off << 2; pi->ipi_tcp_seq = th->th_seq; } if (IS_TSO4(pi)) { if (__predict_false(ip->ip_p != IPPROTO_TCP)) return (ENXIO); /* * TSO always requires hardware checksum offload. */ pi->ipi_csum_flags |= (CSUM_IP_TCP | CSUM_IP); th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htons(IPPROTO_TCP)); pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz; if (sctx->isc_flags & IFLIB_TSO_INIT_IP) { ip->ip_sum = 0; ip->ip_len = htons(pi->ipi_ip_hlen + pi->ipi_tcp_hlen + pi->ipi_tso_segsz); } } } if ((sctx->isc_flags & IFLIB_NEED_ZERO_CSUM) && (pi->ipi_csum_flags & CSUM_IP)) ip->ip_sum = 0; break; } #endif #ifdef INET6 case ETHERTYPE_IPV6: { struct ip6_hdr *ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen); struct tcphdr *th; pi->ipi_ip_hlen = sizeof(struct ip6_hdr); if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) { txq->ift_pullups++; if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL)) return (ENOMEM); } th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen); /* XXX-BZ this will go badly in case of ext hdrs. 
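	 * ip6_nxt is read from the fixed IPv6 header, so when extension
	 * headers (hop-by-hop, routing, fragment, ...) are present,
	 * ipi_ipproto names the first extension header instead of TCP/UDP,
	 * and the tcphdr pointer computed above (ip6 + sizeof(struct
	 * ip6_hdr)) points into the extension header chain rather than at
	 * the TCP header.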
*/ pi->ipi_ipproto = ip6->ip6_nxt; pi->ipi_flags |= IPI_TX_IPV6; /* TCP checksum offload may require TCP header length */ if (IS_TX_OFFLOAD6(pi)) { if (pi->ipi_ipproto == IPPROTO_TCP) { if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) { txq->ift_pullups++; if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) == NULL)) return (ENOMEM); } pi->ipi_tcp_hflags = th->th_flags; pi->ipi_tcp_hlen = th->th_off << 2; pi->ipi_tcp_seq = th->th_seq; } if (IS_TSO6(pi)) { if (__predict_false(ip6->ip6_nxt != IPPROTO_TCP)) return (ENXIO); /* * TSO always requires hardware checksum offload. */ pi->ipi_csum_flags |= CSUM_IP6_TCP; th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0); pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz; } } break; } #endif default: pi->ipi_csum_flags &= ~CSUM_OFFLOAD; pi->ipi_ip_hlen = 0; break; } *mp = m; return (0); } /* * If dodgy hardware rejects the scatter gather chain we've handed it * we'll need to remove the mbuf chain from ifsg_m[] before we can add the * m_defrag'd mbufs */ static __noinline struct mbuf * iflib_remove_mbuf(iflib_txq_t txq) { int ntxd, pidx; struct mbuf *m, **ifsd_m; ifsd_m = txq->ift_sds.ifsd_m; ntxd = txq->ift_size; pidx = txq->ift_pidx & (ntxd - 1); ifsd_m = txq->ift_sds.ifsd_m; m = ifsd_m[pidx]; ifsd_m[pidx] = NULL; bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[pidx]); if (txq->ift_sds.ifsd_tso_map != NULL) bus_dmamap_unload(txq->ift_tso_buf_tag, txq->ift_sds.ifsd_tso_map[pidx]); #if MEMORY_LOGGING txq->ift_dequeued++; #endif return (m); } static inline caddr_t calc_next_txd(iflib_txq_t txq, int cidx, uint8_t qid) { qidx_t size; int ntxd; caddr_t start, end, cur, next; ntxd = txq->ift_size; size = txq->ift_txd_size[qid]; start = txq->ift_ifdi[qid].idi_vaddr; if (__predict_false(size == 0)) return (start); cur = start + size*cidx; end = start + size*ntxd; next = CACHE_PTR_NEXT(cur); return (next < end ? next : start); } /* * Pad an mbuf to ensure a minimum ethernet frame size. 
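 * For example, a 28-byte ARP payload behind a 14-byte Ethernet header is
 * brought up to the 60-byte (64 less CRC) minimum by appending 18 bytes
 * of zeros.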
* min_frame_size is the frame size (less CRC) to pad the mbuf to */ static __noinline int iflib_ether_pad(device_t dev, struct mbuf **m_head, uint16_t min_frame_size) { /* * 18 is enough bytes to pad an ARP packet to 46 bytes, and * and ARP message is the smallest common payload I can think of */ static char pad[18]; /* just zeros */ int n; struct mbuf *new_head; if (!M_WRITABLE(*m_head)) { new_head = m_dup(*m_head, M_NOWAIT); if (new_head == NULL) { m_freem(*m_head); device_printf(dev, "cannot pad short frame, m_dup() failed"); DBG_COUNTER_INC(encap_pad_mbuf_fail); DBG_COUNTER_INC(tx_frees); return ENOMEM; } m_freem(*m_head); *m_head = new_head; } for (n = min_frame_size - (*m_head)->m_pkthdr.len; n > 0; n -= sizeof(pad)) if (!m_append(*m_head, min(n, sizeof(pad)), pad)) break; if (n > 0) { m_freem(*m_head); device_printf(dev, "cannot pad short frame\n"); DBG_COUNTER_INC(encap_pad_mbuf_fail); DBG_COUNTER_INC(tx_frees); return (ENOBUFS); } return 0; } static int iflib_encap(iflib_txq_t txq, struct mbuf **m_headp) { if_ctx_t ctx; if_shared_ctx_t sctx; if_softc_ctx_t scctx; bus_dma_tag_t buf_tag; bus_dma_segment_t *segs; struct mbuf *m_head, **ifsd_m; void *next_txd; bus_dmamap_t map; struct if_pkt_info pi; int remap = 0; int err, nsegs, ndesc, max_segs, pidx, cidx, next, ntxd; ctx = txq->ift_ctx; sctx = ctx->ifc_sctx; scctx = &ctx->ifc_softc_ctx; segs = txq->ift_segs; ntxd = txq->ift_size; m_head = *m_headp; map = NULL; /* * If we're doing TSO the next descriptor to clean may be quite far ahead */ cidx = txq->ift_cidx; pidx = txq->ift_pidx; if (ctx->ifc_flags & IFC_PREFETCH) { next = (cidx + CACHE_PTR_INCREMENT) & (ntxd-1); if (!(ctx->ifc_flags & IFLIB_HAS_TXCQ)) { next_txd = calc_next_txd(txq, cidx, 0); prefetch(next_txd); } /* prefetch the next cache line of mbuf pointers and flags */ prefetch(&txq->ift_sds.ifsd_m[next]); prefetch(&txq->ift_sds.ifsd_map[next]); next = (cidx + CACHE_LINE_SIZE) & (ntxd-1); } map = txq->ift_sds.ifsd_map[pidx]; ifsd_m = txq->ift_sds.ifsd_m; if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { buf_tag = txq->ift_tso_buf_tag; max_segs = scctx->isc_tx_tso_segments_max; map = txq->ift_sds.ifsd_tso_map[pidx]; MPASS(buf_tag != NULL); MPASS(max_segs > 0); } else { buf_tag = txq->ift_buf_tag; max_segs = scctx->isc_tx_nsegments; map = txq->ift_sds.ifsd_map[pidx]; } if ((sctx->isc_flags & IFLIB_NEED_ETHER_PAD) && __predict_false(m_head->m_pkthdr.len < scctx->isc_min_frame_size)) { err = iflib_ether_pad(ctx->ifc_dev, m_headp, scctx->isc_min_frame_size); if (err) { DBG_COUNTER_INC(encap_txd_encap_fail); return err; } } m_head = *m_headp; pkt_info_zero(&pi); pi.ipi_mflags = (m_head->m_flags & (M_VLANTAG|M_BCAST|M_MCAST)); pi.ipi_pidx = pidx; pi.ipi_qsidx = txq->ift_id; pi.ipi_len = m_head->m_pkthdr.len; pi.ipi_csum_flags = m_head->m_pkthdr.csum_flags; pi.ipi_vtag = M_HAS_VLANTAG(m_head) ? 
m_head->m_pkthdr.ether_vtag : 0; /* deliberate bitwise OR to make one condition */ if (__predict_true((pi.ipi_csum_flags | pi.ipi_vtag))) { if (__predict_false((err = iflib_parse_header(txq, &pi, m_headp)) != 0)) { DBG_COUNTER_INC(encap_txd_encap_fail); return (err); } m_head = *m_headp; } retry: err = bus_dmamap_load_mbuf_sg(buf_tag, map, m_head, segs, &nsegs, BUS_DMA_NOWAIT); defrag: if (__predict_false(err)) { switch (err) { case EFBIG: /* try collapse once and defrag once */ if (remap == 0) { m_head = m_collapse(*m_headp, M_NOWAIT, max_segs); /* try defrag if collapsing fails */ if (m_head == NULL) remap++; } if (remap == 1) { txq->ift_mbuf_defrag++; m_head = m_defrag(*m_headp, M_NOWAIT); } /* * remap should never be >1 unless bus_dmamap_load_mbuf_sg * failed to map an mbuf that was run through m_defrag */ MPASS(remap <= 1); if (__predict_false(m_head == NULL || remap > 1)) goto defrag_failed; remap++; *m_headp = m_head; goto retry; break; case ENOMEM: txq->ift_no_tx_dma_setup++; break; default: txq->ift_no_tx_dma_setup++; m_freem(*m_headp); DBG_COUNTER_INC(tx_frees); *m_headp = NULL; break; } txq->ift_map_failed++; DBG_COUNTER_INC(encap_load_mbuf_fail); DBG_COUNTER_INC(encap_txd_encap_fail); return (err); } ifsd_m[pidx] = m_head; /* * XXX assumes a 1 to 1 relationship between segments and * descriptors - this does not hold true on all drivers, e.g. * cxgb */ if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) { txq->ift_no_desc_avail++; bus_dmamap_unload(buf_tag, map); DBG_COUNTER_INC(encap_txq_avail_fail); DBG_COUNTER_INC(encap_txd_encap_fail); if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0) GROUPTASK_ENQUEUE(&txq->ift_task); return (ENOBUFS); } /* * On Intel cards we can greatly reduce the number of TX interrupts * we see by only setting report status on every Nth descriptor. * However, this also means that the driver will need to keep track * of the descriptors that RS was set on to check them for the DD bit. */ txq->ift_rs_pending += nsegs + 1; if (txq->ift_rs_pending > TXQ_MAX_RS_DEFERRED(txq) || iflib_no_tx_batch || (TXQ_AVAIL(txq) - nsegs) <= MAX_TX_DESC(ctx) + 2) { pi.ipi_flags |= IPI_TX_INTR; txq->ift_rs_pending = 0; } pi.ipi_segs = segs; pi.ipi_nsegs = nsegs; MPASS(pidx >= 0 && pidx < txq->ift_size); #ifdef PKT_DEBUG print_pkt(&pi); #endif if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) { bus_dmamap_sync(buf_tag, map, BUS_DMASYNC_PREWRITE); DBG_COUNTER_INC(tx_encap); MPASS(pi.ipi_new_pidx < txq->ift_size); ndesc = pi.ipi_new_pidx - pi.ipi_pidx; if (pi.ipi_new_pidx < pi.ipi_pidx) { ndesc += txq->ift_size; txq->ift_gen = 1; } /* * drivers can need as many as * two sentinels */ MPASS(ndesc <= pi.ipi_nsegs + 2); MPASS(pi.ipi_new_pidx != pidx); MPASS(ndesc > 0); txq->ift_in_use += ndesc; /* * We update the last software descriptor again here because there may * be a sentinel and/or there may be more mbufs than segments */ txq->ift_pidx = pi.ipi_new_pidx; txq->ift_npending += pi.ipi_ndescs; } else { *m_headp = m_head = iflib_remove_mbuf(txq); if (err == EFBIG) { txq->ift_txd_encap_efbig++; if (remap < 2) { remap = 1; goto defrag; } } goto defrag_failed; } /* * err can't possibly be non-zero here, so we don't neet to test it * to see if we need to DBG_COUNTER_INC(encap_txd_encap_fail). 
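	 * (every failure path above either returns early or ends up at
	 * defrag_failed, so only the success path reaches this return)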
*/ return (err); defrag_failed: txq->ift_mbuf_defrag_failed++; txq->ift_map_failed++; m_freem(*m_headp); DBG_COUNTER_INC(tx_frees); *m_headp = NULL; DBG_COUNTER_INC(encap_txd_encap_fail); return (ENOMEM); } static void iflib_tx_desc_free(iflib_txq_t txq, int n) { uint32_t qsize, cidx, mask, gen; struct mbuf *m, **ifsd_m; bool do_prefetch; cidx = txq->ift_cidx; gen = txq->ift_gen; qsize = txq->ift_size; mask = qsize-1; ifsd_m = txq->ift_sds.ifsd_m; do_prefetch = (txq->ift_ctx->ifc_flags & IFC_PREFETCH); while (n-- > 0) { if (do_prefetch) { prefetch(ifsd_m[(cidx + 3) & mask]); prefetch(ifsd_m[(cidx + 4) & mask]); } if ((m = ifsd_m[cidx]) != NULL) { prefetch(&ifsd_m[(cidx + CACHE_PTR_INCREMENT) & mask]); if (m->m_pkthdr.csum_flags & CSUM_TSO) { bus_dmamap_sync(txq->ift_tso_buf_tag, txq->ift_sds.ifsd_tso_map[cidx], BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txq->ift_tso_buf_tag, txq->ift_sds.ifsd_tso_map[cidx]); } else { bus_dmamap_sync(txq->ift_buf_tag, txq->ift_sds.ifsd_map[cidx], BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txq->ift_buf_tag, txq->ift_sds.ifsd_map[cidx]); } /* XXX we don't support any drivers that batch packets yet */ MPASS(m->m_nextpkt == NULL); m_freem(m); ifsd_m[cidx] = NULL; #if MEMORY_LOGGING txq->ift_dequeued++; #endif DBG_COUNTER_INC(tx_frees); } if (__predict_false(++cidx == qsize)) { cidx = 0; gen = 0; } } txq->ift_cidx = cidx; txq->ift_gen = gen; } static __inline int iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh) { int reclaim; if_ctx_t ctx = txq->ift_ctx; KASSERT(thresh >= 0, ("invalid threshold to reclaim")); MPASS(thresh /*+ MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size); /* * Need a rate-limiting check so that this isn't called every time */ iflib_tx_credits_update(ctx, txq); reclaim = DESC_RECLAIMABLE(txq); if (reclaim <= thresh /* + MAX_TX_DESC(txq->ift_ctx) */) { #ifdef INVARIANTS if (iflib_verbose_debug) { printf("%s processed=%ju cleaned=%ju tx_nsegments=%d reclaim=%d thresh=%d\n", __FUNCTION__, txq->ift_processed, txq->ift_cleaned, txq->ift_ctx->ifc_softc_ctx.isc_tx_nsegments, reclaim, thresh); } #endif return (0); } iflib_tx_desc_free(txq, reclaim); txq->ift_cleaned += reclaim; txq->ift_in_use -= reclaim; return (reclaim); } static struct mbuf ** _ring_peek_one(struct ifmp_ring *r, int cidx, int offset, int remaining) { int next, size; struct mbuf **items; size = r->size; next = (cidx + CACHE_PTR_INCREMENT) & (size-1); items = __DEVOLATILE(struct mbuf **, &r->items[0]); prefetch(items[(cidx + offset) & (size-1)]); if (remaining > 1) { prefetch2cachelines(&items[next]); prefetch2cachelines(items[(cidx + offset + 1) & (size-1)]); prefetch2cachelines(items[(cidx + offset + 2) & (size-1)]); prefetch2cachelines(items[(cidx + offset + 3) & (size-1)]); } return (__DEVOLATILE(struct mbuf **, &r->items[(cidx + offset) & (size-1)])); } static void iflib_txq_check_drain(iflib_txq_t txq, int budget) { ifmp_ring_check_drainage(txq->ift_br, budget); } static uint32_t iflib_txq_can_drain(struct ifmp_ring *r) { iflib_txq_t txq = r->cookie; if_ctx_t ctx = txq->ift_ctx; if (TXQ_AVAIL(txq) > MAX_TX_DESC(ctx) + 2) return (1); bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, BUS_DMASYNC_POSTREAD); return (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false)); } static uint32_t iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx) { iflib_txq_t txq = r->cookie; if_ctx_t ctx = txq->ift_ctx; if_t ifp = ctx->ifc_ifp; struct mbuf *m, **mp; int avail, bytes_sent, consumed, count, err, i, in_use_prev; int mcast_sent, pkt_sent, reclaimed, 
txq_avail; bool do_prefetch, rang, ring; if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING) || !LINK_ACTIVE(ctx))) { DBG_COUNTER_INC(txq_drain_notready); return (0); } reclaimed = iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx)); rang = iflib_txd_db_check(ctx, txq, reclaimed, txq->ift_in_use); avail = IDXDIFF(pidx, cidx, r->size); if (__predict_false(ctx->ifc_flags & IFC_QFLUSH)) { DBG_COUNTER_INC(txq_drain_flushing); for (i = 0; i < avail; i++) { if (__predict_true(r->items[(cidx + i) & (r->size-1)] != (void *)txq)) m_free(r->items[(cidx + i) & (r->size-1)]); r->items[(cidx + i) & (r->size-1)] = NULL; } return (avail); } if (__predict_false(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) { txq->ift_qstatus = IFLIB_QUEUE_IDLE; CALLOUT_LOCK(txq); callout_stop(&txq->ift_timer); CALLOUT_UNLOCK(txq); DBG_COUNTER_INC(txq_drain_oactive); return (0); } if (reclaimed) txq->ift_qstatus = IFLIB_QUEUE_IDLE; consumed = mcast_sent = bytes_sent = pkt_sent = 0; count = MIN(avail, TX_BATCH_SIZE); #ifdef INVARIANTS if (iflib_verbose_debug) printf("%s avail=%d ifc_flags=%x txq_avail=%d ", __FUNCTION__, avail, ctx->ifc_flags, TXQ_AVAIL(txq)); #endif do_prefetch = (ctx->ifc_flags & IFC_PREFETCH); txq_avail = TXQ_AVAIL(txq); err = 0; for (i = 0; i < count && txq_avail > MAX_TX_DESC(ctx) + 2; i++) { int rem = do_prefetch ? count - i : 0; mp = _ring_peek_one(r, cidx, i, rem); MPASS(mp != NULL && *mp != NULL); if (__predict_false(*mp == (struct mbuf *)txq)) { consumed++; continue; } in_use_prev = txq->ift_in_use; err = iflib_encap(txq, mp); if (__predict_false(err)) { /* no room - bail out */ if (err == ENOBUFS) break; consumed++; /* we can't send this packet - skip it */ continue; } consumed++; pkt_sent++; m = *mp; DBG_COUNTER_INC(tx_sent); bytes_sent += m->m_pkthdr.len; mcast_sent += !!(m->m_flags & M_MCAST); txq_avail = TXQ_AVAIL(txq); txq->ift_db_pending += (txq->ift_in_use - in_use_prev); ETHER_BPF_MTAP(ifp, m); if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) break; rang = iflib_txd_db_check(ctx, txq, false, in_use_prev); } /* deliberate use of bitwise or to avoid gratuitous short-circuit */ ring = rang ? 
false : (iflib_min_tx_latency | err) || (TXQ_AVAIL(txq) < MAX_TX_DESC(ctx)); iflib_txd_db_check(ctx, txq, ring, txq->ift_in_use); if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent); if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent); if (mcast_sent) if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast_sent); #ifdef INVARIANTS if (iflib_verbose_debug) printf("consumed=%d\n", consumed); #endif return (consumed); } static uint32_t iflib_txq_drain_always(struct ifmp_ring *r) { return (1); } static uint32_t iflib_txq_drain_free(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx) { int i, avail; struct mbuf **mp; iflib_txq_t txq; txq = r->cookie; txq->ift_qstatus = IFLIB_QUEUE_IDLE; CALLOUT_LOCK(txq); callout_stop(&txq->ift_timer); CALLOUT_UNLOCK(txq); avail = IDXDIFF(pidx, cidx, r->size); for (i = 0; i < avail; i++) { mp = _ring_peek_one(r, cidx, i, avail - i); if (__predict_false(*mp == (struct mbuf *)txq)) continue; m_freem(*mp); DBG_COUNTER_INC(tx_frees); } MPASS(ifmp_ring_is_stalled(r) == 0); return (avail); } static void iflib_ifmp_purge(iflib_txq_t txq) { struct ifmp_ring *r; r = txq->ift_br; r->drain = iflib_txq_drain_free; r->can_drain = iflib_txq_drain_always; ifmp_ring_check_drainage(r, r->size); r->drain = iflib_txq_drain; r->can_drain = iflib_txq_can_drain; } static void _task_fn_tx(void *context) { iflib_txq_t txq = context; if_ctx_t ctx = txq->ift_ctx; #if defined(ALTQ) || defined(DEV_NETMAP) if_t ifp = ctx->ifc_ifp; #endif int abdicate = ctx->ifc_sysctl_tx_abdicate; #ifdef IFLIB_DIAGNOSTICS txq->ift_cpu_exec_count[curcpu]++; #endif if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)) return; #ifdef DEV_NETMAP if (if_getcapenable(ifp) & IFCAP_NETMAP) { bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, BUS_DMASYNC_POSTREAD); if (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false)) netmap_tx_irq(ifp, txq->ift_id); if (ctx->ifc_flags & IFC_LEGACY) IFDI_INTR_ENABLE(ctx); else IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id); return; } #endif #ifdef ALTQ if (ALTQ_IS_ENABLED(&ifp->if_snd)) iflib_altq_if_start(ifp); #endif if (txq->ift_db_pending) ifmp_ring_enqueue(txq->ift_br, (void **)&txq, 1, TX_BATCH_SIZE, abdicate); else if (!abdicate) ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE); /* * When abdicating, we always need to check drainage, not just when we don't enqueue */ if (abdicate) ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE); if (ctx->ifc_flags & IFC_LEGACY) IFDI_INTR_ENABLE(ctx); else IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id); } static void _task_fn_rx(void *context) { - struct epoch_tracker et; iflib_rxq_t rxq = context; if_ctx_t ctx = rxq->ifr_ctx; bool more; uint16_t budget; #ifdef IFLIB_DIAGNOSTICS rxq->ifr_cpu_exec_count[curcpu]++; #endif DBG_COUNTER_INC(task_fn_rxs); if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))) return; more = true; #ifdef DEV_NETMAP if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP) { u_int work = 0; if (netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id, &work)) { more = false; } } #endif budget = ctx->ifc_sysctl_rx_budget; if (budget == 0) budget = 16; /* XXX */ - NET_EPOCH_ENTER(et); if (more == false || (more = iflib_rxeof(rxq, budget)) == false) { if (ctx->ifc_flags & IFC_LEGACY) IFDI_INTR_ENABLE(ctx); else IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id); DBG_COUNTER_INC(rx_intr_enables); } - NET_EPOCH_EXIT(et); if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))) return; if (more) GROUPTASK_ENQUEUE(&rxq->ifr_task); } static void _task_fn_admin(void *context) { if_ctx_t ctx = context; 
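	/*
	 * Deferred control-plane work for this context: as the code below
	 * shows, the admin task stops the per-txq timers, runs
	 * IFDI_WATCHDOG_RESET() when a watchdog event was flagged, refreshes
	 * admin/link status, rearms the txq timers, and re-inits the
	 * interface when IFC_DO_RESET was set.
	 */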
if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; iflib_txq_t txq; int i; bool oactive, running, do_reset, do_watchdog, in_detach; uint32_t reset_on = hz / 2; STATE_LOCK(ctx); running = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING); oactive = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE); do_reset = (ctx->ifc_flags & IFC_DO_RESET); do_watchdog = (ctx->ifc_flags & IFC_DO_WATCHDOG); in_detach = (ctx->ifc_flags & IFC_IN_DETACH); ctx->ifc_flags &= ~(IFC_DO_RESET|IFC_DO_WATCHDOG); STATE_UNLOCK(ctx); if ((!running && !oactive) && !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN)) return; if (in_detach) return; CTX_LOCK(ctx); for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) { CALLOUT_LOCK(txq); callout_stop(&txq->ift_timer); CALLOUT_UNLOCK(txq); } if (do_watchdog) { ctx->ifc_watchdog_events++; IFDI_WATCHDOG_RESET(ctx); } IFDI_UPDATE_ADMIN_STATUS(ctx); for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) { #ifdef DEV_NETMAP reset_on = hz / 2; if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP) iflib_netmap_timer_adjust(ctx, txq, &reset_on); #endif callout_reset_on(&txq->ift_timer, reset_on, iflib_timer, txq, txq->ift_timer.c_cpu); } IFDI_LINK_INTR_ENABLE(ctx); if (do_reset) iflib_if_init_locked(ctx); CTX_UNLOCK(ctx); if (LINK_ACTIVE(ctx) == 0) return; for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET); } static void _task_fn_iov(void *context) { if_ctx_t ctx = context; if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING) && !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN)) return; CTX_LOCK(ctx); IFDI_VFLR_HANDLE(ctx); CTX_UNLOCK(ctx); } static int iflib_sysctl_int_delay(SYSCTL_HANDLER_ARGS) { int err; if_int_delay_info_t info; if_ctx_t ctx; info = (if_int_delay_info_t)arg1; ctx = info->iidi_ctx; info->iidi_req = req; info->iidi_oidp = oidp; CTX_LOCK(ctx); err = IFDI_SYSCTL_INT_DELAY(ctx, info); CTX_UNLOCK(ctx); return (err); } /********************************************************************* * * IFNET FUNCTIONS * **********************************************************************/ static void iflib_if_init_locked(if_ctx_t ctx) { iflib_stop(ctx); iflib_init_locked(ctx); } static void iflib_if_init(void *arg) { if_ctx_t ctx = arg; CTX_LOCK(ctx); iflib_if_init_locked(ctx); CTX_UNLOCK(ctx); } static int iflib_if_transmit(if_t ifp, struct mbuf *m) { if_ctx_t ctx = if_getsoftc(ifp); iflib_txq_t txq; int err, qidx; int abdicate = ctx->ifc_sysctl_tx_abdicate; if (__predict_false((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !LINK_ACTIVE(ctx))) { DBG_COUNTER_INC(tx_frees); m_freem(m); return (ENETDOWN); } MPASS(m->m_nextpkt == NULL); /* ALTQ-enabled interfaces always use queue 0. */ qidx = 0; if ((NTXQSETS(ctx) > 1) && M_HASHTYPE_GET(m) && !ALTQ_IS_ENABLED(&ifp->if_snd)) qidx = QIDX(ctx, m); /* * XXX calculate buf_ring based on flowid (divvy up bits?) 
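	 * For now the mapping is simply QIDX(): ((flowid &
	 * isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx), with
	 * ALTQ-enabled interfaces and unhashed mbufs pinned to queue 0.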
*/ txq = &ctx->ifc_txqs[qidx]; #ifdef DRIVER_BACKPRESSURE if (txq->ift_closed) { while (m != NULL) { next = m->m_nextpkt; m->m_nextpkt = NULL; m_freem(m); DBG_COUNTER_INC(tx_frees); m = next; } return (ENOBUFS); } #endif #ifdef notyet qidx = count = 0; mp = marr; next = m; do { count++; next = next->m_nextpkt; } while (next != NULL); if (count > nitems(marr)) if ((mp = malloc(count*sizeof(struct mbuf *), M_IFLIB, M_NOWAIT)) == NULL) { /* XXX check nextpkt */ m_freem(m); /* XXX simplify for now */ DBG_COUNTER_INC(tx_frees); return (ENOBUFS); } for (next = m, i = 0; next != NULL; i++) { mp[i] = next; next = next->m_nextpkt; mp[i]->m_nextpkt = NULL; } #endif DBG_COUNTER_INC(tx_seen); err = ifmp_ring_enqueue(txq->ift_br, (void **)&m, 1, TX_BATCH_SIZE, abdicate); if (abdicate) GROUPTASK_ENQUEUE(&txq->ift_task); if (err) { if (!abdicate) GROUPTASK_ENQUEUE(&txq->ift_task); /* support forthcoming later */ #ifdef DRIVER_BACKPRESSURE txq->ift_closed = TRUE; #endif ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE); m_freem(m); DBG_COUNTER_INC(tx_frees); } return (err); } #ifdef ALTQ /* * The overall approach to integrating iflib with ALTQ is to continue to use * the iflib mp_ring machinery between the ALTQ queue(s) and the hardware * ring. Technically, when using ALTQ, queueing to an intermediate mp_ring * is redundant/unnecessary, but doing so minimizes the amount of * ALTQ-specific code required in iflib. It is assumed that the overhead of * redundantly queueing to an intermediate mp_ring is swamped by the * performance limitations inherent in using ALTQ. * * When ALTQ support is compiled in, all iflib drivers will use a transmit * routine, iflib_altq_if_transmit(), that checks if ALTQ is enabled for the * given interface. If ALTQ is enabled for an interface, then all * transmitted packets for that interface will be submitted to the ALTQ * subsystem via IFQ_ENQUEUE(). We don't use the legacy if_transmit() * implementation because it uses IFQ_HANDOFF(), which will duplicatively * update stats that the iflib machinery handles, and which is sensitve to * the disused IFF_DRV_OACTIVE flag. Additionally, iflib_altq_if_start() * will be installed as the start routine for use by ALTQ facilities that * need to trigger queue drains on a scheduled basis. * */ static void iflib_altq_if_start(if_t ifp) { struct ifaltq *ifq = &ifp->if_snd; struct mbuf *m; IFQ_LOCK(ifq); IFQ_DEQUEUE_NOLOCK(ifq, m); while (m != NULL) { iflib_if_transmit(ifp, m); IFQ_DEQUEUE_NOLOCK(ifq, m); } IFQ_UNLOCK(ifq); } static int iflib_altq_if_transmit(if_t ifp, struct mbuf *m) { int err; if (ALTQ_IS_ENABLED(&ifp->if_snd)) { IFQ_ENQUEUE(&ifp->if_snd, m, err); if (err == 0) iflib_altq_if_start(ifp); } else err = iflib_if_transmit(ifp, m); return (err); } #endif /* ALTQ */ static void iflib_if_qflush(if_t ifp) { if_ctx_t ctx = if_getsoftc(ifp); iflib_txq_t txq = ctx->ifc_txqs; int i; STATE_LOCK(ctx); ctx->ifc_flags |= IFC_QFLUSH; STATE_UNLOCK(ctx); for (i = 0; i < NTXQSETS(ctx); i++, txq++) while (!(ifmp_ring_is_idle(txq->ift_br) || ifmp_ring_is_stalled(txq->ift_br))) iflib_txq_check_drain(txq, 0); STATE_LOCK(ctx); ctx->ifc_flags &= ~IFC_QFLUSH; STATE_UNLOCK(ctx); /* * When ALTQ is enabled, this will also take care of purging the * ALTQ queue(s). 
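	 * (the iflib mp_rings were already drained in the loop above;
	 * if_qflush() handles the ifq/ALTQ side)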
*/ if_qflush(ifp); } #define IFCAP_FLAGS (IFCAP_HWCSUM_IPV6 | IFCAP_HWCSUM | IFCAP_LRO | \ IFCAP_TSO | IFCAP_VLAN_HWTAGGING | IFCAP_HWSTATS | \ IFCAP_VLAN_MTU | IFCAP_VLAN_HWFILTER | \ IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM | IFCAP_NOMAP) static int iflib_if_ioctl(if_t ifp, u_long command, caddr_t data) { if_ctx_t ctx = if_getsoftc(ifp); struct ifreq *ifr = (struct ifreq *)data; #if defined(INET) || defined(INET6) struct ifaddr *ifa = (struct ifaddr *)data; #endif bool avoid_reset = false; int err = 0, reinit = 0, bits; switch (command) { case SIOCSIFADDR: #ifdef INET if (ifa->ifa_addr->sa_family == AF_INET) avoid_reset = true; #endif #ifdef INET6 if (ifa->ifa_addr->sa_family == AF_INET6) avoid_reset = true; #endif /* ** Calling init results in link renegotiation, ** so we avoid doing it when possible. */ if (avoid_reset) { if_setflagbits(ifp, IFF_UP,0); if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) reinit = 1; #ifdef INET if (!(if_getflags(ifp) & IFF_NOARP)) arp_ifinit(ifp, ifa); #endif } else err = ether_ioctl(ifp, command, data); break; case SIOCSIFMTU: CTX_LOCK(ctx); if (ifr->ifr_mtu == if_getmtu(ifp)) { CTX_UNLOCK(ctx); break; } bits = if_getdrvflags(ifp); /* stop the driver and free any clusters before proceeding */ iflib_stop(ctx); if ((err = IFDI_MTU_SET(ctx, ifr->ifr_mtu)) == 0) { STATE_LOCK(ctx); if (ifr->ifr_mtu > ctx->ifc_max_fl_buf_size) ctx->ifc_flags |= IFC_MULTISEG; else ctx->ifc_flags &= ~IFC_MULTISEG; STATE_UNLOCK(ctx); err = if_setmtu(ifp, ifr->ifr_mtu); } iflib_init_locked(ctx); STATE_LOCK(ctx); if_setdrvflags(ifp, bits); STATE_UNLOCK(ctx); CTX_UNLOCK(ctx); break; case SIOCSIFFLAGS: CTX_LOCK(ctx); if (if_getflags(ifp) & IFF_UP) { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { if ((if_getflags(ifp) ^ ctx->ifc_if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) { err = IFDI_PROMISC_SET(ctx, if_getflags(ifp)); } } else reinit = 1; } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { iflib_stop(ctx); } ctx->ifc_if_flags = if_getflags(ifp); CTX_UNLOCK(ctx); break; case SIOCADDMULTI: case SIOCDELMULTI: if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { CTX_LOCK(ctx); IFDI_INTR_DISABLE(ctx); IFDI_MULTI_SET(ctx); IFDI_INTR_ENABLE(ctx); CTX_UNLOCK(ctx); } break; case SIOCSIFMEDIA: CTX_LOCK(ctx); IFDI_MEDIA_SET(ctx); CTX_UNLOCK(ctx); /* FALLTHROUGH */ case SIOCGIFMEDIA: case SIOCGIFXMEDIA: err = ifmedia_ioctl(ifp, ifr, ctx->ifc_mediap, command); break; case SIOCGI2C: { struct ifi2creq i2c; err = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); if (err != 0) break; if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) { err = EINVAL; break; } if (i2c.len > sizeof(i2c.data)) { err = EINVAL; break; } if ((err = IFDI_I2C_REQ(ctx, &i2c)) == 0) err = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c)); break; } case SIOCSIFCAP: { int mask, setmask, oldmask; oldmask = if_getcapenable(ifp); mask = ifr->ifr_reqcap ^ oldmask; mask &= ctx->ifc_softc_ctx.isc_capabilities | IFCAP_NOMAP; setmask = 0; #ifdef TCP_OFFLOAD setmask |= mask & (IFCAP_TOE4|IFCAP_TOE6); #endif setmask |= (mask & IFCAP_FLAGS); setmask |= (mask & IFCAP_WOL); /* * If any RX csum has changed, change all the ones that * are supported by the driver. 
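	 * In other words, toggling either IFCAP_RXCSUM or IFCAP_RXCSUM_IPV6
	 * toggles both, to the extent the driver advertises them; the two
	 * are not tracked independently here.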
*/ if (setmask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) { setmask |= ctx->ifc_softc_ctx.isc_capabilities & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6); } /* * want to ensure that traffic has stopped before we change any of the flags */ if (setmask) { CTX_LOCK(ctx); bits = if_getdrvflags(ifp); if (bits & IFF_DRV_RUNNING && setmask & ~IFCAP_WOL) iflib_stop(ctx); STATE_LOCK(ctx); if_togglecapenable(ifp, setmask); STATE_UNLOCK(ctx); if (bits & IFF_DRV_RUNNING && setmask & ~IFCAP_WOL) iflib_init_locked(ctx); STATE_LOCK(ctx); if_setdrvflags(ifp, bits); STATE_UNLOCK(ctx); CTX_UNLOCK(ctx); } if_vlancap(ifp); break; } case SIOCGPRIVATE_0: case SIOCSDRVSPEC: case SIOCGDRVSPEC: CTX_LOCK(ctx); err = IFDI_PRIV_IOCTL(ctx, command, data); CTX_UNLOCK(ctx); break; default: err = ether_ioctl(ifp, command, data); break; } if (reinit) iflib_if_init(ctx); return (err); } static uint64_t iflib_if_get_counter(if_t ifp, ift_counter cnt) { if_ctx_t ctx = if_getsoftc(ifp); return (IFDI_GET_COUNTER(ctx, cnt)); } /********************************************************************* * * OTHER FUNCTIONS EXPORTED TO THE STACK * **********************************************************************/ static void iflib_vlan_register(void *arg, if_t ifp, uint16_t vtag) { if_ctx_t ctx = if_getsoftc(ifp); if ((void *)ctx != arg) return; if ((vtag == 0) || (vtag > 4095)) return; if (iflib_in_detach(ctx)) return; CTX_LOCK(ctx); IFDI_VLAN_REGISTER(ctx, vtag); /* Re-init to load the changes */ if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) iflib_if_init_locked(ctx); CTX_UNLOCK(ctx); } static void iflib_vlan_unregister(void *arg, if_t ifp, uint16_t vtag) { if_ctx_t ctx = if_getsoftc(ifp); if ((void *)ctx != arg) return; if ((vtag == 0) || (vtag > 4095)) return; CTX_LOCK(ctx); IFDI_VLAN_UNREGISTER(ctx, vtag); /* Re-init to load the changes */ if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) iflib_if_init_locked(ctx); CTX_UNLOCK(ctx); } static void iflib_led_func(void *arg, int onoff) { if_ctx_t ctx = arg; CTX_LOCK(ctx); IFDI_LED_FUNC(ctx, onoff); CTX_UNLOCK(ctx); } /********************************************************************* * * BUS FUNCTION DEFINITIONS * **********************************************************************/ int iflib_device_probe(device_t dev) { const pci_vendor_info_t *ent; if_shared_ctx_t sctx; uint16_t pci_device_id, pci_rev_id, pci_subdevice_id, pci_subvendor_id; uint16_t pci_vendor_id; if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC) return (ENOTSUP); pci_vendor_id = pci_get_vendor(dev); pci_device_id = pci_get_device(dev); pci_subvendor_id = pci_get_subvendor(dev); pci_subdevice_id = pci_get_subdevice(dev); pci_rev_id = pci_get_revid(dev); if (sctx->isc_parse_devinfo != NULL) sctx->isc_parse_devinfo(&pci_device_id, &pci_subvendor_id, &pci_subdevice_id, &pci_rev_id); ent = sctx->isc_vendor_info; while (ent->pvi_vendor_id != 0) { if (pci_vendor_id != ent->pvi_vendor_id) { ent++; continue; } if ((pci_device_id == ent->pvi_device_id) && ((pci_subvendor_id == ent->pvi_subvendor_id) || (ent->pvi_subvendor_id == 0)) && ((pci_subdevice_id == ent->pvi_subdevice_id) || (ent->pvi_subdevice_id == 0)) && ((pci_rev_id == ent->pvi_rev_id) || (ent->pvi_rev_id == 0))) { device_set_desc_copy(dev, ent->pvi_name); /* this needs to be changed to zero if the bus probing code * ever stops re-probing on best match because the sctx * may have its values over written by register calls * in subsequent probes */ return (BUS_PROBE_DEFAULT); } ent++; } return (ENXIO); } int iflib_device_probe_vendor(device_t 
dev) { int probe; probe = iflib_device_probe(dev); if (probe == BUS_PROBE_DEFAULT) return (BUS_PROBE_VENDOR); else return (probe); } static void iflib_reset_qvalues(if_ctx_t ctx) { if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; if_shared_ctx_t sctx = ctx->ifc_sctx; device_t dev = ctx->ifc_dev; int i; if (ctx->ifc_sysctl_ntxqs != 0) scctx->isc_ntxqsets = ctx->ifc_sysctl_ntxqs; if (ctx->ifc_sysctl_nrxqs != 0) scctx->isc_nrxqsets = ctx->ifc_sysctl_nrxqs; for (i = 0; i < sctx->isc_ntxqs; i++) { if (ctx->ifc_sysctl_ntxds[i] != 0) scctx->isc_ntxd[i] = ctx->ifc_sysctl_ntxds[i]; else scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i]; } for (i = 0; i < sctx->isc_nrxqs; i++) { if (ctx->ifc_sysctl_nrxds[i] != 0) scctx->isc_nrxd[i] = ctx->ifc_sysctl_nrxds[i]; else scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i]; } for (i = 0; i < sctx->isc_nrxqs; i++) { if (scctx->isc_nrxd[i] < sctx->isc_nrxd_min[i]) { device_printf(dev, "nrxd%d: %d less than nrxd_min %d - resetting to min\n", i, scctx->isc_nrxd[i], sctx->isc_nrxd_min[i]); scctx->isc_nrxd[i] = sctx->isc_nrxd_min[i]; } if (scctx->isc_nrxd[i] > sctx->isc_nrxd_max[i]) { device_printf(dev, "nrxd%d: %d greater than nrxd_max %d - resetting to max\n", i, scctx->isc_nrxd[i], sctx->isc_nrxd_max[i]); scctx->isc_nrxd[i] = sctx->isc_nrxd_max[i]; } if (!powerof2(scctx->isc_nrxd[i])) { device_printf(dev, "nrxd%d: %d is not a power of 2 - using default value of %d\n", i, scctx->isc_nrxd[i], sctx->isc_nrxd_default[i]); scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i]; } } for (i = 0; i < sctx->isc_ntxqs; i++) { if (scctx->isc_ntxd[i] < sctx->isc_ntxd_min[i]) { device_printf(dev, "ntxd%d: %d less than ntxd_min %d - resetting to min\n", i, scctx->isc_ntxd[i], sctx->isc_ntxd_min[i]); scctx->isc_ntxd[i] = sctx->isc_ntxd_min[i]; } if (scctx->isc_ntxd[i] > sctx->isc_ntxd_max[i]) { device_printf(dev, "ntxd%d: %d greater than ntxd_max %d - resetting to max\n", i, scctx->isc_ntxd[i], sctx->isc_ntxd_max[i]); scctx->isc_ntxd[i] = sctx->isc_ntxd_max[i]; } if (!powerof2(scctx->isc_ntxd[i])) { device_printf(dev, "ntxd%d: %d is not a power of 2 - using default value of %d\n", i, scctx->isc_ntxd[i], sctx->isc_ntxd_default[i]); scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i]; } } } static void iflib_add_pfil(if_ctx_t ctx) { struct pfil_head *pfil; struct pfil_head_args pa; iflib_rxq_t rxq; int i; pa.pa_version = PFIL_VERSION; pa.pa_flags = PFIL_IN; pa.pa_type = PFIL_TYPE_ETHERNET; pa.pa_headname = ctx->ifc_ifp->if_xname; pfil = pfil_head_register(&pa); for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) { rxq->pfil = pfil; } } static void iflib_rem_pfil(if_ctx_t ctx) { struct pfil_head *pfil; iflib_rxq_t rxq; int i; rxq = ctx->ifc_rxqs; pfil = rxq->pfil; for (i = 0; i < NRXQSETS(ctx); i++, rxq++) { rxq->pfil = NULL; } pfil_head_unregister(pfil); } static uint16_t get_ctx_core_offset(if_ctx_t ctx) { if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; struct cpu_offset *op; uint16_t qc; uint16_t ret = ctx->ifc_sysctl_core_offset; if (ret != CORE_OFFSET_UNSPECIFIED) return (ret); if (ctx->ifc_sysctl_separate_txrx) qc = scctx->isc_ntxqsets + scctx->isc_nrxqsets; else qc = max(scctx->isc_ntxqsets, scctx->isc_nrxqsets); mtx_lock(&cpu_offset_mtx); SLIST_FOREACH(op, &cpu_offsets, entries) { if (CPU_CMP(&ctx->ifc_cpus, &op->set) == 0) { ret = op->offset; op->offset += qc; MPASS(op->refcount < UINT_MAX); op->refcount++; break; } } if (ret == CORE_OFFSET_UNSPECIFIED) { ret = 0; op = malloc(sizeof(struct cpu_offset), M_IFLIB, M_NOWAIT | M_ZERO); if (op == NULL) { device_printf(ctx->ifc_dev, 
"allocation for cpu offset failed.\n"); } else { op->offset = qc; op->refcount = 1; CPU_COPY(&ctx->ifc_cpus, &op->set); SLIST_INSERT_HEAD(&cpu_offsets, op, entries); } } mtx_unlock(&cpu_offset_mtx); return (ret); } static void unref_ctx_core_offset(if_ctx_t ctx) { struct cpu_offset *op, *top; mtx_lock(&cpu_offset_mtx); SLIST_FOREACH_SAFE(op, &cpu_offsets, entries, top) { if (CPU_CMP(&ctx->ifc_cpus, &op->set) == 0) { MPASS(op->refcount > 0); op->refcount--; if (op->refcount == 0) { SLIST_REMOVE(&cpu_offsets, op, cpu_offset, entries); free(op, M_IFLIB); } break; } } mtx_unlock(&cpu_offset_mtx); } int iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ctxp) { if_ctx_t ctx; if_t ifp; if_softc_ctx_t scctx; kobjop_desc_t kobj_desc; kobj_method_t *kobj_method; int err, msix, rid; uint16_t main_rxq, main_txq; ctx = malloc(sizeof(* ctx), M_IFLIB, M_WAITOK|M_ZERO); if (sc == NULL) { sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO); device_set_softc(dev, ctx); ctx->ifc_flags |= IFC_SC_ALLOCATED; } ctx->ifc_sctx = sctx; ctx->ifc_dev = dev; ctx->ifc_softc = sc; if ((err = iflib_register(ctx)) != 0) { device_printf(dev, "iflib_register failed %d\n", err); goto fail_ctx_free; } iflib_add_device_sysctl_pre(ctx); scctx = &ctx->ifc_softc_ctx; ifp = ctx->ifc_ifp; iflib_reset_qvalues(ctx); CTX_LOCK(ctx); if ((err = IFDI_ATTACH_PRE(ctx)) != 0) { device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err); goto fail_unlock; } _iflib_pre_assert(scctx); ctx->ifc_txrx = *scctx->isc_txrx; if (sctx->isc_flags & IFLIB_DRIVER_MEDIA) ctx->ifc_mediap = scctx->isc_media; #ifdef INVARIANTS if (scctx->isc_capabilities & IFCAP_TXCSUM) MPASS(scctx->isc_tx_csum_flags); #endif if_setcapabilities(ifp, scctx->isc_capabilities | IFCAP_HWSTATS | IFCAP_NOMAP); if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS | IFCAP_NOMAP); if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets)) scctx->isc_ntxqsets = scctx->isc_ntxqsets_max; if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets)) scctx->isc_nrxqsets = scctx->isc_nrxqsets_max; main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0; main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0; /* XXX change for per-queue sizes */ device_printf(dev, "Using %d TX descriptors and %d RX descriptors\n", scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]); if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION) scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION); if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION) scctx->isc_tx_tso_segments_max = max(1, scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION); /* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */ if (if_getcapabilities(ifp) & IFCAP_TSO) { /* * The stack can't handle a TSO size larger than IP_MAXPACKET, * but some MACs do. */ if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max, IP_MAXPACKET)); /* * Take maximum number of m_pullup(9)'s in iflib_parse_header() * into account. In the worst case, each of these calls will * add another mbuf and, thus, the requirement for another DMA * segment. So for best performance, it doesn't make sense to * advertize a maximum of TSO segments that typically will * require defragmentation in iflib_encap(). 
*/ if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3); if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max); } if (scctx->isc_rss_table_size == 0) scctx->isc_rss_table_size = 64; scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1; GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx); /* XXX format name */ taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx, NULL, NULL, "admin"); /* Set up cpu set. If it fails, use the set of all CPUs. */ if (bus_get_cpus(dev, INTR_CPUS, sizeof(ctx->ifc_cpus), &ctx->ifc_cpus) != 0) { device_printf(dev, "Unable to fetch CPU list\n"); CPU_COPY(&all_cpus, &ctx->ifc_cpus); } MPASS(CPU_COUNT(&ctx->ifc_cpus) > 0); /* ** Now set up MSI or MSI-X, should return us the number of supported ** vectors (will be 1 for a legacy interrupt and MSI). */ if (sctx->isc_flags & IFLIB_SKIP_MSIX) { msix = scctx->isc_vectors; } else if (scctx->isc_msix_bar != 0) /* * The simple fact that isc_msix_bar is not 0 does not mean we * we have a good value there that is known to work. */ msix = iflib_msix_init(ctx); else { scctx->isc_vectors = 1; scctx->isc_ntxqsets = 1; scctx->isc_nrxqsets = 1; scctx->isc_intr = IFLIB_INTR_LEGACY; msix = 0; } /* Get memory for the station queues */ if ((err = iflib_queues_alloc(ctx))) { device_printf(dev, "Unable to allocate queue memory\n"); goto fail_intr_free; } if ((err = iflib_qset_structures_setup(ctx))) goto fail_queues; /* * Now that we know how many queues there are, get the core offset. */ ctx->ifc_sysctl_core_offset = get_ctx_core_offset(ctx); /* * Group taskqueues aren't properly set up until SMP is started, * so we disable interrupts until we can handle them post * SI_SUB_SMP. * * XXX: disabling interrupts doesn't actually work, at least for * the non-MSI case. When they occur before SI_SUB_SMP completes, * we do null handling and depend on this not causing too large an * interrupt storm. */ IFDI_INTR_DISABLE(ctx); if (msix > 1) { /* * When using MSI-X, ensure that ifdi_{r,t}x_queue_intr_enable * aren't the default NULL implementation. */ kobj_desc = &ifdi_rx_queue_intr_enable_desc; kobj_method = kobj_lookup_method(((kobj_t)ctx)->ops->cls, NULL, kobj_desc); if (kobj_method == &kobj_desc->deflt) { device_printf(dev, "MSI-X requires ifdi_rx_queue_intr_enable method"); err = EOPNOTSUPP; goto fail_queues; } kobj_desc = &ifdi_tx_queue_intr_enable_desc; kobj_method = kobj_lookup_method(((kobj_t)ctx)->ops->cls, NULL, kobj_desc); if (kobj_method == &kobj_desc->deflt) { device_printf(dev, "MSI-X requires ifdi_tx_queue_intr_enable method"); err = EOPNOTSUPP; goto fail_queues; } /* * Assign the MSI-X vectors. * Note that the default NULL ifdi_msix_intr_assign method will * fail here, too. */ err = IFDI_MSIX_INTR_ASSIGN(ctx, msix); if (err != 0) { device_printf(dev, "IFDI_MSIX_INTR_ASSIGN failed %d\n", err); goto fail_queues; } } else if (scctx->isc_intr != IFLIB_INTR_MSIX) { rid = 0; if (scctx->isc_intr == IFLIB_INTR_MSI) { MPASS(msix == 1); rid = 1; } if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx->ifc_softc, &rid, "irq0")) != 0) { device_printf(dev, "iflib_legacy_setup failed %d\n", err); goto fail_queues; } } else { device_printf(dev, "Cannot use iflib with only 1 MSI-X interrupt!\n"); err = ENODEV; goto fail_intr_free; } ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac.octet); if ((err = IFDI_ATTACH_POST(ctx)) != 0) { device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err); goto fail_detach; } /* * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported. 
* This must appear after the call to ether_ifattach() because * ether_ifattach() sets if_hdrlen to the default value. */ if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU) if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); if ((err = iflib_netmap_attach(ctx))) { device_printf(ctx->ifc_dev, "netmap attach failed: %d\n", err); goto fail_detach; } *ctxp = ctx; DEBUGNET_SET(ctx->ifc_ifp, iflib); if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter); iflib_add_device_sysctl_post(ctx); iflib_add_pfil(ctx); ctx->ifc_flags |= IFC_INIT_DONE; CTX_UNLOCK(ctx); return (0); fail_detach: ether_ifdetach(ctx->ifc_ifp); fail_intr_free: iflib_free_intr_mem(ctx); fail_queues: iflib_tx_structures_free(ctx); iflib_rx_structures_free(ctx); taskqgroup_detach(qgroup_if_config_tqg, &ctx->ifc_admin_task); IFDI_DETACH(ctx); fail_unlock: CTX_UNLOCK(ctx); iflib_deregister(ctx); fail_ctx_free: device_set_softc(ctx->ifc_dev, NULL); if (ctx->ifc_flags & IFC_SC_ALLOCATED) free(ctx->ifc_softc, M_IFLIB); free(ctx, M_IFLIB); return (err); } int iflib_pseudo_register(device_t dev, if_shared_ctx_t sctx, if_ctx_t *ctxp, struct iflib_cloneattach_ctx *clctx) { int err; if_ctx_t ctx; if_t ifp; if_softc_ctx_t scctx; int i; void *sc; uint16_t main_txq; uint16_t main_rxq; ctx = malloc(sizeof(*ctx), M_IFLIB, M_WAITOK|M_ZERO); sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO); ctx->ifc_flags |= IFC_SC_ALLOCATED; if (sctx->isc_flags & (IFLIB_PSEUDO|IFLIB_VIRTUAL)) ctx->ifc_flags |= IFC_PSEUDO; ctx->ifc_sctx = sctx; ctx->ifc_softc = sc; ctx->ifc_dev = dev; if ((err = iflib_register(ctx)) != 0) { device_printf(dev, "%s: iflib_register failed %d\n", __func__, err); goto fail_ctx_free; } iflib_add_device_sysctl_pre(ctx); scctx = &ctx->ifc_softc_ctx; ifp = ctx->ifc_ifp; iflib_reset_qvalues(ctx); CTX_LOCK(ctx); if ((err = IFDI_ATTACH_PRE(ctx)) != 0) { device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err); goto fail_unlock; } if (sctx->isc_flags & IFLIB_GEN_MAC) ether_gen_addr(ifp, &ctx->ifc_mac); if ((err = IFDI_CLONEATTACH(ctx, clctx->cc_ifc, clctx->cc_name, clctx->cc_params)) != 0) { device_printf(dev, "IFDI_CLONEATTACH failed %d\n", err); goto fail_ctx_free; } ifmedia_add(ctx->ifc_mediap, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); ifmedia_add(ctx->ifc_mediap, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(ctx->ifc_mediap, IFM_ETHER | IFM_AUTO); #ifdef INVARIANTS if (scctx->isc_capabilities & IFCAP_TXCSUM) MPASS(scctx->isc_tx_csum_flags); #endif if_setcapabilities(ifp, scctx->isc_capabilities | IFCAP_HWSTATS | IFCAP_LINKSTATE); if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS | IFCAP_LINKSTATE); ifp->if_flags |= IFF_NOGROUP; if (sctx->isc_flags & IFLIB_PSEUDO) { ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac.octet); if ((err = IFDI_ATTACH_POST(ctx)) != 0) { device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err); goto fail_detach; } *ctxp = ctx; /* * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported. * This must appear after the call to ether_ifattach() because * ether_ifattach() sets if_hdrlen to the default value. 
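	 * (i.e. if_hdrlen is raised to sizeof(struct ether_vlan_header), so
	 * MTU calculations leave room for the 4-byte 802.1Q tag)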
*/ if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU) if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter); iflib_add_device_sysctl_post(ctx); ctx->ifc_flags |= IFC_INIT_DONE; return (0); } _iflib_pre_assert(scctx); ctx->ifc_txrx = *scctx->isc_txrx; if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets)) scctx->isc_ntxqsets = scctx->isc_ntxqsets_max; if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets)) scctx->isc_nrxqsets = scctx->isc_nrxqsets_max; main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0; main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0; /* XXX change for per-queue sizes */ device_printf(dev, "Using %d TX descriptors and %d RX descriptors\n", scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]); if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION) scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION); if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION) scctx->isc_tx_tso_segments_max = max(1, scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION); /* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */ if (if_getcapabilities(ifp) & IFCAP_TSO) { /* * The stack can't handle a TSO size larger than IP_MAXPACKET, * but some MACs do. */ if_sethwtsomax(ifp, min(scctx->isc_tx_tso_size_max, IP_MAXPACKET)); /* * Take maximum number of m_pullup(9)'s in iflib_parse_header() * into account. In the worst case, each of these calls will * add another mbuf and, thus, the requirement for another DMA * segment. So for best performance, it doesn't make sense to * advertize a maximum of TSO segments that typically will * require defragmentation in iflib_encap(). */ if_sethwtsomaxsegcount(ifp, scctx->isc_tx_tso_segments_max - 3); if_sethwtsomaxsegsize(ifp, scctx->isc_tx_tso_segsize_max); } if (scctx->isc_rss_table_size == 0) scctx->isc_rss_table_size = 64; scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1; GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx); /* XXX format name */ taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx, NULL, NULL, "admin"); /* XXX --- can support > 1 -- but keep it simple for now */ scctx->isc_intr = IFLIB_INTR_LEGACY; /* Get memory for the station queues */ if ((err = iflib_queues_alloc(ctx))) { device_printf(dev, "Unable to allocate queue memory\n"); goto fail_iflib_detach; } if ((err = iflib_qset_structures_setup(ctx))) { device_printf(dev, "qset structure setup failed %d\n", err); goto fail_queues; } /* * XXX What if anything do we want to do about interrupts? */ ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac.octet); if ((err = IFDI_ATTACH_POST(ctx)) != 0) { device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err); goto fail_detach; } /* * Tell the upper layer(s) if IFCAP_VLAN_MTU is supported. * This must appear after the call to ether_ifattach() because * ether_ifattach() sets if_hdrlen to the default value. 
*/ if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU) if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); /* XXX handle more than one queue */ for (i = 0; i < scctx->isc_nrxqsets; i++) IFDI_RX_CLSET(ctx, 0, i, ctx->ifc_rxqs[i].ifr_fl[0].ifl_sds.ifsd_cl); *ctxp = ctx; if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter); iflib_add_device_sysctl_post(ctx); ctx->ifc_flags |= IFC_INIT_DONE; CTX_UNLOCK(ctx); return (0); fail_detach: ether_ifdetach(ctx->ifc_ifp); fail_queues: iflib_tx_structures_free(ctx); iflib_rx_structures_free(ctx); fail_iflib_detach: IFDI_DETACH(ctx); fail_unlock: CTX_UNLOCK(ctx); iflib_deregister(ctx); fail_ctx_free: free(ctx->ifc_softc, M_IFLIB); free(ctx, M_IFLIB); return (err); } int iflib_pseudo_deregister(if_ctx_t ctx) { if_t ifp = ctx->ifc_ifp; iflib_txq_t txq; iflib_rxq_t rxq; int i, j; struct taskqgroup *tqg; iflib_fl_t fl; /* Unregister VLAN event handlers early */ iflib_unregister_vlan_handlers(ctx); ether_ifdetach(ifp); /* XXX drain any dependent tasks */ tqg = qgroup_if_io_tqg; for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) { callout_drain(&txq->ift_timer); if (txq->ift_task.gt_uniq != NULL) taskqgroup_detach(tqg, &txq->ift_task); } for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) { if (rxq->ifr_task.gt_uniq != NULL) taskqgroup_detach(tqg, &rxq->ifr_task); for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) free(fl->ifl_rx_bitmap, M_IFLIB); } tqg = qgroup_if_config_tqg; if (ctx->ifc_admin_task.gt_uniq != NULL) taskqgroup_detach(tqg, &ctx->ifc_admin_task); if (ctx->ifc_vflr_task.gt_uniq != NULL) taskqgroup_detach(tqg, &ctx->ifc_vflr_task); iflib_tx_structures_free(ctx); iflib_rx_structures_free(ctx); iflib_deregister(ctx); if (ctx->ifc_flags & IFC_SC_ALLOCATED) free(ctx->ifc_softc, M_IFLIB); free(ctx, M_IFLIB); return (0); } int iflib_device_attach(device_t dev) { if_ctx_t ctx; if_shared_ctx_t sctx; if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC) return (ENOTSUP); pci_enable_busmaster(dev); return (iflib_device_register(dev, NULL, sctx, &ctx)); } int iflib_device_deregister(if_ctx_t ctx) { if_t ifp = ctx->ifc_ifp; iflib_txq_t txq; iflib_rxq_t rxq; device_t dev = ctx->ifc_dev; int i, j; struct taskqgroup *tqg; iflib_fl_t fl; /* Make sure VLANS are not using driver */ if (if_vlantrunkinuse(ifp)) { device_printf(dev, "Vlan in use, detach first\n"); return (EBUSY); } #ifdef PCI_IOV if (!CTX_IS_VF(ctx) && pci_iov_detach(dev) != 0) { device_printf(dev, "SR-IOV in use; detach first.\n"); return (EBUSY); } #endif STATE_LOCK(ctx); ctx->ifc_flags |= IFC_IN_DETACH; STATE_UNLOCK(ctx); /* Unregister VLAN handlers before calling iflib_stop() */ iflib_unregister_vlan_handlers(ctx); iflib_netmap_detach(ifp); ether_ifdetach(ifp); CTX_LOCK(ctx); iflib_stop(ctx); CTX_UNLOCK(ctx); iflib_rem_pfil(ctx); if (ctx->ifc_led_dev != NULL) led_destroy(ctx->ifc_led_dev); /* XXX drain any dependent tasks */ tqg = qgroup_if_io_tqg; for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) { callout_drain(&txq->ift_timer); if (txq->ift_task.gt_uniq != NULL) taskqgroup_detach(tqg, &txq->ift_task); } for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) { if (rxq->ifr_task.gt_uniq != NULL) taskqgroup_detach(tqg, &rxq->ifr_task); for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) free(fl->ifl_rx_bitmap, M_IFLIB); } tqg = qgroup_if_config_tqg; if (ctx->ifc_admin_task.gt_uniq != NULL) taskqgroup_detach(tqg, &ctx->ifc_admin_task); if (ctx->ifc_vflr_task.gt_uniq != NULL) taskqgroup_detach(tqg, 
&ctx->ifc_vflr_task); CTX_LOCK(ctx); IFDI_DETACH(ctx); CTX_UNLOCK(ctx); /* ether_ifdetach calls if_qflush - lock must be destroy afterwards*/ iflib_free_intr_mem(ctx); bus_generic_detach(dev); iflib_tx_structures_free(ctx); iflib_rx_structures_free(ctx); iflib_deregister(ctx); device_set_softc(ctx->ifc_dev, NULL); if (ctx->ifc_flags & IFC_SC_ALLOCATED) free(ctx->ifc_softc, M_IFLIB); unref_ctx_core_offset(ctx); free(ctx, M_IFLIB); return (0); } static void iflib_free_intr_mem(if_ctx_t ctx) { if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_MSIX) { iflib_irq_free(ctx, &ctx->ifc_legacy_irq); } if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_LEGACY) { pci_release_msi(ctx->ifc_dev); } if (ctx->ifc_msix_mem != NULL) { bus_release_resource(ctx->ifc_dev, SYS_RES_MEMORY, rman_get_rid(ctx->ifc_msix_mem), ctx->ifc_msix_mem); ctx->ifc_msix_mem = NULL; } } int iflib_device_detach(device_t dev) { if_ctx_t ctx = device_get_softc(dev); return (iflib_device_deregister(ctx)); } int iflib_device_suspend(device_t dev) { if_ctx_t ctx = device_get_softc(dev); CTX_LOCK(ctx); IFDI_SUSPEND(ctx); CTX_UNLOCK(ctx); return bus_generic_suspend(dev); } int iflib_device_shutdown(device_t dev) { if_ctx_t ctx = device_get_softc(dev); CTX_LOCK(ctx); IFDI_SHUTDOWN(ctx); CTX_UNLOCK(ctx); return bus_generic_suspend(dev); } int iflib_device_resume(device_t dev) { if_ctx_t ctx = device_get_softc(dev); iflib_txq_t txq = ctx->ifc_txqs; CTX_LOCK(ctx); IFDI_RESUME(ctx); iflib_if_init_locked(ctx); CTX_UNLOCK(ctx); for (int i = 0; i < NTXQSETS(ctx); i++, txq++) iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET); return (bus_generic_resume(dev)); } int iflib_device_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params) { int error; if_ctx_t ctx = device_get_softc(dev); CTX_LOCK(ctx); error = IFDI_IOV_INIT(ctx, num_vfs, params); CTX_UNLOCK(ctx); return (error); } void iflib_device_iov_uninit(device_t dev) { if_ctx_t ctx = device_get_softc(dev); CTX_LOCK(ctx); IFDI_IOV_UNINIT(ctx); CTX_UNLOCK(ctx); } int iflib_device_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params) { int error; if_ctx_t ctx = device_get_softc(dev); CTX_LOCK(ctx); error = IFDI_IOV_VF_ADD(ctx, vfnum, params); CTX_UNLOCK(ctx); return (error); } /********************************************************************* * * MODULE FUNCTION DEFINITIONS * **********************************************************************/ /* * - Start a fast taskqueue thread for each core * - Start a taskqueue for control operations */ static int iflib_module_init(void) { return (0); } static int iflib_module_event_handler(module_t mod, int what, void *arg) { int err; switch (what) { case MOD_LOAD: if ((err = iflib_module_init()) != 0) return (err); break; case MOD_UNLOAD: return (EBUSY); default: return (EOPNOTSUPP); } return (0); } /********************************************************************* * * PUBLIC FUNCTION DEFINITIONS * ordered as in iflib.h * **********************************************************************/ static void _iflib_assert(if_shared_ctx_t sctx) { int i; MPASS(sctx->isc_tx_maxsize); MPASS(sctx->isc_tx_maxsegsize); MPASS(sctx->isc_rx_maxsize); MPASS(sctx->isc_rx_nsegments); MPASS(sctx->isc_rx_maxsegsize); MPASS(sctx->isc_nrxqs >= 1 && sctx->isc_nrxqs <= 8); for (i = 0; i < sctx->isc_nrxqs; i++) { MPASS(sctx->isc_nrxd_min[i]); MPASS(powerof2(sctx->isc_nrxd_min[i])); MPASS(sctx->isc_nrxd_max[i]); MPASS(powerof2(sctx->isc_nrxd_max[i])); MPASS(sctx->isc_nrxd_default[i]); MPASS(powerof2(sctx->isc_nrxd_default[i])); } MPASS(sctx->isc_ntxqs >= 1 && 
sctx->isc_ntxqs <= 8); for (i = 0; i < sctx->isc_ntxqs; i++) { MPASS(sctx->isc_ntxd_min[i]); MPASS(powerof2(sctx->isc_ntxd_min[i])); MPASS(sctx->isc_ntxd_max[i]); MPASS(powerof2(sctx->isc_ntxd_max[i])); MPASS(sctx->isc_ntxd_default[i]); MPASS(powerof2(sctx->isc_ntxd_default[i])); } } static void _iflib_pre_assert(if_softc_ctx_t scctx) { MPASS(scctx->isc_txrx->ift_txd_encap); MPASS(scctx->isc_txrx->ift_txd_flush); MPASS(scctx->isc_txrx->ift_txd_credits_update); MPASS(scctx->isc_txrx->ift_rxd_available); MPASS(scctx->isc_txrx->ift_rxd_pkt_get); MPASS(scctx->isc_txrx->ift_rxd_refill); MPASS(scctx->isc_txrx->ift_rxd_flush); } static int iflib_register(if_ctx_t ctx) { if_shared_ctx_t sctx = ctx->ifc_sctx; driver_t *driver = sctx->isc_driver; device_t dev = ctx->ifc_dev; if_t ifp; _iflib_assert(sctx); CTX_LOCK_INIT(ctx); STATE_LOCK_INIT(ctx, device_get_nameunit(ctx->ifc_dev)); ifp = ctx->ifc_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not allocate ifnet structure\n"); return (ENOMEM); } /* * Initialize our context's device specific methods */ kobj_init((kobj_t) ctx, (kobj_class_t) driver); kobj_class_compile((kobj_class_t) driver); if_initname(ifp, device_get_name(dev), device_get_unit(dev)); if_setsoftc(ifp, ctx); if_setdev(ifp, dev); if_setinitfn(ifp, iflib_if_init); if_setioctlfn(ifp, iflib_if_ioctl); #ifdef ALTQ if_setstartfn(ifp, iflib_altq_if_start); if_settransmitfn(ifp, iflib_altq_if_transmit); if_setsendqready(ifp); #else if_settransmitfn(ifp, iflib_if_transmit); #endif if_setqflushfn(ifp, iflib_if_qflush); if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); ctx->ifc_vlan_attach_event = EVENTHANDLER_REGISTER(vlan_config, iflib_vlan_register, ctx, EVENTHANDLER_PRI_FIRST); ctx->ifc_vlan_detach_event = EVENTHANDLER_REGISTER(vlan_unconfig, iflib_vlan_unregister, ctx, EVENTHANDLER_PRI_FIRST); if ((sctx->isc_flags & IFLIB_DRIVER_MEDIA) == 0) { ctx->ifc_mediap = &ctx->ifc_media; ifmedia_init(ctx->ifc_mediap, IFM_IMASK, iflib_media_change, iflib_media_status); } return (0); } static void iflib_unregister_vlan_handlers(if_ctx_t ctx) { /* Unregister VLAN events */ if (ctx->ifc_vlan_attach_event != NULL) { EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event); ctx->ifc_vlan_attach_event = NULL; } if (ctx->ifc_vlan_detach_event != NULL) { EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event); ctx->ifc_vlan_detach_event = NULL; } } static void iflib_deregister(if_ctx_t ctx) { if_t ifp = ctx->ifc_ifp; /* Remove all media */ ifmedia_removeall(&ctx->ifc_media); /* Ensure that VLAN event handlers are unregistered */ iflib_unregister_vlan_handlers(ctx); /* Release kobject reference */ kobj_delete((kobj_t) ctx, NULL); /* Free the ifnet structure */ if_free(ifp); STATE_LOCK_DESTROY(ctx); /* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */ CTX_LOCK_DESTROY(ctx); } static int iflib_queues_alloc(if_ctx_t ctx) { if_shared_ctx_t sctx = ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; device_t dev = ctx->ifc_dev; int nrxqsets = scctx->isc_nrxqsets; int ntxqsets = scctx->isc_ntxqsets; iflib_txq_t txq; iflib_rxq_t rxq; iflib_fl_t fl = NULL; int i, j, cpu, err, txconf, rxconf; iflib_dma_info_t ifdip; uint32_t *rxqsizes = scctx->isc_rxqsizes; uint32_t *txqsizes = scctx->isc_txqsizes; uint8_t nrxqs = sctx->isc_nrxqs; uint8_t ntxqs = sctx->isc_ntxqs; int nfree_lists = sctx->isc_nfl ?
sctx->isc_nfl : 1; caddr_t *vaddrs; uint64_t *paddrs; KASSERT(ntxqs > 0, ("number of queues per qset must be at least 1")); KASSERT(nrxqs > 0, ("number of queues per qset must be at least 1")); /* Allocate the TX ring struct memory */ if (!(ctx->ifc_txqs = (iflib_txq_t) malloc(sizeof(struct iflib_txq) * ntxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate TX ring memory\n"); err = ENOMEM; goto fail; } /* Now allocate the RX */ if (!(ctx->ifc_rxqs = (iflib_rxq_t) malloc(sizeof(struct iflib_rxq) * nrxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate RX ring memory\n"); err = ENOMEM; goto rx_fail; } txq = ctx->ifc_txqs; rxq = ctx->ifc_rxqs; /* * XXX handle allocation failure */ for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) { /* Set up some basics */ if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs, M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) { device_printf(dev, "Unable to allocate TX DMA info memory\n"); err = ENOMEM; goto err_tx_desc; } txq->ift_ifdi = ifdip; for (j = 0; j < ntxqs; j++, ifdip++) { if (iflib_dma_alloc(ctx, txqsizes[j], ifdip, 0)) { device_printf(dev, "Unable to allocate TX descriptors\n"); err = ENOMEM; goto err_tx_desc; } txq->ift_txd_size[j] = scctx->isc_txd_size[j]; bzero((void *)ifdip->idi_vaddr, txqsizes[j]); } txq->ift_ctx = ctx; txq->ift_id = i; if (sctx->isc_flags & IFLIB_HAS_TXCQ) { txq->ift_br_offset = 1; } else { txq->ift_br_offset = 0; } /* XXX fix this */ txq->ift_timer.c_cpu = cpu; if (iflib_txsd_alloc(txq)) { device_printf(dev, "Critical Failure setting up TX buffers\n"); err = ENOMEM; goto err_tx_desc; } /* Initialize the TX lock */ snprintf(txq->ift_mtx_name, MTX_NAME_LEN, "%s:TX(%d):callout", device_get_nameunit(dev), txq->ift_id); mtx_init(&txq->ift_mtx, txq->ift_mtx_name, NULL, MTX_DEF); callout_init_mtx(&txq->ift_timer, &txq->ift_mtx, 0); err = ifmp_ring_alloc(&txq->ift_br, 2048, txq, iflib_txq_drain, iflib_txq_can_drain, M_IFLIB, M_WAITOK); if (err) { /* XXX free any allocated rings */ device_printf(dev, "Unable to allocate buf_ring\n"); goto err_tx_desc; } } for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) { /* Set up some basics */ if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs, M_IFLIB, M_NOWAIT | M_ZERO)) == NULL) { device_printf(dev, "Unable to allocate RX DMA info memory\n"); err = ENOMEM; goto err_tx_desc; } rxq->ifr_ifdi = ifdip; /* XXX this needs to be changed if #rx queues != #tx queues */ rxq->ifr_ntxqirq = 1; rxq->ifr_txqid[0] = i; for (j = 0; j < nrxqs; j++, ifdip++) { if (iflib_dma_alloc(ctx, rxqsizes[j], ifdip, 0)) { device_printf(dev, "Unable to allocate RX descriptors\n"); err = ENOMEM; goto err_tx_desc; } bzero((void *)ifdip->idi_vaddr, rxqsizes[j]); } rxq->ifr_ctx = ctx; rxq->ifr_id = i; if (sctx->isc_flags & IFLIB_HAS_RXCQ) { rxq->ifr_fl_offset = 1; } else { rxq->ifr_fl_offset = 0; } rxq->ifr_nfl = nfree_lists; if (!(fl = (iflib_fl_t) malloc(sizeof(struct iflib_fl) * nfree_lists, M_IFLIB, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate free list memory\n"); err = ENOMEM; goto err_tx_desc; } rxq->ifr_fl = fl; for (j = 0; j < nfree_lists; j++) { fl[j].ifl_rxq = rxq; fl[j].ifl_id = j; fl[j].ifl_ifdi = &rxq->ifr_ifdi[j + rxq->ifr_fl_offset]; fl[j].ifl_rxd_size = scctx->isc_rxd_size[j]; } /* Allocate receive buffers for the ring */ if (iflib_rxsd_alloc(rxq)) { device_printf(dev, "Critical Failure setting up receive buffers\n"); err = ENOMEM; goto err_rx_desc; } for (j = 0, fl = rxq->ifr_fl; j < 
rxq->ifr_nfl; j++, fl++) fl->ifl_rx_bitmap = bit_alloc(fl->ifl_size, M_IFLIB, M_WAITOK); } /* TXQs */ vaddrs = malloc(sizeof(caddr_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK); paddrs = malloc(sizeof(uint64_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK); for (i = 0; i < ntxqsets; i++) { iflib_dma_info_t di = ctx->ifc_txqs[i].ift_ifdi; for (j = 0; j < ntxqs; j++, di++) { vaddrs[i*ntxqs + j] = di->idi_vaddr; paddrs[i*ntxqs + j] = di->idi_paddr; } } if ((err = IFDI_TX_QUEUES_ALLOC(ctx, vaddrs, paddrs, ntxqs, ntxqsets)) != 0) { device_printf(ctx->ifc_dev, "Unable to allocate device TX queue\n"); iflib_tx_structures_free(ctx); free(vaddrs, M_IFLIB); free(paddrs, M_IFLIB); goto err_rx_desc; } free(vaddrs, M_IFLIB); free(paddrs, M_IFLIB); /* RXQs */ vaddrs = malloc(sizeof(caddr_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK); paddrs = malloc(sizeof(uint64_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK); for (i = 0; i < nrxqsets; i++) { iflib_dma_info_t di = ctx->ifc_rxqs[i].ifr_ifdi; for (j = 0; j < nrxqs; j++, di++) { vaddrs[i*nrxqs + j] = di->idi_vaddr; paddrs[i*nrxqs + j] = di->idi_paddr; } } if ((err = IFDI_RX_QUEUES_ALLOC(ctx, vaddrs, paddrs, nrxqs, nrxqsets)) != 0) { device_printf(ctx->ifc_dev, "Unable to allocate device RX queue\n"); iflib_tx_structures_free(ctx); free(vaddrs, M_IFLIB); free(paddrs, M_IFLIB); goto err_rx_desc; } free(vaddrs, M_IFLIB); free(paddrs, M_IFLIB); return (0); /* XXX handle allocation failure changes */ err_rx_desc: err_tx_desc: rx_fail: if (ctx->ifc_rxqs != NULL) free(ctx->ifc_rxqs, M_IFLIB); ctx->ifc_rxqs = NULL; if (ctx->ifc_txqs != NULL) free(ctx->ifc_txqs, M_IFLIB); ctx->ifc_txqs = NULL; fail: return (err); } static int iflib_tx_structures_setup(if_ctx_t ctx) { iflib_txq_t txq = ctx->ifc_txqs; int i; for (i = 0; i < NTXQSETS(ctx); i++, txq++) iflib_txq_setup(txq); return (0); } static void iflib_tx_structures_free(if_ctx_t ctx) { iflib_txq_t txq = ctx->ifc_txqs; if_shared_ctx_t sctx = ctx->ifc_sctx; int i, j; for (i = 0; i < NTXQSETS(ctx); i++, txq++) { for (j = 0; j < sctx->isc_ntxqs; j++) iflib_dma_free(&txq->ift_ifdi[j]); iflib_txq_destroy(txq); } free(ctx->ifc_txqs, M_IFLIB); ctx->ifc_txqs = NULL; IFDI_QUEUES_FREE(ctx); } /********************************************************************* * * Initialize all receive rings. * **********************************************************************/ static int iflib_rx_structures_setup(if_ctx_t ctx) { iflib_rxq_t rxq = ctx->ifc_rxqs; int q; #if defined(INET6) || defined(INET) int err, i; #endif for (q = 0; q < ctx->ifc_softc_ctx.isc_nrxqsets; q++, rxq++) { #if defined(INET6) || defined(INET) if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_LRO) { err = tcp_lro_init_args(&rxq->ifr_lc, ctx->ifc_ifp, TCP_LRO_ENTRIES, min(1024, ctx->ifc_softc_ctx.isc_nrxd[rxq->ifr_fl_offset])); if (err != 0) { device_printf(ctx->ifc_dev, "LRO Initialization failed!\n"); goto fail; } } #endif IFDI_RXQ_SETUP(ctx, rxq->ifr_id); } return (0); #if defined(INET6) || defined(INET) fail: /* * Free LRO resources allocated so far, we will only handle * the rings that completed, the failing case will have * cleaned up for itself. 'q' failed, so it's the terminus. */ rxq = ctx->ifc_rxqs; for (i = 0; i < q; ++i, rxq++) { if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_LRO) tcp_lro_free(&rxq->ifr_lc); } return (err); #endif } /********************************************************************* * * Free all receive rings.
* **********************************************************************/ static void iflib_rx_structures_free(if_ctx_t ctx) { iflib_rxq_t rxq = ctx->ifc_rxqs; if_shared_ctx_t sctx = ctx->ifc_sctx; int i, j; for (i = 0; i < ctx->ifc_softc_ctx.isc_nrxqsets; i++, rxq++) { for (j = 0; j < sctx->isc_nrxqs; j++) iflib_dma_free(&rxq->ifr_ifdi[j]); iflib_rx_sds_free(rxq); #if defined(INET6) || defined(INET) if (if_getcapabilities(ctx->ifc_ifp) & IFCAP_LRO) tcp_lro_free(&rxq->ifr_lc); #endif } free(ctx->ifc_rxqs, M_IFLIB); ctx->ifc_rxqs = NULL; } static int iflib_qset_structures_setup(if_ctx_t ctx) { int err; /* * It is expected that the caller takes care of freeing queues if this * fails. */ if ((err = iflib_tx_structures_setup(ctx)) != 0) { device_printf(ctx->ifc_dev, "iflib_tx_structures_setup failed: %d\n", err); return (err); } if ((err = iflib_rx_structures_setup(ctx)) != 0) device_printf(ctx->ifc_dev, "iflib_rx_structures_setup failed: %d\n", err); return (err); } int iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid, driver_filter_t filter, void *filter_arg, driver_intr_t handler, void *arg, const char *name) { return (_iflib_irq_alloc(ctx, irq, rid, filter, handler, arg, name)); } #ifdef SMP static int find_nth(if_ctx_t ctx, int qid) { cpuset_t cpus; int i, cpuid, eqid, count; CPU_COPY(&ctx->ifc_cpus, &cpus); count = CPU_COUNT(&cpus); eqid = qid % count; /* clear up to the qid'th bit */ for (i = 0; i < eqid; i++) { cpuid = CPU_FFS(&cpus); MPASS(cpuid != 0); CPU_CLR(cpuid-1, &cpus); } cpuid = CPU_FFS(&cpus); MPASS(cpuid != 0); return (cpuid-1); } #ifdef SCHED_ULE extern struct cpu_group *cpu_top; /* CPU topology */ static int find_child_with_core(int cpu, struct cpu_group *grp) { int i; if (grp->cg_children == 0) return -1; MPASS(grp->cg_child); for (i = 0; i < grp->cg_children; i++) { if (CPU_ISSET(cpu, &grp->cg_child[i].cg_mask)) return i; } return -1; } /* * Find the nth "close" core to the specified core * "close" is defined as the deepest level that shares * at least an L2 cache. With threads, this will be * threads on the same core. If the shared cache is L3 * or higher, simply returns the same core. */ static int find_close_core(int cpu, int core_offset) { struct cpu_group *grp; int i; int fcpu; cpuset_t cs; grp = cpu_top; if (grp == NULL) return cpu; i = 0; while ((i = find_child_with_core(cpu, grp)) != -1) { /* If the child only has one cpu, don't descend */ if (grp->cg_child[i].cg_count <= 1) break; grp = &grp->cg_child[i]; } /* If they don't share at least an L2 cache, use the same CPU */ if (grp->cg_level > CG_SHARE_L2 || grp->cg_level == CG_SHARE_NONE) return cpu; /* Now pick one */ CPU_COPY(&grp->cg_mask, &cs); /* Add the selected CPU offset to core offset. 
*/ for (i = 0; (fcpu = CPU_FFS(&cs)) != 0; i++) { if (fcpu - 1 == cpu) break; CPU_CLR(fcpu - 1, &cs); } MPASS(fcpu); core_offset += i; CPU_COPY(&grp->cg_mask, &cs); for (i = core_offset % grp->cg_count; i > 0; i--) { MPASS(CPU_FFS(&cs)); CPU_CLR(CPU_FFS(&cs) - 1, &cs); } MPASS(CPU_FFS(&cs)); return CPU_FFS(&cs) - 1; } #else static int find_close_core(int cpu, int core_offset __unused) { return cpu; } #endif static int get_core_offset(if_ctx_t ctx, iflib_intr_type_t type, int qid) { switch (type) { case IFLIB_INTR_TX: /* TX queues get cores which share at least an L2 cache with the corresponding RX queue */ /* XXX handle multiple RX threads per core and more than two cores per L2 group */ return qid / CPU_COUNT(&ctx->ifc_cpus) + 1; case IFLIB_INTR_RX: case IFLIB_INTR_RXTX: /* RX queues get the specified core */ return qid / CPU_COUNT(&ctx->ifc_cpus); default: return -1; } } #else #define get_core_offset(ctx, type, qid) CPU_FIRST() #define find_close_core(cpuid, tid) CPU_FIRST() #define find_nth(ctx, gid) CPU_FIRST() #endif /* Just to avoid copy/paste */ static inline int iflib_irq_set_affinity(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type, int qid, struct grouptask *gtask, struct taskqgroup *tqg, void *uniq, const char *name) { device_t dev; int co, cpuid, err, tid; dev = ctx->ifc_dev; co = ctx->ifc_sysctl_core_offset; if (ctx->ifc_sysctl_separate_txrx && type == IFLIB_INTR_TX) co += ctx->ifc_softc_ctx.isc_nrxqsets; cpuid = find_nth(ctx, qid + co); tid = get_core_offset(ctx, type, qid); if (tid < 0) { device_printf(dev, "get_core_offset failed\n"); return (EOPNOTSUPP); } cpuid = find_close_core(cpuid, tid); err = taskqgroup_attach_cpu(tqg, gtask, uniq, cpuid, dev, irq->ii_res, name); if (err) { device_printf(dev, "taskqgroup_attach_cpu failed %d\n", err); return (err); } #ifdef notyet if (cpuid > ctx->ifc_cpuid_highest) ctx->ifc_cpuid_highest = cpuid; #endif return (0); } int iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid, iflib_intr_type_t type, driver_filter_t *filter, void *filter_arg, int qid, const char *name) { device_t dev; struct grouptask *gtask; struct taskqgroup *tqg; iflib_filter_info_t info; gtask_fn_t *fn; int tqrid, err; driver_filter_t *intr_fast; void *q; info = &ctx->ifc_filter_info; tqrid = rid; switch (type) { /* XXX merge tx/rx for netmap?
*/ case IFLIB_INTR_TX: q = &ctx->ifc_txqs[qid]; info = &ctx->ifc_txqs[qid].ift_filter_info; gtask = &ctx->ifc_txqs[qid].ift_task; tqg = qgroup_if_io_tqg; fn = _task_fn_tx; intr_fast = iflib_fast_intr; GROUPTASK_INIT(gtask, 0, fn, q); ctx->ifc_flags |= IFC_NETMAP_TX_IRQ; break; case IFLIB_INTR_RX: q = &ctx->ifc_rxqs[qid]; info = &ctx->ifc_rxqs[qid].ifr_filter_info; gtask = &ctx->ifc_rxqs[qid].ifr_task; tqg = qgroup_if_io_tqg; fn = _task_fn_rx; intr_fast = iflib_fast_intr; - GROUPTASK_INIT(gtask, 0, fn, q); + NET_GROUPTASK_INIT(gtask, 0, fn, q); break; case IFLIB_INTR_RXTX: q = &ctx->ifc_rxqs[qid]; info = &ctx->ifc_rxqs[qid].ifr_filter_info; gtask = &ctx->ifc_rxqs[qid].ifr_task; tqg = qgroup_if_io_tqg; fn = _task_fn_rx; intr_fast = iflib_fast_intr_rxtx; - GROUPTASK_INIT(gtask, 0, fn, q); + NET_GROUPTASK_INIT(gtask, 0, fn, q); break; case IFLIB_INTR_ADMIN: q = ctx; tqrid = -1; info = &ctx->ifc_filter_info; gtask = &ctx->ifc_admin_task; tqg = qgroup_if_config_tqg; fn = _task_fn_admin; intr_fast = iflib_fast_intr_ctx; break; default: device_printf(ctx->ifc_dev, "%s: unknown net intr type\n", __func__); return (EINVAL); } info->ifi_filter = filter; info->ifi_filter_arg = filter_arg; info->ifi_task = gtask; info->ifi_ctx = q; dev = ctx->ifc_dev; err = _iflib_irq_alloc(ctx, irq, rid, intr_fast, NULL, info, name); if (err != 0) { device_printf(dev, "_iflib_irq_alloc failed %d\n", err); return (err); } if (type == IFLIB_INTR_ADMIN) return (0); if (tqrid != -1) { err = iflib_irq_set_affinity(ctx, irq, type, qid, gtask, tqg, q, name); if (err) return (err); } else { taskqgroup_attach(tqg, gtask, q, dev, irq->ii_res, name); } return (0); } void iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type, void *arg, int qid, const char *name) { struct grouptask *gtask; struct taskqgroup *tqg; gtask_fn_t *fn; void *q; int err; switch (type) { case IFLIB_INTR_TX: q = &ctx->ifc_txqs[qid]; gtask = &ctx->ifc_txqs[qid].ift_task; tqg = qgroup_if_io_tqg; fn = _task_fn_tx; break; case IFLIB_INTR_RX: q = &ctx->ifc_rxqs[qid]; gtask = &ctx->ifc_rxqs[qid].ifr_task; tqg = qgroup_if_io_tqg; fn = _task_fn_rx; break; case IFLIB_INTR_IOV: q = ctx; gtask = &ctx->ifc_vflr_task; tqg = qgroup_if_config_tqg; fn = _task_fn_iov; break; default: panic("unknown net intr type"); } GROUPTASK_INIT(gtask, 0, fn, q); if (irq != NULL) { err = iflib_irq_set_affinity(ctx, irq, type, qid, gtask, tqg, q, name); if (err) taskqgroup_attach(tqg, gtask, q, ctx->ifc_dev, irq->ii_res, name); } else { taskqgroup_attach(tqg, gtask, q, NULL, NULL, name); } } void iflib_irq_free(if_ctx_t ctx, if_irq_t irq) { if (irq->ii_tag) bus_teardown_intr(ctx->ifc_dev, irq->ii_res, irq->ii_tag); if (irq->ii_res) bus_release_resource(ctx->ifc_dev, SYS_RES_IRQ, rman_get_rid(irq->ii_res), irq->ii_res); } static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg, int *rid, const char *name) { iflib_txq_t txq = ctx->ifc_txqs; iflib_rxq_t rxq = ctx->ifc_rxqs; if_irq_t irq = &ctx->ifc_legacy_irq; iflib_filter_info_t info; device_t dev; struct grouptask *gtask; struct resource *res; struct taskqgroup *tqg; gtask_fn_t *fn; void *q; int err, tqrid; bool rx_only; q = &ctx->ifc_rxqs[0]; info = &rxq[0].ifr_filter_info; gtask = &rxq[0].ifr_task; tqg = qgroup_if_io_tqg; tqrid = *rid; fn = _task_fn_rx; rx_only = (ctx->ifc_sctx->isc_flags & IFLIB_SINGLE_IRQ_RX_ONLY) != 0; ctx->ifc_flags |= IFC_LEGACY; info->ifi_filter = filter; info->ifi_filter_arg = filter_arg; info->ifi_task = gtask; info->ifi_ctx = rx_only ? 
ctx : q; dev = ctx->ifc_dev; /* We allocate a single interrupt resource */ err = _iflib_irq_alloc(ctx, irq, tqrid, rx_only ? iflib_fast_intr_ctx : iflib_fast_intr_rxtx, NULL, info, name); if (err != 0) return (err); GROUPTASK_INIT(gtask, 0, fn, q); res = irq->ii_res; taskqgroup_attach(tqg, gtask, q, dev, res, name); GROUPTASK_INIT(&txq->ift_task, 0, _task_fn_tx, txq); taskqgroup_attach(qgroup_if_io_tqg, &txq->ift_task, txq, dev, res, "tx"); return (0); } void iflib_led_create(if_ctx_t ctx) { ctx->ifc_led_dev = led_create(iflib_led_func, ctx, device_get_nameunit(ctx->ifc_dev)); } void iflib_tx_intr_deferred(if_ctx_t ctx, int txqid) { GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task); } void iflib_rx_intr_deferred(if_ctx_t ctx, int rxqid) { GROUPTASK_ENQUEUE(&ctx->ifc_rxqs[rxqid].ifr_task); } void iflib_admin_intr_deferred(if_ctx_t ctx) { #ifdef INVARIANTS struct grouptask *gtask; gtask = &ctx->ifc_admin_task; MPASS(gtask != NULL && gtask->gt_taskqueue != NULL); #endif GROUPTASK_ENQUEUE(&ctx->ifc_admin_task); } void iflib_iov_intr_deferred(if_ctx_t ctx) { GROUPTASK_ENQUEUE(&ctx->ifc_vflr_task); } void iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu, const char *name) { taskqgroup_attach_cpu(qgroup_if_io_tqg, gt, uniq, cpu, NULL, NULL, name); } void iflib_config_gtask_init(void *ctx, struct grouptask *gtask, gtask_fn_t *fn, const char *name) { GROUPTASK_INIT(gtask, 0, fn, ctx); taskqgroup_attach(qgroup_if_config_tqg, gtask, gtask, NULL, NULL, name); } void iflib_config_gtask_deinit(struct grouptask *gtask) { taskqgroup_detach(qgroup_if_config_tqg, gtask); } void iflib_link_state_change(if_ctx_t ctx, int link_state, uint64_t baudrate) { if_t ifp = ctx->ifc_ifp; iflib_txq_t txq = ctx->ifc_txqs; if_setbaudrate(ifp, baudrate); if (baudrate >= IF_Gbps(10)) { STATE_LOCK(ctx); ctx->ifc_flags |= IFC_PREFETCH; STATE_UNLOCK(ctx); } /* If link down, disable watchdog */ if ((ctx->ifc_link_state == LINK_STATE_UP) && (link_state == LINK_STATE_DOWN)) { for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, txq++) txq->ift_qstatus = IFLIB_QUEUE_IDLE; } ctx->ifc_link_state = link_state; if_link_state_change(ifp, link_state); } static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq) { int credits; #ifdef INVARIANTS int credits_pre = txq->ift_cidx_processed; #endif bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, BUS_DMASYNC_POSTREAD); if ((credits = ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, true)) == 0) return (0); txq->ift_processed += credits; txq->ift_cidx_processed += credits; MPASS(credits_pre + credits == txq->ift_cidx_processed); if (txq->ift_cidx_processed >= txq->ift_size) txq->ift_cidx_processed -= txq->ift_size; return (credits); } static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget) { iflib_fl_t fl; u_int i; for (i = 0, fl = &rxq->ifr_fl[0]; i < rxq->ifr_nfl; i++, fl++) bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx, budget)); } void iflib_add_int_delay_sysctl(if_ctx_t ctx, const char *name, const char *description, if_int_delay_info_t info, int offset, int value) { info->iidi_ctx = ctx; info->iidi_offset = offset; info->iidi_value = value; SYSCTL_ADD_PROC(device_get_sysctl_ctx(ctx->ifc_dev), SYSCTL_CHILDREN(device_get_sysctl_tree(ctx->ifc_dev)), OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, info, 0, iflib_sysctl_int_delay, "I", description); } struct sx * iflib_ctx_lock_get(if_ctx_t ctx) { 
return (&ctx->ifc_ctx_sx); } static int iflib_msix_init(if_ctx_t ctx) { device_t dev = ctx->ifc_dev; if_shared_ctx_t sctx = ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; int admincnt, bar, err, iflib_num_rx_queues, iflib_num_tx_queues; int msgs, queuemsgs, queues, rx_queues, tx_queues, vectors; iflib_num_tx_queues = ctx->ifc_sysctl_ntxqs; iflib_num_rx_queues = ctx->ifc_sysctl_nrxqs; if (bootverbose) device_printf(dev, "msix_init qsets capped at %d\n", imax(scctx->isc_ntxqsets, scctx->isc_nrxqsets)); /* Override by tuneable */ if (scctx->isc_disable_msix) goto msi; /* First try MSI-X */ if ((msgs = pci_msix_count(dev)) == 0) { if (bootverbose) device_printf(dev, "MSI-X not supported or disabled\n"); goto msi; } bar = ctx->ifc_softc_ctx.isc_msix_bar; /* * bar == -1 => "trust me I know what I'm doing" * Some drivers are for hardware that is so shoddily * documented that no one knows which bars are which * so the developer has to map all bars. This hack * allows shoddy garbage to use MSI-X in this framework. */ if (bar != -1) { ctx->ifc_msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bar, RF_ACTIVE); if (ctx->ifc_msix_mem == NULL) { device_printf(dev, "Unable to map MSI-X table\n"); goto msi; } } admincnt = sctx->isc_admin_intrcnt; #if IFLIB_DEBUG /* use only 1 qset in debug mode */ queuemsgs = min(msgs - admincnt, 1); #else queuemsgs = msgs - admincnt; #endif #ifdef RSS queues = imin(queuemsgs, rss_getnumbuckets()); #else queues = queuemsgs; #endif queues = imin(CPU_COUNT(&ctx->ifc_cpus), queues); if (bootverbose) device_printf(dev, "intr CPUs: %d queue msgs: %d admincnt: %d\n", CPU_COUNT(&ctx->ifc_cpus), queuemsgs, admincnt); #ifdef RSS /* If we're doing RSS, clamp at the number of RSS buckets */ if (queues > rss_getnumbuckets()) queues = rss_getnumbuckets(); #endif if (iflib_num_rx_queues > 0 && iflib_num_rx_queues < queuemsgs - admincnt) rx_queues = iflib_num_rx_queues; else rx_queues = queues; if (rx_queues > scctx->isc_nrxqsets) rx_queues = scctx->isc_nrxqsets; /* * We want this to be all logical CPUs by default */ if (iflib_num_tx_queues > 0 && iflib_num_tx_queues < queues) tx_queues = iflib_num_tx_queues; else tx_queues = mp_ncpus; if (tx_queues > scctx->isc_ntxqsets) tx_queues = scctx->isc_ntxqsets; if (ctx->ifc_sysctl_qs_eq_override == 0) { #ifdef INVARIANTS if (tx_queues != rx_queues) device_printf(dev, "queue equality override not set, capping rx_queues at %d and tx_queues at %d\n", min(rx_queues, tx_queues), min(rx_queues, tx_queues)); #endif tx_queues = min(rx_queues, tx_queues); rx_queues = min(rx_queues, tx_queues); } vectors = rx_queues + admincnt; if (msgs < vectors) { device_printf(dev, "insufficient number of MSI-X vectors " "(supported %d, need %d)\n", msgs, vectors); goto msi; } device_printf(dev, "Using %d RX queues %d TX queues\n", rx_queues, tx_queues); msgs = vectors; if ((err = pci_alloc_msix(dev, &vectors)) == 0) { if (vectors != msgs) { device_printf(dev, "Unable to allocate sufficient MSI-X vectors " "(got %d, need %d)\n", vectors, msgs); pci_release_msi(dev); if (bar != -1) { bus_release_resource(dev, SYS_RES_MEMORY, bar, ctx->ifc_msix_mem); ctx->ifc_msix_mem = NULL; } goto msi; } device_printf(dev, "Using MSI-X interrupts with %d vectors\n", vectors); scctx->isc_vectors = vectors; scctx->isc_nrxqsets = rx_queues; scctx->isc_ntxqsets = tx_queues; scctx->isc_intr = IFLIB_INTR_MSIX; return (vectors); } else { device_printf(dev, "failed to allocate %d MSI-X vectors, err: %d\n", vectors, err); if (bar != -1) { bus_release_resource(dev, 
SYS_RES_MEMORY, bar, ctx->ifc_msix_mem); ctx->ifc_msix_mem = NULL; } } msi: vectors = pci_msi_count(dev); scctx->isc_nrxqsets = 1; scctx->isc_ntxqsets = 1; scctx->isc_vectors = vectors; if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) { device_printf(dev,"Using an MSI interrupt\n"); scctx->isc_intr = IFLIB_INTR_MSI; } else { scctx->isc_vectors = 1; device_printf(dev,"Using a Legacy interrupt\n"); scctx->isc_intr = IFLIB_INTR_LEGACY; } return (vectors); } static const char *ring_states[] = { "IDLE", "BUSY", "STALLED", "ABDICATED" }; static int mp_ring_state_handler(SYSCTL_HANDLER_ARGS) { int rc; uint16_t *state = ((uint16_t *)oidp->oid_arg1); struct sbuf *sb; const char *ring_state = "UNKNOWN"; /* XXX needed ? */ rc = sysctl_wire_old_buffer(req, 0); MPASS(rc == 0); if (rc != 0) return (rc); sb = sbuf_new_for_sysctl(NULL, NULL, 80, req); MPASS(sb != NULL); if (sb == NULL) return (ENOMEM); if (state[3] <= 3) ring_state = ring_states[state[3]]; sbuf_printf(sb, "pidx_head: %04hd pidx_tail: %04hd cidx: %04hd state: %s", state[0], state[1], state[2], ring_state); rc = sbuf_finish(sb); sbuf_delete(sb); return(rc); } enum iflib_ndesc_handler { IFLIB_NTXD_HANDLER, IFLIB_NRXD_HANDLER, }; static int mp_ndesc_handler(SYSCTL_HANDLER_ARGS) { if_ctx_t ctx = (void *)arg1; enum iflib_ndesc_handler type = arg2; char buf[256] = {0}; qidx_t *ndesc; char *p, *next; int nqs, rc, i; nqs = 8; switch(type) { case IFLIB_NTXD_HANDLER: ndesc = ctx->ifc_sysctl_ntxds; if (ctx->ifc_sctx) nqs = ctx->ifc_sctx->isc_ntxqs; break; case IFLIB_NRXD_HANDLER: ndesc = ctx->ifc_sysctl_nrxds; if (ctx->ifc_sctx) nqs = ctx->ifc_sctx->isc_nrxqs; break; default: printf("%s: unhandled type\n", __func__); return (EINVAL); } if (nqs == 0) nqs = 8; for (i=0; i<8; i++) { if (i >= nqs) break; if (i) strcat(buf, ","); sprintf(strchr(buf, 0), "%d", ndesc[i]); } rc = sysctl_handle_string(oidp, buf, sizeof(buf), req); if (rc || req->newptr == NULL) return rc; for (i = 0, next = buf, p = strsep(&next, " ,"); i < 8 && p; i++, p = strsep(&next, " ,")) { ndesc[i] = strtoul(p, NULL, 10); } return(rc); } #define NAME_BUFLEN 32 static void iflib_add_device_sysctl_pre(if_ctx_t ctx) { device_t dev = iflib_get_dev(ctx); struct sysctl_oid_list *child, *oid_list; struct sysctl_ctx_list *ctx_list; struct sysctl_oid *node; ctx_list = device_get_sysctl_ctx(dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); ctx->ifc_sysctl_node = node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "iflib", CTLFLAG_RD, NULL, "IFLIB fields"); oid_list = SYSCTL_CHILDREN(node); SYSCTL_ADD_CONST_STRING(ctx_list, oid_list, OID_AUTO, "driver_version", CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version, "driver version"); SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs", CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0, "# of txqs to use, 0 => use default #"); SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxqs", CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0, "# of rxqs to use, 0 => use default #"); SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_qs_enable", CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0, "permit #txq != #rxq"); SYSCTL_ADD_INT(ctx_list, oid_list, OID_AUTO, "disable_msix", CTLFLAG_RWTUN, &ctx->ifc_softc_ctx.isc_disable_msix, 0, "disable MSI-X (default 0)"); SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "rx_budget", CTLFLAG_RWTUN, &ctx->ifc_sysctl_rx_budget, 0, "set the RX budget"); SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "tx_abdicate", CTLFLAG_RWTUN, &ctx->ifc_sysctl_tx_abdicate, 0, "cause TX to abdicate instead of running to completion"); 
ctx->ifc_sysctl_core_offset = CORE_OFFSET_UNSPECIFIED; SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "core_offset", CTLFLAG_RDTUN, &ctx->ifc_sysctl_core_offset, 0, "offset to start using cores at"); SYSCTL_ADD_U8(ctx_list, oid_list, OID_AUTO, "separate_txrx", CTLFLAG_RDTUN, &ctx->ifc_sysctl_separate_txrx, 0, "use separate cores for TX and RX"); /* XXX change for per-queue sizes */ SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_ntxds", CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NTXD_HANDLER, mp_ndesc_handler, "A", "list of # of TX descriptors to use, 0 = use default #"); SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_nrxds", CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NRXD_HANDLER, mp_ndesc_handler, "A", "list of # of RX descriptors to use, 0 = use default #"); } static void iflib_add_device_sysctl_post(if_ctx_t ctx) { if_shared_ctx_t sctx = ctx->ifc_sctx; if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; device_t dev = iflib_get_dev(ctx); struct sysctl_oid_list *child; struct sysctl_ctx_list *ctx_list; iflib_fl_t fl; iflib_txq_t txq; iflib_rxq_t rxq; int i, j; char namebuf[NAME_BUFLEN]; char *qfmt; struct sysctl_oid *queue_node, *fl_node, *node; struct sysctl_oid_list *queue_list, *fl_list; ctx_list = device_get_sysctl_ctx(dev); node = ctx->ifc_sysctl_node; child = SYSCTL_CHILDREN(node); if (scctx->isc_ntxqsets > 100) qfmt = "txq%03d"; else if (scctx->isc_ntxqsets > 10) qfmt = "txq%02d"; else qfmt = "txq%d"; for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) { snprintf(namebuf, NAME_BUFLEN, qfmt, i); queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); #if MEMORY_LOGGING SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_dequeued", CTLFLAG_RD, &txq->ift_dequeued, "total mbufs freed"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_enqueued", CTLFLAG_RD, &txq->ift_enqueued, "total mbufs enqueued"); #endif SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag", CTLFLAG_RD, &txq->ift_mbuf_defrag, "# of times m_defrag was called"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "m_pullups", CTLFLAG_RD, &txq->ift_pullups, "# of times m_pullup was called"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag_failed", CTLFLAG_RD, &txq->ift_mbuf_defrag_failed, "# of times m_defrag failed"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_desc_avail", CTLFLAG_RD, &txq->ift_no_desc_avail, "# of times no descriptors were available"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "tx_map_failed", CTLFLAG_RD, &txq->ift_map_failed, "# of times DMA map failed"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txd_encap_efbig", CTLFLAG_RD, &txq->ift_txd_encap_efbig, "# of times txd_encap returned EFBIG"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_tx_dma_setup", CTLFLAG_RD, &txq->ift_no_tx_dma_setup, "# of times map failed for other than EFBIG"); SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_pidx", CTLFLAG_RD, &txq->ift_pidx, 1, "Producer Index"); SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx", CTLFLAG_RD, &txq->ift_cidx, 1, "Consumer Index"); SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx_processed", CTLFLAG_RD, &txq->ift_cidx_processed, 1, "Consumer Index seen by credit update"); SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_in_use", CTLFLAG_RD, &txq->ift_in_use, 1, "descriptors in use"); SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_processed", CTLFLAG_RD, &txq->ift_processed, "descriptors processed for clean");
SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_cleaned", CTLFLAG_RD, &txq->ift_cleaned, "total cleaned"); SYSCTL_ADD_PROC(ctx_list, queue_list, OID_AUTO, "ring_state", CTLTYPE_STRING | CTLFLAG_RD, __DEVOLATILE(uint64_t *, &txq->ift_br->state), 0, mp_ring_state_handler, "A", "soft ring state"); SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_enqueues", CTLFLAG_RD, &txq->ift_br->enqueues, "# of enqueues to the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_drops", CTLFLAG_RD, &txq->ift_br->drops, "# of drops in the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_starts", CTLFLAG_RD, &txq->ift_br->starts, "# of normal consumer starts in the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_stalls", CTLFLAG_RD, &txq->ift_br->stalls, "# of consumer stalls in the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_restarts", CTLFLAG_RD, &txq->ift_br->restarts, "# of consumer restarts in the mp_ring for this queue"); SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_abdications", CTLFLAG_RD, &txq->ift_br->abdications, "# of consumer abdications in the mp_ring for this queue"); } if (scctx->isc_nrxqsets > 100) qfmt = "rxq%03d"; else if (scctx->isc_nrxqsets > 10) qfmt = "rxq%02d"; else qfmt = "rxq%d"; for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) { snprintf(namebuf, NAME_BUFLEN, qfmt, i); queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); if (sctx->isc_flags & IFLIB_HAS_RXCQ) { SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_cidx", CTLFLAG_RD, &rxq->ifr_cq_cidx, 1, "Consumer Index"); } for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) { snprintf(namebuf, NAME_BUFLEN, "rxq_fl%d", j); fl_node = SYSCTL_ADD_NODE(ctx_list, queue_list, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "freelist Name"); fl_list = SYSCTL_CHILDREN(fl_node); SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "pidx", CTLFLAG_RD, &fl->ifl_pidx, 1, "Producer Index"); SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "cidx", CTLFLAG_RD, &fl->ifl_cidx, 1, "Consumer Index"); SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "credits", CTLFLAG_RD, &fl->ifl_credits, 1, "credits available"); #if MEMORY_LOGGING SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_enqueued", CTLFLAG_RD, &fl->ifl_m_enqueued, "mbufs allocated"); SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_dequeued", CTLFLAG_RD, &fl->ifl_m_dequeued, "mbufs freed"); SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_enqueued", CTLFLAG_RD, &fl->ifl_cl_enqueued, "clusters allocated"); SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_dequeued", CTLFLAG_RD, &fl->ifl_cl_dequeued, "clusters freed"); #endif } } } void iflib_request_reset(if_ctx_t ctx) { STATE_LOCK(ctx); ctx->ifc_flags |= IFC_DO_RESET; STATE_UNLOCK(ctx); } #ifndef __NO_STRICT_ALIGNMENT static struct mbuf * iflib_fixup_rx(struct mbuf *m) { struct mbuf *n; if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) { bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len); m->m_data += ETHER_HDR_LEN; n = m; } else { MGETHDR(n, M_NOWAIT, MT_DATA); if (n == NULL) { m_freem(m); return (NULL); } bcopy(m->m_data, n->m_data, ETHER_HDR_LEN); m->m_data += ETHER_HDR_LEN; m->m_len -= ETHER_HDR_LEN; n->m_len = ETHER_HDR_LEN; M_MOVE_PKTHDR(n, m); n->m_next = m; } return (n); } #endif #ifdef DEBUGNET static void iflib_debugnet_init(if_t ifp, int *nrxr, int *ncl, int *clsize) { 
if_ctx_t ctx; ctx = if_getsoftc(ifp); CTX_LOCK(ctx); *nrxr = NRXQSETS(ctx); *ncl = ctx->ifc_rxqs[0].ifr_fl->ifl_size; *clsize = ctx->ifc_rxqs[0].ifr_fl->ifl_buf_size; CTX_UNLOCK(ctx); } static void iflib_debugnet_event(if_t ifp, enum debugnet_ev event) { if_ctx_t ctx; if_softc_ctx_t scctx; iflib_fl_t fl; iflib_rxq_t rxq; int i, j; ctx = if_getsoftc(ifp); scctx = &ctx->ifc_softc_ctx; switch (event) { case DEBUGNET_START: for (i = 0; i < scctx->isc_nrxqsets; i++) { rxq = &ctx->ifc_rxqs[i]; for (j = 0; j < rxq->ifr_nfl; j++) { fl = rxq->ifr_fl; fl->ifl_zone = m_getzone(fl->ifl_buf_size); } } iflib_no_tx_batch = 1; break; default: break; } } static int iflib_debugnet_transmit(if_t ifp, struct mbuf *m) { if_ctx_t ctx; iflib_txq_t txq; int error; ctx = if_getsoftc(ifp); if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return (EBUSY); txq = &ctx->ifc_txqs[0]; error = iflib_encap(txq, &m); if (error == 0) (void)iflib_txd_db_check(ctx, txq, true, txq->ift_in_use); return (error); } static int iflib_debugnet_poll(if_t ifp, int count) { struct epoch_tracker et; if_ctx_t ctx; if_softc_ctx_t scctx; iflib_txq_t txq; int i; ctx = if_getsoftc(ifp); scctx = &ctx->ifc_softc_ctx; if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return (EBUSY); txq = &ctx->ifc_txqs[0]; (void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx)); NET_EPOCH_ENTER(et); for (i = 0; i < scctx->isc_nrxqsets; i++) (void)iflib_rxeof(&ctx->ifc_rxqs[i], 16 /* XXX */); NET_EPOCH_EXIT(et); return (0); } #endif /* DEBUGNET */
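/*
 * Illustrative sketch (not part of the iflib.c diff above): a minimal
 * outline of how a NIC driver plugs into the attach path shown here.
 * iflib_device_attach() calls the driver's DEVICE_REGISTER method to
 * obtain an if_shared_ctx_t whose isc_magic must equal IFLIB_MAGIC, and
 * _iflib_assert() then requires 1..8 queues per set and power-of-two
 * descriptor limits.  All "xx_" names are hypothetical placeholders
 * modeled on existing iflib consumers such as if_em; the block is wrapped
 * in #if 0 because it is an example sketch, not code for this driver.
 */
#if 0
static struct if_shared_ctx xx_sctx_init = {
	.isc_magic = IFLIB_MAGIC,
	.isc_driver = &xx_if_driver,	/* kobj driver_t carrying the driver's ifdi_* methods */
	.isc_driver_version = "1.0.0",
	.isc_ntxqs = 1,			/* _iflib_assert(): 1..8 queues per qset */
	.isc_nrxqs = 1,
	.isc_nfl = 1,			/* one free list per RX queue */
	.isc_admin_intrcnt = 1,		/* extra MSI-X vector reserved for the admin task */
	.isc_tx_maxsize = 65535,
	.isc_tx_maxsegsize = PAGE_SIZE,
	.isc_rx_maxsize = MJUM9BYTES,
	.isc_rx_nsegments = 1,
	.isc_rx_maxsegsize = MJUM9BYTES,
	/* descriptor limits must be powers of two (_iflib_assert) */
	.isc_ntxd_min = {32}, .isc_ntxd_max = {4096}, .isc_ntxd_default = {1024},
	.isc_nrxd_min = {32}, .isc_nrxd_max = {4096}, .isc_nrxd_default = {1024},
};

static void *
xx_register(device_t dev)
{
	/* The returned pointer is checked against IFLIB_MAGIC in iflib_device_attach(). */
	return (&xx_sctx_init);
}

/* Bus glue: everything after DEVICE_REGISTER is delegated to iflib. */
static device_method_t xx_methods[] = {
	DEVMETHOD(device_register, xx_register),
	DEVMETHOD(device_probe, iflib_device_probe),	/* needs isc_vendor_info (omitted here) */
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD(device_suspend, iflib_device_suspend),
	DEVMETHOD(device_resume, iflib_device_resume),
	DEVMETHOD_END
};
#endif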