diff --git a/sys/dev/al_eth/al_eth.c b/sys/dev/al_eth/al_eth.c index ec10313442b3..e3f9a14f4f2d 100644 --- a/sys/dev/al_eth/al_eth.c +++ b/sys/dev/al_eth/al_eth.c @@ -1,3568 +1,3568 @@ /*- * Copyright (c) 2015,2016 Annapurna Labs Ltd. and affiliates * All rights reserved. * * Developed by Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_inet.h" #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #include #include #endif #ifdef INET6 #include #endif #include #include #include #include #include #include #include #include #include #include #include #include "al_eth.h" #include "al_init_eth_lm.h" #include "arm/annapurna/alpine/alpine_serdes.h" #include "miibus_if.h" #define device_printf_dbg(fmt, ...) do { \ if (AL_DBG_LEVEL >= AL_DBG_LEVEL_DBG) { AL_DBG_LOCK(); \ device_printf(fmt, __VA_ARGS__); AL_DBG_UNLOCK();} \ } while (0) MALLOC_DEFINE(M_IFAL, "if_al_malloc", "All allocated data for AL ETH driver"); /* move out to some pci header file */ #define PCI_VENDOR_ID_ANNAPURNA_LABS 0x1c36 #define PCI_DEVICE_ID_AL_ETH 0x0001 #define PCI_DEVICE_ID_AL_ETH_ADVANCED 0x0002 #define PCI_DEVICE_ID_AL_ETH_NIC 0x0003 #define PCI_DEVICE_ID_AL_ETH_FPGA_NIC 0x0030 #define PCI_DEVICE_ID_AL_CRYPTO 0x0011 #define PCI_DEVICE_ID_AL_CRYPTO_VF 0x8011 #define PCI_DEVICE_ID_AL_RAID_DMA 0x0021 #define PCI_DEVICE_ID_AL_RAID_DMA_VF 0x8021 #define PCI_DEVICE_ID_AL_USB 0x0041 #define MAC_ADDR_STR "%02x:%02x:%02x:%02x:%02x:%02x" #define MAC_ADDR(addr) addr[0], addr[1], addr[2], addr[3], addr[4], addr[5] #define AL_ETH_MAC_TABLE_UNICAST_IDX_BASE 0 #define AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT 4 #define AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX (AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + \ AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT) #define AL_ETH_MAC_TABLE_DROP_IDX (AL_ETH_FWD_MAC_NUM - 1) #define AL_ETH_MAC_TABLE_BROADCAST_IDX (AL_ETH_MAC_TABLE_DROP_IDX - 1) #define AL_ETH_THASH_UDMA_SHIFT 0 #define AL_ETH_THASH_UDMA_MASK (0xF << AL_ETH_THASH_UDMA_SHIFT) #define AL_ETH_THASH_Q_SHIFT 4 #define AL_ETH_THASH_Q_MASK (0x3 << AL_ETH_THASH_Q_SHIFT) /* the following defines should be 
moved to hal */ #define AL_ETH_FSM_ENTRY_IPV4_TCP 0 #define AL_ETH_FSM_ENTRY_IPV4_UDP 1 #define AL_ETH_FSM_ENTRY_IPV6_TCP 2 #define AL_ETH_FSM_ENTRY_IPV6_UDP 3 #define AL_ETH_FSM_ENTRY_IPV6_NO_UDP_TCP 4 #define AL_ETH_FSM_ENTRY_IPV4_NO_UDP_TCP 5 /* FSM DATA format */ #define AL_ETH_FSM_DATA_OUTER_2_TUPLE 0 #define AL_ETH_FSM_DATA_OUTER_4_TUPLE 1 #define AL_ETH_FSM_DATA_INNER_2_TUPLE 2 #define AL_ETH_FSM_DATA_INNER_4_TUPLE 3 #define AL_ETH_FSM_DATA_HASH_SEL (1 << 2) #define AL_ETH_FSM_DATA_DEFAULT_Q 0 #define AL_ETH_FSM_DATA_DEFAULT_UDMA 0 #define AL_BR_SIZE 512 #define AL_TSO_SIZE 65500 #define AL_DEFAULT_MTU 1500 #define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP) #define AL_IP_ALIGNMENT_OFFSET 2 #define SFP_I2C_ADDR 0x50 #define AL_MASK_GROUP_A_INT 0x7 #define AL_MASK_GROUP_B_INT 0xF #define AL_MASK_GROUP_C_INT 0xF #define AL_MASK_GROUP_D_INT 0xFFFFFFFF #define AL_REG_OFFSET_FORWARD_INTR (0x1800000 + 0x1210) #define AL_EN_FORWARD_INTR 0x1FFFF #define AL_DIS_FORWARD_INTR 0 #define AL_M2S_MASK_INIT 0x480 #define AL_S2M_MASK_INIT 0x1E0 #define AL_M2S_S2M_MASK_NOT_INT (0x3f << 25) #define AL_10BASE_T_SPEED 10 #define AL_100BASE_TX_SPEED 100 #define AL_1000BASE_T_SPEED 1000 #define AL_RX_LOCK_INIT(_sc) mtx_init(&((_sc)->if_rx_lock), "ALRXL", "ALRXL", MTX_DEF) #define AL_RX_LOCK(_sc) mtx_lock(&((_sc)->if_rx_lock)) #define AL_RX_UNLOCK(_sc) mtx_unlock(&((_sc)->if_rx_lock)) /* helper functions */ static int al_is_device_supported(device_t); static void al_eth_init_rings(struct al_eth_adapter *); static void al_eth_flow_ctrl_disable(struct al_eth_adapter *); int al_eth_fpga_read_pci_config(void *, int, uint32_t *); int al_eth_fpga_write_pci_config(void *, int, uint32_t); int al_eth_read_pci_config(void *, int, uint32_t *); int al_eth_write_pci_config(void *, int, uint32_t); void al_eth_irq_config(uint32_t *, uint32_t); void al_eth_forward_int_config(uint32_t *, uint32_t); static void al_eth_start_xmit(void *, int); static void al_eth_rx_recv_work(void *, int); 
static int al_eth_up(struct al_eth_adapter *);
static void al_eth_down(struct al_eth_adapter *);
static void al_eth_interrupts_unmask(struct al_eth_adapter *);
static void al_eth_interrupts_mask(struct al_eth_adapter *);
static int al_eth_check_mtu(struct al_eth_adapter *, int);
static uint64_t al_get_counter(if_t, ift_counter);
static void al_eth_req_rx_buff_size(struct al_eth_adapter *, int);
static int al_eth_board_params_init(struct al_eth_adapter *);
static int al_media_update(if_t);
static void al_media_status(if_t, struct ifmediareq *);
static int al_eth_function_reset(struct al_eth_adapter *);
static int al_eth_hw_init_adapter(struct al_eth_adapter *);
static void al_eth_serdes_init(struct al_eth_adapter *);
static void al_eth_lm_config(struct al_eth_adapter *);
static int al_eth_hw_init(struct al_eth_adapter *);
static void al_tick_stats(void *);

/* ifnet entry points */
static void al_init(void *);
static int al_mq_start(if_t, struct mbuf *);
static void al_qflush(if_t);
static int al_ioctl(if_t ifp, u_long, caddr_t);

/* bus entry points */
static int al_probe(device_t);
static int al_attach(device_t);
static int al_detach(device_t);
static int al_shutdown(device_t);

/* mii bus support routines */
static int al_miibus_readreg(device_t, int, int);
static int al_miibus_writereg(device_t, int, int, int);
static void al_miibus_statchg(device_t);
static void al_miibus_linkchg(device_t);

/* one slot per attached instance; indexed by id_number at attach time */
struct al_eth_adapter* g_adapters[16];
uint32_t g_adapters_count;

/* flag for napi-like mbuf processing, controlled from sysctl */
static int napi = 0;

static device_method_t al_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, al_probe),
    DEVMETHOD(device_attach, al_attach),
    DEVMETHOD(device_detach, al_detach),
    DEVMETHOD(device_shutdown, al_shutdown),

    DEVMETHOD(miibus_readreg, al_miibus_readreg),
    DEVMETHOD(miibus_writereg, al_miibus_writereg),
    DEVMETHOD(miibus_statchg, al_miibus_statchg),
    DEVMETHOD(miibus_linkchg, al_miibus_linkchg),
    { 0, 0 }
};

static driver_t al_driver = {
    "al",
    al_methods,
    sizeof(struct al_eth_adapter),
};

DRIVER_MODULE(al, pci, al_driver, 0, 0);
DRIVER_MODULE(miibus, al, miibus_driver, 0, 0);

/*
 * Probe: claim the device when al_is_device_supported() recognizes its
 * PCI vendor/device id.
 */
static int
al_probe(device_t dev)
{
    if ((al_is_device_supported(dev)) != 0) {
        device_set_desc(dev, "al");
        return (BUS_PROBE_DEFAULT);
    }
    return (ENXIO);
}

/*
 * Attach: map the UDMA/MAC/EC BARs, create and configure the ifnet,
 * read board parameters, reset and initialize the controller, and
 * (for RGMII boards) attach the PHY through miibus.
 *
 * Error handling unwinds the three BAR allocations via the goto
 * labels at the bottom, in reverse order of acquisition.
 */
static int
al_attach(device_t dev)
{
    struct al_eth_adapter *adapter;
    struct sysctl_oid_list *child;
    struct sysctl_ctx_list *ctx;
    struct sysctl_oid *tree;
    if_t ifp;
    uint32_t dev_id;
    uint32_t rev_id;
    int bar_udma;
    int bar_mac;
    int bar_ec;
    int err;

    err = 0;
    ifp = NULL;
    dev_id = rev_id = 0;

    ctx = device_get_sysctl_ctx(dev);
    tree = SYSCTL_PARENT(device_get_sysctl_tree(dev));
    child = SYSCTL_CHILDREN(tree);

    /* Expose the pseudo-napi knob only once, for the first instance. */
    if (g_adapters_count == 0) {
        SYSCTL_ADD_INT(ctx, child, OID_AUTO, "napi", CTLFLAG_RW,
            &napi, 0, "Use pseudo-napi mechanism");
    }
    adapter = device_get_softc(dev);
    adapter->dev = dev;
    adapter->board_type = ALPINE_INTEGRATED;
    snprintf(adapter->name, AL_ETH_NAME_MAX_LEN, "%s",
        device_get_nameunit(dev));
    AL_RX_LOCK_INIT(adapter);

    g_adapters[g_adapters_count] = adapter;

    /* Map the three register windows: UDMA, MAC and Ethernet controller. */
    bar_udma = PCIR_BAR(AL_ETH_UDMA_BAR);
    adapter->udma_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
        &bar_udma, RF_ACTIVE);
    if (adapter->udma_res == NULL) {
        device_printf(adapter->dev,
            "could not allocate memory resources for DMA.\n");
        err = ENOMEM;
        goto err_res_dma;
    }
    adapter->udma_base = al_bus_dma_to_va(rman_get_bustag(adapter->udma_res),
        rman_get_bushandle(adapter->udma_res));
    bar_mac = PCIR_BAR(AL_ETH_MAC_BAR);
    adapter->mac_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
        &bar_mac, RF_ACTIVE);
    if (adapter->mac_res == NULL) {
        device_printf(adapter->dev,
            "could not allocate memory resources for MAC.\n");
        err = ENOMEM;
        goto err_res_mac;
    }
    adapter->mac_base = al_bus_dma_to_va(rman_get_bustag(adapter->mac_res),
        rman_get_bushandle(adapter->mac_res));

    bar_ec = PCIR_BAR(AL_ETH_EC_BAR);
    adapter->ec_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bar_ec,
        RF_ACTIVE);
    if (adapter->ec_res == NULL) {
        device_printf(adapter->dev,
            "could not allocate memory resources for EC.\n");
        err = ENOMEM;
        goto err_res_ec;
    }
    adapter->ec_base = al_bus_dma_to_va(rman_get_bustag(adapter->ec_res),
        rman_get_bushandle(adapter->ec_res));

    adapter->netdev = ifp = if_alloc(IFT_ETHER);

    if_setsoftc(ifp, adapter);
    if_initname(ifp, device_get_name(dev), device_get_unit(dev));
    if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
    if_setflags(ifp, if_getdrvflags(ifp));
    if_setflagbits(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST |
        IFF_ALLMULTI, 0);
    if_settransmitfn(ifp, al_mq_start);
    if_setqflushfn(ifp, al_qflush);
    if_setioctlfn(ifp, al_ioctl);
    if_setinitfn(ifp, al_init);
    if_setgetcounterfn(ifp, al_get_counter);
    if_setmtu(ifp, AL_DEFAULT_MTU);

    adapter->if_flags = if_getflags(ifp);

    if_setcapabilities(ifp, if_getcapenable(ifp) );

    if_setcapabilitiesbit(ifp, IFCAP_HWCSUM |
        IFCAP_HWCSUM_IPV6 | IFCAP_TSO |
        IFCAP_LRO | IFCAP_JUMBO_MTU, 0);

    if_setcapenable(ifp, if_getcapabilities(ifp));

    adapter->id_number = g_adapters_count;

    if (adapter->board_type == ALPINE_INTEGRATED) {
        dev_id = pci_get_device(adapter->dev);
        rev_id = pci_get_revid(adapter->dev);
    } else {
        al_eth_fpga_read_pci_config(adapter->internal_pcie_base,
            PCIR_DEVICE, &dev_id);
        al_eth_fpga_read_pci_config(adapter->internal_pcie_base,
            PCIR_REVID, &rev_id);
    }

    adapter->dev_id = dev_id;
    adapter->rev_id = rev_id;

    /* set default ring sizes */
    adapter->tx_ring_count = AL_ETH_DEFAULT_TX_SW_DESCS;
    adapter->tx_descs_count = AL_ETH_DEFAULT_TX_HW_DESCS;
    adapter->rx_ring_count = AL_ETH_DEFAULT_RX_DESCS;
    adapter->rx_descs_count = AL_ETH_DEFAULT_RX_DESCS;

    adapter->num_tx_queues = AL_ETH_NUM_QUEUES;
    adapter->num_rx_queues = AL_ETH_NUM_QUEUES;

    adapter->small_copy_len = AL_ETH_DEFAULT_SMALL_PACKET_LEN;
    adapter->link_poll_interval = AL_ETH_DEFAULT_LINK_POLL_INTERVAL;
    adapter->max_rx_buff_alloc_size = AL_ETH_DEFAULT_MAX_RX_BUFF_ALLOC_SIZE;

    al_eth_req_rx_buff_size(adapter, if_getmtu(adapter->netdev));

    adapter->link_config.force_1000_base_x = AL_ETH_DEFAULT_FORCE_1000_BASEX;

    err = al_eth_board_params_init(adapter);
    if (err != 0)
        goto err;

    if (adapter->mac_mode == AL_ETH_MAC_MODE_10GbE_Serial) {
        ifmedia_init(&adapter->media, IFM_IMASK,
            al_media_update, al_media_status);
        ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
        ifmedia_add(&adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
        ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
        ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
    }

    al_eth_function_reset(adapter);

    err = al_eth_hw_init_adapter(adapter);
    if (err != 0)
        goto err;

    al_eth_init_rings(adapter);
    g_adapters_count++;

    al_eth_lm_config(adapter);
    mtx_init(&adapter->stats_mtx, "AlStatsMtx", NULL, MTX_DEF);
    mtx_init(&adapter->wd_mtx, "AlWdMtx", NULL, MTX_DEF);
    callout_init_mtx(&adapter->stats_callout, &adapter->stats_mtx, 0);
    callout_init_mtx(&adapter->wd_callout, &adapter->wd_mtx, 0);

    ether_ifattach(ifp, adapter->mac_addr);
    if_setmtu(ifp, AL_DEFAULT_MTU);

    if (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII) {
        al_eth_hw_init(adapter);

        /* Attach PHY(s) */
        err = mii_attach(adapter->dev, &adapter->miibus, adapter->netdev,
            al_media_update, al_media_status, BMSR_DEFCAPMASK, 0,
            MII_OFFSET_ANY, 0);
        if (err != 0) {
            device_printf(adapter->dev, "attaching PHYs failed\n");
            return (err);
        }

        adapter->mii = device_get_softc(adapter->miibus);
    }

    return (err);

err:
    bus_release_resource(dev, SYS_RES_MEMORY, bar_ec, adapter->ec_res);
err_res_ec:
    bus_release_resource(dev, SYS_RES_MEMORY, bar_mac, adapter->mac_res);
err_res_mac:
    bus_release_resource(dev, SYS_RES_MEMORY, bar_udma, adapter->udma_res);
err_res_dma:
    return (err);
}

/*
 * Detach: stop the interface, tear down locks, and release the IRQ and
 * the three memory BARs acquired in al_attach().
 */
static int
al_detach(device_t dev)
{
    struct al_eth_adapter *adapter;

    adapter = device_get_softc(dev);
    ether_ifdetach(adapter->netdev);

    mtx_destroy(&adapter->stats_mtx);
    mtx_destroy(&adapter->wd_mtx);

    al_eth_down(adapter);

    bus_release_resource(dev, SYS_RES_IRQ, 0, adapter->irq_res);
    bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->ec_res);
    bus_release_resource(dev,
    SYS_RES_MEMORY, 0, adapter->mac_res);
    bus_release_resource(dev, SYS_RES_MEMORY, 0, adapter->udma_res);

    return (0);
}

/*
 * PCI-config accessors for FPGA/NIC boards: "handle" is the virtual
 * base address of the internal PCIe config window, so reads/writes are
 * plain register accesses at base + where.
 */
int
al_eth_fpga_read_pci_config(void *handle, int where, uint32_t *val)
{

    /* handle is the base address of the adapter */
    *val = al_reg_read32((void*)((u_long)handle + where));
    return (0);
}

int
al_eth_fpga_write_pci_config(void *handle, int where, uint32_t val)
{

    /* handle is the base address of the adapter */
    al_reg_write32((void*)((u_long)handle + where), val);
    return (0);
}

/*
 * PCI-config accessors for the integrated board: "handle" is a device_t,
 * forwarded to the standard pci_read_config()/pci_write_config().
 */
int
al_eth_read_pci_config(void *handle, int where, uint32_t *val)
{

    /* handle is a pci_dev */
    *val = pci_read_config((device_t)handle, where, sizeof(*val));
    return (0);
}

int
al_eth_write_pci_config(void *handle, int where, uint32_t val)
{

    /* handle is a pci_dev */
    pci_write_config((device_t)handle, where, val, sizeof(val));
    return (0);
}

/* Write an interrupt-controller register (relaxed ordering variant). */
void
al_eth_irq_config(uint32_t *offset, uint32_t value)
{

    al_reg_write32_relaxed(offset, value);
}

void
al_eth_forward_int_config(uint32_t *offset, uint32_t value)
{

    al_reg_write32(offset, value);
}

/*
 * Locate this adapter's serdes group registers and initialize the HAL
 * serdes object.  On failure, serdes_init stays false and the caller
 * must not use adapter->serdes_obj.
 */
static void
al_eth_serdes_init(struct al_eth_adapter *adapter)
{
    void __iomem *serdes_base;

    adapter->serdes_init = false;

    serdes_base = alpine_serdes_resource_get(adapter->serdes_grp);
    if (serdes_base == NULL) {
        device_printf(adapter->dev, "serdes_base get failed!\n");
        return;
    }

    serdes_base = al_bus_dma_to_va(serdes_tag, serdes_base);

    al_serdes_handle_grp_init(serdes_base, adapter->serdes_grp,
        &adapter->serdes_obj);

    adapter->serdes_init = true;
}

/* bus_dmamap_load() callback: store the single segment's bus address. */
static void
al_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    bus_addr_t *paddr;

    paddr = arg;
    *paddr = segs->ds_addr;
}

/*
 * Allocate a page-rounded, coherent, single-segment DMA buffer.
 * On success *tag/*map/*baddr/*vaddr describe the mapping; the caller
 * releases it with al_dma_free_coherent().
 * NOTE(review): on partial failure the created tag/memory are not torn
 * down here — presumably the caller is expected to clean up; verify.
 */
static int
al_dma_alloc_coherent(device_t dev, bus_dma_tag_t *tag, bus_dmamap_t *map,
    bus_addr_t *baddr, void **vaddr, uint32_t size)
{
    int ret;
    uint32_t maxsize = ((size - 1)/PAGE_SIZE + 1) * PAGE_SIZE;

    ret = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0,
        BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
        maxsize, 1, maxsize, BUS_DMA_COHERENT, NULL, NULL, tag);
if (ret != 0) { device_printf(dev, "failed to create bus tag, ret = %d\n", ret); return (ret); } ret = bus_dmamem_alloc(*tag, vaddr, BUS_DMA_COHERENT | BUS_DMA_ZERO, map); if (ret != 0) { device_printf(dev, "failed to allocate dmamem, ret = %d\n", ret); return (ret); } ret = bus_dmamap_load(*tag, *map, *vaddr, size, al_dma_map_addr, baddr, 0); if (ret != 0) { device_printf(dev, "failed to allocate bus_dmamap_load, ret = %d\n", ret); return (ret); } return (0); } static void al_dma_free_coherent(bus_dma_tag_t tag, bus_dmamap_t map, void *vaddr) { bus_dmamap_unload(tag, map); bus_dmamem_free(tag, vaddr, map); bus_dma_tag_destroy(tag); } static void al_eth_mac_table_unicast_add(struct al_eth_adapter *adapter, uint8_t idx, uint8_t udma_mask) { struct al_eth_fwd_mac_table_entry entry = { { 0 } }; memcpy(entry.addr, adapter->mac_addr, sizeof(adapter->mac_addr)); memset(entry.mask, 0xff, sizeof(entry.mask)); entry.rx_valid = true; entry.tx_valid = false; entry.udma_mask = udma_mask; entry.filter = false; device_printf_dbg(adapter->dev, "%s: [%d]: addr "MAC_ADDR_STR" mask "MAC_ADDR_STR"\n", __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask)); al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry); } static void al_eth_mac_table_all_multicast_add(struct al_eth_adapter *adapter, uint8_t idx, uint8_t udma_mask) { struct al_eth_fwd_mac_table_entry entry = { { 0 } }; memset(entry.addr, 0x00, sizeof(entry.addr)); memset(entry.mask, 0x00, sizeof(entry.mask)); entry.mask[0] |= 1; entry.addr[0] |= 1; entry.rx_valid = true; entry.tx_valid = false; entry.udma_mask = udma_mask; entry.filter = false; device_printf_dbg(adapter->dev, "%s: [%d]: addr "MAC_ADDR_STR" mask "MAC_ADDR_STR"\n", __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask)); al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry); } static void al_eth_mac_table_broadcast_add(struct al_eth_adapter *adapter, uint8_t idx, uint8_t udma_mask) { struct al_eth_fwd_mac_table_entry entry = { { 0 } }; 
memset(entry.addr, 0xff, sizeof(entry.addr)); memset(entry.mask, 0xff, sizeof(entry.mask)); entry.rx_valid = true; entry.tx_valid = false; entry.udma_mask = udma_mask; entry.filter = false; device_printf_dbg(adapter->dev, "%s: [%d]: addr "MAC_ADDR_STR" mask "MAC_ADDR_STR"\n", __func__, idx, MAC_ADDR(entry.addr), MAC_ADDR(entry.mask)); al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry); } static void al_eth_mac_table_promiscuous_set(struct al_eth_adapter *adapter, - boolean_t promiscuous) + bool promiscuous) { struct al_eth_fwd_mac_table_entry entry = { { 0 } }; memset(entry.addr, 0x00, sizeof(entry.addr)); memset(entry.mask, 0x00, sizeof(entry.mask)); entry.rx_valid = true; entry.tx_valid = false; entry.udma_mask = (promiscuous) ? 1 : 0; entry.filter = (promiscuous) ? false : true; device_printf_dbg(adapter->dev, "%s: %s promiscuous mode\n", __func__, (promiscuous) ? "enter" : "exit"); al_eth_fwd_mac_table_set(&adapter->hal_adapter, AL_ETH_MAC_TABLE_DROP_IDX, &entry); } static void al_eth_set_thash_table_entry(struct al_eth_adapter *adapter, uint8_t idx, uint8_t udma, uint32_t queue) { if (udma != 0) panic("only UDMA0 is supporter"); if (queue >= AL_ETH_NUM_QUEUES) panic("invalid queue number"); al_eth_thash_table_set(&adapter->hal_adapter, idx, udma, queue); } /* init FSM, no tunneling supported yet, if packet is tcp/udp over ipv4/ipv6, use 4 tuple hash */ static void al_eth_fsm_table_init(struct al_eth_adapter *adapter) { uint32_t val; int i; for (i = 0; i < AL_ETH_RX_FSM_TABLE_SIZE; i++) { uint8_t outer_type = AL_ETH_FSM_ENTRY_OUTER(i); switch (outer_type) { case AL_ETH_FSM_ENTRY_IPV4_TCP: case AL_ETH_FSM_ENTRY_IPV4_UDP: case AL_ETH_FSM_ENTRY_IPV6_TCP: case AL_ETH_FSM_ENTRY_IPV6_UDP: val = AL_ETH_FSM_DATA_OUTER_4_TUPLE | AL_ETH_FSM_DATA_HASH_SEL; break; case AL_ETH_FSM_ENTRY_IPV6_NO_UDP_TCP: case AL_ETH_FSM_ENTRY_IPV4_NO_UDP_TCP: val = AL_ETH_FSM_DATA_OUTER_2_TUPLE | AL_ETH_FSM_DATA_HASH_SEL; break; default: val = AL_ETH_FSM_DATA_DEFAULT_Q | 
                AL_ETH_FSM_DATA_DEFAULT_UDMA;
        }
        al_eth_fsm_table_set(&adapter->hal_adapter, i, val);
    }
}

/* Invalidate MAC-table entry idx by writing an all-zero entry. */
static void
al_eth_mac_table_entry_clear(struct al_eth_adapter *adapter, uint8_t idx)
{
    struct al_eth_fwd_mac_table_entry entry = { { 0 } };

    device_printf_dbg(adapter->dev, "%s: clear entry %d\n", __func__, idx);

    al_eth_fwd_mac_table_set(&adapter->hal_adapter, idx, &entry);
}

/*
 * Fill the HAL adapter parameters from the mapped register bases and
 * initialize the HAL.  In PCIe NIC modes, additionally force all UDMA
 * queues to tag their bus accesses with tgtid 0x100 (PCIE0).
 */
static int
al_eth_hw_init_adapter(struct al_eth_adapter *adapter)
{
    struct al_eth_adapter_params *params = &adapter->eth_hal_params;
    int rc;

    /* params->dev_id = adapter->dev_id; */
    params->rev_id = adapter->rev_id;
    params->udma_id = 0;
    params->enable_rx_parser = 1; /* enable rx epe parser*/
    params->udma_regs_base = adapter->udma_base; /* UDMA register base address */
    params->ec_regs_base = adapter->ec_base; /* Ethernet controller registers base address */
    params->mac_regs_base = adapter->mac_base; /* Ethernet MAC registers base address */
    params->name = adapter->name;
    params->serdes_lane = adapter->serdes_lane;

    rc = al_eth_adapter_init(&adapter->hal_adapter, params);
    if (rc != 0)
        device_printf(adapter->dev, "%s failed at hal init!\n",
            __func__);

    if ((adapter->board_type == ALPINE_NIC) ||
        (adapter->board_type == ALPINE_FPGA_NIC)) {
        /* in pcie NIC mode, force eth UDMA to access PCIE0 using the vmid */
        struct al_udma_gen_tgtid_conf conf;
        int i;
        for (i = 0; i < DMA_MAX_Q; i++) {
            conf.tx_q_conf[i].queue_en = AL_TRUE;
            conf.tx_q_conf[i].desc_en = AL_FALSE;
            conf.tx_q_conf[i].tgtid = 0x100; /* for access from PCIE0 */
            conf.rx_q_conf[i].queue_en = AL_TRUE;
            conf.rx_q_conf[i].desc_en = AL_FALSE;
            conf.rx_q_conf[i].tgtid = 0x100; /* for access from PCIE0 */
        }
        al_udma_gen_tgtid_conf_set(adapter->udma_base, &conf);
    }

    return (rc);
}

/*
 * Build the link-management (lm) parameters from the board
 * configuration and hand them to the HAL.
 */
static void
al_eth_lm_config(struct al_eth_adapter *adapter)
{
    struct al_eth_lm_init_params params = {0};

    params.adapter = &adapter->hal_adapter;
    params.serdes_obj = &adapter->serdes_obj;
    params.lane = adapter->serdes_lane;
    params.sfp_detection = adapter->sfp_detection_needed;
    if
(adapter->sfp_detection_needed == true) { params.sfp_bus_id = adapter->i2c_adapter_id; params.sfp_i2c_addr = SFP_I2C_ADDR; } if (adapter->sfp_detection_needed == false) { switch (adapter->mac_mode) { case AL_ETH_MAC_MODE_10GbE_Serial: if ((adapter->lt_en != 0) && (adapter->an_en != 0)) params.default_mode = AL_ETH_LM_MODE_10G_DA; else params.default_mode = AL_ETH_LM_MODE_10G_OPTIC; break; case AL_ETH_MAC_MODE_SGMII: params.default_mode = AL_ETH_LM_MODE_1G; break; default: params.default_mode = AL_ETH_LM_MODE_10G_DA; } } else params.default_mode = AL_ETH_LM_MODE_10G_DA; params.link_training = adapter->lt_en; params.rx_equal = true; params.static_values = !adapter->dont_override_serdes; params.i2c_context = adapter; params.kr_fec_enable = false; params.retimer_exist = adapter->retimer.exist; params.retimer_bus_id = adapter->retimer.bus_id; params.retimer_i2c_addr = adapter->retimer.i2c_addr; params.retimer_channel = adapter->retimer.channel; al_eth_lm_init(&adapter->lm_context, ¶ms); } static int al_eth_board_params_init(struct al_eth_adapter *adapter) { if (adapter->board_type == ALPINE_NIC) { adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial; adapter->sfp_detection_needed = false; adapter->phy_exist = false; adapter->an_en = false; adapter->lt_en = false; adapter->ref_clk_freq = AL_ETH_REF_FREQ_375_MHZ; adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ; } else if (adapter->board_type == ALPINE_FPGA_NIC) { adapter->mac_mode = AL_ETH_MAC_MODE_SGMII; adapter->sfp_detection_needed = false; adapter->phy_exist = false; adapter->an_en = false; adapter->lt_en = false; adapter->ref_clk_freq = AL_ETH_REF_FREQ_375_MHZ; adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ; } else { struct al_eth_board_params params; int rc; adapter->auto_speed = false; rc = al_eth_board_params_get(adapter->mac_base, ¶ms); if (rc != 0) { device_printf(adapter->dev, "board info not available\n"); return (-1); } - adapter->phy_exist = params.phy_exist == TRUE; + adapter->phy_exist = params.phy_exist 
== true; adapter->phy_addr = params.phy_mdio_addr; adapter->an_en = params.autoneg_enable; adapter->lt_en = params.kr_lt_enable; adapter->serdes_grp = params.serdes_grp; adapter->serdes_lane = params.serdes_lane; adapter->sfp_detection_needed = params.sfp_plus_module_exist; adapter->i2c_adapter_id = params.i2c_adapter_id; adapter->ref_clk_freq = params.ref_clk_freq; adapter->dont_override_serdes = params.dont_override_serdes; adapter->link_config.active_duplex = !params.half_duplex; adapter->link_config.autoneg = !params.an_disable; adapter->link_config.force_1000_base_x = params.force_1000_base_x; adapter->retimer.exist = params.retimer_exist; adapter->retimer.bus_id = params.retimer_bus_id; adapter->retimer.i2c_addr = params.retimer_i2c_addr; adapter->retimer.channel = params.retimer_channel; switch (params.speed) { default: device_printf(adapter->dev, "%s: invalid speed (%d)\n", __func__, params.speed); case AL_ETH_BOARD_1G_SPEED_1000M: adapter->link_config.active_speed = 1000; break; case AL_ETH_BOARD_1G_SPEED_100M: adapter->link_config.active_speed = 100; break; case AL_ETH_BOARD_1G_SPEED_10M: adapter->link_config.active_speed = 10; break; } switch (params.mdio_freq) { default: device_printf(adapter->dev, "%s: invalid mdio freq (%d)\n", __func__, params.mdio_freq); case AL_ETH_BOARD_MDIO_FREQ_2_5_MHZ: adapter->mdio_freq = AL_ETH_DEFAULT_MDIO_FREQ_KHZ; break; case AL_ETH_BOARD_MDIO_FREQ_1_MHZ: adapter->mdio_freq = AL_ETH_MDIO_FREQ_1000_KHZ; break; } switch (params.media_type) { case AL_ETH_BOARD_MEDIA_TYPE_RGMII: - if (params.sfp_plus_module_exist == TRUE) + if (params.sfp_plus_module_exist == true) /* Backward compatibility */ adapter->mac_mode = AL_ETH_MAC_MODE_SGMII; else adapter->mac_mode = AL_ETH_MAC_MODE_RGMII; adapter->use_lm = false; break; case AL_ETH_BOARD_MEDIA_TYPE_SGMII: adapter->mac_mode = AL_ETH_MAC_MODE_SGMII; adapter->use_lm = true; break; case AL_ETH_BOARD_MEDIA_TYPE_10GBASE_SR: adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial; adapter->use_lm 
= true; break; case AL_ETH_BOARD_MEDIA_TYPE_AUTO_DETECT: - adapter->sfp_detection_needed = TRUE; + adapter->sfp_detection_needed = true; adapter->auto_speed = false; adapter->use_lm = true; break; case AL_ETH_BOARD_MEDIA_TYPE_AUTO_DETECT_AUTO_SPEED: - adapter->sfp_detection_needed = TRUE; + adapter->sfp_detection_needed = true; adapter->auto_speed = true; adapter->mac_mode_set = false; adapter->use_lm = true; adapter->mac_mode = AL_ETH_MAC_MODE_10GbE_Serial; break; default: device_printf(adapter->dev, "%s: unsupported media type %d\n", __func__, params.media_type); return (-1); } device_printf(adapter->dev, "Board info: phy exist %s. phy addr %d. mdio freq %u Khz. " "SFP connected %s. media %d\n", - params.phy_exist == TRUE ? "Yes" : "No", + params.phy_exist ? "Yes" : "No", params.phy_mdio_addr, adapter->mdio_freq, - params.sfp_plus_module_exist == TRUE ? "Yes" : "No", + params.sfp_plus_module_exist ? "Yes" : "No", params.media_type); } al_eth_mac_addr_read(adapter->ec_base, 0, adapter->mac_addr); return (0); } static int al_eth_function_reset(struct al_eth_adapter *adapter) { struct al_eth_board_params params; int rc; /* save board params so we restore it after reset */ al_eth_board_params_get(adapter->mac_base, ¶ms); al_eth_mac_addr_read(adapter->ec_base, 0, adapter->mac_addr); if (adapter->board_type == ALPINE_INTEGRATED) rc = al_eth_flr_rmn(&al_eth_read_pci_config, &al_eth_write_pci_config, adapter->dev, adapter->mac_base); else rc = al_eth_flr_rmn(&al_eth_fpga_read_pci_config, &al_eth_fpga_write_pci_config, adapter->internal_pcie_base, adapter->mac_base); /* restore params */ al_eth_board_params_set(adapter->mac_base, ¶ms); al_eth_mac_addr_store(adapter->ec_base, 0, adapter->mac_addr); return (rc); } static void al_eth_init_rings(struct al_eth_adapter *adapter) { int i; for (i = 0; i < adapter->num_tx_queues; i++) { struct al_eth_ring *ring = &adapter->tx_ring[i]; ring->ring_id = i; ring->dev = adapter->dev; ring->adapter = adapter; ring->netdev = 
    adapter->netdev;
        al_udma_q_handle_get(&adapter->hal_adapter.tx_udma, i,
            &ring->dma_q);
        ring->sw_count = adapter->tx_ring_count;
        ring->hw_count = adapter->tx_descs_count;
        ring->unmask_reg_offset = al_udma_iofic_unmask_offset_get((struct unit_regs *)adapter->udma_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_C);
        ring->unmask_val = ~(1 << i);
    }

    for (i = 0; i < adapter->num_rx_queues; i++) {
        struct al_eth_ring *ring = &adapter->rx_ring[i];

        ring->ring_id = i;
        ring->dev = adapter->dev;
        ring->adapter = adapter;
        ring->netdev = adapter->netdev;
        al_udma_q_handle_get(&adapter->hal_adapter.rx_udma, i, &ring->dma_q);
        ring->sw_count = adapter->rx_ring_count;
        ring->hw_count = adapter->rx_descs_count;
        ring->unmask_reg_offset = al_udma_iofic_unmask_offset_get(
            (struct unit_regs *)adapter->udma_base,
            AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B);
        ring->unmask_val = ~(1 << i);
    }
}

/*
 * Restart the interface: tear it down, bring it back up, and update
 * the driver flags accordingly.
 */
static void
al_init_locked(void *arg)
{
    struct al_eth_adapter *adapter = arg;
    if_t ifp = adapter->netdev;
    int rc = 0;

    al_eth_down(adapter);
    rc = al_eth_up(adapter);

    if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
    if (rc == 0)
        if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
}

/* ifnet init entry point. */
static void
al_init(void *arg)
{
    struct al_eth_adapter *adapter = arg;

    al_init_locked(adapter);
}

/*
 * Attach a cluster mbuf to rx_info and DMA-map it; the HAL buffer
 * address/length skip AL_IP_ALIGNMENT_OFFSET bytes so the IP header
 * lands aligned.  Returns 0 if a buffer is already present or on
 * success, ENOMEM/EFAULT on allocation/mapping failure.
 */
static inline int
al_eth_alloc_rx_buf(struct al_eth_adapter *adapter,
    struct al_eth_ring *rx_ring,
    struct al_eth_rx_buffer *rx_info)
{
    struct al_buf *al_buf;
    bus_dma_segment_t segs[2];
    int error;
    int nsegs;

    if (rx_info->m != NULL)
        return (0);

    rx_info->data_size = adapter->rx_mbuf_sz;

    AL_RX_LOCK(adapter);

    /* Get mbuf using UMA allocator */
    rx_info->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
        rx_info->data_size);
    AL_RX_UNLOCK(adapter);

    if (rx_info->m == NULL)
        return (ENOMEM);

    rx_info->m->m_pkthdr.len = rx_info->m->m_len = adapter->rx_mbuf_sz;

    /* Map packets for DMA */
    error = bus_dmamap_load_mbuf_sg(rx_ring->dma_buf_tag, rx_info->dma_map,
        rx_info->m, segs, &nsegs, BUS_DMA_NOWAIT);
    if (__predict_false(error)) {
        device_printf(rx_ring->dev, "failed to map mbuf, error = %d\n",
            error);
        m_freem(rx_info->m);
        rx_info->m = NULL;
        return (EFAULT);
    }

    al_buf = &rx_info->al_buf;
    al_buf->addr = segs[0].ds_addr + AL_IP_ALIGNMENT_OFFSET;
    al_buf->len = rx_info->data_size - AL_IP_ALIGNMENT_OFFSET;

    return (0);
}

/*
 * Post up to "num" fresh RX buffers on queue qid.  Stops early on
 * allocation or descriptor-add failure; returns the number actually
 * posted and advances next_to_use by that amount.
 */
static int
al_eth_refill_rx_bufs(struct al_eth_adapter *adapter, unsigned int qid,
    unsigned int num)
{
    struct al_eth_ring *rx_ring = &adapter->rx_ring[qid];
    uint16_t next_to_use;
    unsigned int i;

    next_to_use = rx_ring->next_to_use;

    for (i = 0; i < num; i++) {
        int rc;
        struct al_eth_rx_buffer *rx_info =
            &rx_ring->rx_buffer_info[next_to_use];

        if (__predict_false(al_eth_alloc_rx_buf(adapter,
            rx_ring, rx_info) < 0)) {
            device_printf(adapter->dev,
                "failed to alloc buffer for rx queue %d\n", qid);
            break;
        }

        rc = al_eth_rx_buffer_add(rx_ring->dma_q,
            &rx_info->al_buf, AL_ETH_RX_FLAGS_INT, NULL);
        if (__predict_false(rc)) {
            device_printf(adapter->dev,
                "failed to add buffer for rx queue %d\n", qid);
            break;
        }

        next_to_use = AL_ETH_RX_RING_IDX_NEXT(rx_ring, next_to_use);
    }

    if (__predict_false(i < num))
        device_printf(adapter->dev,
            "refilled rx queue %d with %d pages only - available %d\n",
            qid, i, al_udma_available_get(rx_ring->dma_q));

    /* Kick the hardware only if at least one buffer was queued. */
    if (__predict_true(i))
        al_eth_rx_buffer_action(rx_ring->dma_q, i);

    rx_ring->next_to_use = next_to_use;

    return (i);
}

/*
 * al_eth_refill_all_rx_bufs - allocate all queues Rx buffers
 * @adapter: board private structure
 */
static void
al_eth_refill_all_rx_bufs(struct al_eth_adapter *adapter)
{
    int i;

    for (i = 0; i < adapter->num_rx_queues; i++)
        al_eth_refill_rx_bufs(adapter, i, AL_ETH_DEFAULT_RX_DESCS - 1);
}

/*
 * Reclaim completed TX descriptors on one ring: unload each finished
 * packet's DMA map, free its mbuf, and advance next_to_clean.
 */
static void
al_eth_tx_do_cleanup(struct al_eth_ring *tx_ring)
{
    unsigned int total_done;
    uint16_t next_to_clean;
    int qid = tx_ring->ring_id;

    total_done = al_eth_comp_tx_get(tx_ring->dma_q);
    device_printf_dbg(tx_ring->dev,
        "tx_poll: q %d total completed descs %x\n", qid, total_done);
    next_to_clean = tx_ring->next_to_clean;

    while (total_done != 0) {
        struct al_eth_tx_buffer *tx_info;
        struct mbuf *mbuf;
tx_info = &tx_ring->tx_buffer_info[next_to_clean]; /* stop if not all descriptors of the packet are completed */ if (tx_info->tx_descs > total_done) break; mbuf = tx_info->m; tx_info->m = NULL; device_printf_dbg(tx_ring->dev, "tx_poll: q %d mbuf %p completed\n", qid, mbuf); /* map is no longer required */ bus_dmamap_unload(tx_ring->dma_buf_tag, tx_info->dma_map); m_freem(mbuf); total_done -= tx_info->tx_descs; next_to_clean = AL_ETH_TX_RING_IDX_NEXT(tx_ring, next_to_clean); } tx_ring->next_to_clean = next_to_clean; device_printf_dbg(tx_ring->dev, "tx_poll: q %d done next to clean %x\n", qid, next_to_clean); /* * need to make the rings circular update visible to * al_eth_start_xmit() before checking for netif_queue_stopped(). */ al_smp_data_memory_barrier(); } static void al_eth_tx_csum(struct al_eth_ring *tx_ring, struct al_eth_tx_buffer *tx_info, struct al_eth_pkt *hal_pkt, struct mbuf *m) { uint32_t mss = m->m_pkthdr.tso_segsz; struct ether_vlan_header *eh; uint16_t etype; #ifdef INET struct ip *ip; #endif #ifdef INET6 struct ip6_hdr *ip6; #endif struct tcphdr *th = NULL; int ehdrlen, ip_hlen = 0; uint8_t ipproto = 0; uint32_t offload = 0; if (mss != 0) offload = 1; if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) offload = 1; if ((m->m_pkthdr.csum_flags & CSUM_OFFLOAD) != 0) offload = 1; if (offload != 0) { struct al_eth_meta_data *meta = &tx_ring->hal_meta; if (mss != 0) hal_pkt->flags |= (AL_ETH_TX_FLAGS_TSO | AL_ETH_TX_FLAGS_L4_CSUM); else hal_pkt->flags |= (AL_ETH_TX_FLAGS_L4_CSUM | AL_ETH_TX_FLAGS_L4_PARTIAL_CSUM); /* * Determine where frame payload starts. * Jump over vlan headers if already present, * helpful for QinQ too. 
*/ eh = mtod(m, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { etype = ntohs(eh->evl_proto); ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; } else { etype = ntohs(eh->evl_encap_proto); ehdrlen = ETHER_HDR_LEN; } switch (etype) { #ifdef INET case ETHERTYPE_IP: ip = (struct ip *)(m->m_data + ehdrlen); ip_hlen = ip->ip_hl << 2; ipproto = ip->ip_p; hal_pkt->l3_proto_idx = AL_ETH_PROTO_ID_IPv4; th = (struct tcphdr *)((caddr_t)ip + ip_hlen); if (mss != 0) hal_pkt->flags |= AL_ETH_TX_FLAGS_IPV4_L3_CSUM; if (ipproto == IPPROTO_TCP) hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_TCP; else hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_UDP; break; #endif /* INET */ #ifdef INET6 case ETHERTYPE_IPV6: ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen); hal_pkt->l3_proto_idx = AL_ETH_PROTO_ID_IPv6; ip_hlen = sizeof(struct ip6_hdr); th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen); ipproto = ip6->ip6_nxt; if (ipproto == IPPROTO_TCP) hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_TCP; else hal_pkt->l4_proto_idx = AL_ETH_PROTO_ID_UDP; break; #endif /* INET6 */ default: break; } meta->words_valid = 4; meta->l3_header_len = ip_hlen; meta->l3_header_offset = ehdrlen; if (th != NULL) meta->l4_header_len = th->th_off; /* this param needed only for TSO */ meta->mss_idx_sel = 0; /* check how to select MSS */ meta->mss_val = mss; hal_pkt->meta = meta; } else hal_pkt->meta = NULL; } #define XMIT_QUEUE_TIMEOUT 100 static void al_eth_xmit_mbuf(struct al_eth_ring *tx_ring, struct mbuf *m) { struct al_eth_tx_buffer *tx_info; int error; int nsegs, a; uint16_t next_to_use; bus_dma_segment_t segs[AL_ETH_PKT_MAX_BUFS + 1]; struct al_eth_pkt *hal_pkt; struct al_buf *al_buf; - boolean_t remap; + bool remap; /* Check if queue is ready */ if (unlikely(tx_ring->stall) != 0) { for (a = 0; a < XMIT_QUEUE_TIMEOUT; a++) { if (al_udma_available_get(tx_ring->dma_q) >= (AL_ETH_DEFAULT_TX_HW_DESCS - AL_ETH_TX_WAKEUP_THRESH)) { tx_ring->stall = 0; break; } pause("stall", 1); } if (a == XMIT_QUEUE_TIMEOUT) 
{ device_printf(tx_ring->dev, "timeout waiting for queue %d ready!\n", tx_ring->ring_id); return; } else { device_printf_dbg(tx_ring->dev, "queue %d is ready!\n", tx_ring->ring_id); } } next_to_use = tx_ring->next_to_use; tx_info = &tx_ring->tx_buffer_info[next_to_use]; tx_info->m = m; hal_pkt = &tx_info->hal_pkt; if (m == NULL) { device_printf(tx_ring->dev, "mbuf is NULL\n"); return; } - remap = TRUE; + remap = true; /* Map packets for DMA */ retry: error = bus_dmamap_load_mbuf_sg(tx_ring->dma_buf_tag, tx_info->dma_map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (__predict_false(error)) { struct mbuf *m_new; if (error == EFBIG) { /* Try it again? - one try */ - if (remap == TRUE) { - remap = FALSE; + if (remap == true) { + remap = false; m_new = m_defrag(m, M_NOWAIT); if (m_new == NULL) { device_printf(tx_ring->dev, "failed to defrag mbuf\n"); goto exit; } m = m_new; goto retry; } else { device_printf(tx_ring->dev, "failed to map mbuf, error %d\n", error); goto exit; } } else { device_printf(tx_ring->dev, "failed to map mbuf, error %d\n", error); goto exit; } } /* set flags and meta data */ hal_pkt->flags = AL_ETH_TX_FLAGS_INT; al_eth_tx_csum(tx_ring, tx_info, hal_pkt, m); al_buf = hal_pkt->bufs; for (a = 0; a < nsegs; a++) { al_buf->addr = segs[a].ds_addr; al_buf->len = segs[a].ds_len; al_buf++; } hal_pkt->num_of_bufs = nsegs; /* prepare the packet's descriptors to dma engine */ tx_info->tx_descs = al_eth_tx_pkt_prepare(tx_ring->dma_q, hal_pkt); if (tx_info->tx_descs == 0) goto exit; /* * stop the queue when no more space available, the packet can have up * to AL_ETH_PKT_MAX_BUFS + 1 buffers and a meta descriptor */ if (unlikely(al_udma_available_get(tx_ring->dma_q) < (AL_ETH_PKT_MAX_BUFS + 2))) { tx_ring->stall = 1; device_printf_dbg(tx_ring->dev, "stall, stopping queue %d...\n", tx_ring->ring_id); al_data_memory_barrier(); } tx_ring->next_to_use = AL_ETH_TX_RING_IDX_NEXT(tx_ring, next_to_use); /* trigger the dma engine */ al_eth_tx_dma_action(tx_ring->dma_q, 
tx_info->tx_descs);

	return;

exit:
	m_freem(m);
}

/*
 * Deferred task: reap completed tx descriptors for one ring via
 * al_eth_tx_do_cleanup(), then unmask the ring's completion interrupt.
 */
static void
al_eth_tx_cmpl_work(void *arg, int pending)
{
	struct al_eth_ring *tx_ring = arg;

	if (napi != 0) {
		tx_ring->cmpl_is_running = 1;
		al_data_memory_barrier();
	}

	al_eth_tx_do_cleanup(tx_ring);

	if (napi != 0) {
		tx_ring->cmpl_is_running = 0;
		al_data_memory_barrier();
	}

	/* all work done, enable IRQs */
	al_eth_irq_config(tx_ring->unmask_reg_offset, tx_ring->unmask_val);
}

static int
al_eth_tx_cmlp_irq_filter(void *arg)
{
	struct al_eth_ring *tx_ring = arg;

	/* Interrupt should be auto-masked upon arrival */

	device_printf_dbg(tx_ring->dev, "%s for ring ID = %d\n", __func__,
	    tx_ring->ring_id);

	/*
	 * For napi, if work is not running, schedule it. Always schedule
	 * for casual (non-napi) packet handling.
	 */
	if ((napi == 0) || (napi && tx_ring->cmpl_is_running == 0))
		taskqueue_enqueue(tx_ring->cmpl_tq, &tx_ring->cmpl_task);

	/* Do not run bottom half */
	return (FILTER_HANDLED);
}

static int
al_eth_rx_recv_irq_filter(void *arg)
{
	struct al_eth_ring *rx_ring = arg;

	/* Interrupt should be auto-masked upon arrival */

	device_printf_dbg(rx_ring->dev, "%s for ring ID = %d\n", __func__,
	    rx_ring->ring_id);

	/*
	 * For napi, if work is not running, schedule it. Always schedule
	 * for casual (non-napi) packet handling.
	 */
	if ((napi == 0) || (napi && rx_ring->enqueue_is_running == 0))
		taskqueue_enqueue(rx_ring->enqueue_tq, &rx_ring->enqueue_task);

	/* Do not run bottom half */
	return (FILTER_HANDLED);
}

/*
 * al_eth_rx_checksum - indicate in mbuf if hw indicated a good cksum
 * @adapter: structure containing adapter specific data
 * @hal_pkt: HAL structure for the packet
 * @mbuf: mbuf currently being received and modified
 */
static inline void
al_eth_rx_checksum(struct al_eth_adapter *adapter,
    struct al_eth_pkt *hal_pkt, struct mbuf *mbuf)
{

	/* if IPv4 and error */
	if (unlikely((if_getcapenable(adapter->netdev) & IFCAP_RXCSUM) &&
	    (hal_pkt->l3_proto_idx == AL_ETH_PROTO_ID_IPv4) &&
	    (hal_pkt->flags & AL_ETH_RX_FLAGS_L3_CSUM_ERR))) {
		device_printf(adapter->dev,"rx ipv4 header checksum error\n");
		return;
	}

	/* if IPv6 and error */
	if (unlikely((if_getcapenable(adapter->netdev) & IFCAP_RXCSUM_IPV6) &&
	    (hal_pkt->l3_proto_idx == AL_ETH_PROTO_ID_IPv6) &&
	    (hal_pkt->flags & AL_ETH_RX_FLAGS_L3_CSUM_ERR))) {
		device_printf(adapter->dev,"rx ipv6 header checksum error\n");
		return;
	}

	/* if TCP/UDP */
	if (likely((hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_TCP) ||
	    (hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_UDP))) {
		if (unlikely(hal_pkt->flags & AL_ETH_RX_FLAGS_L4_CSUM_ERR)) {
			device_printf_dbg(adapter->dev, "rx L4 checksum error\n");

			/* TCP/UDP checksum error */
			mbuf->m_pkthdr.csum_flags = 0;
		} else {
			device_printf_dbg(adapter->dev, "rx checksum correct\n");

			/* IP Checksum Good */
			mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
			mbuf->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		}
	}
}

static struct mbuf*
al_eth_rx_mbuf(struct al_eth_adapter *adapter,
    struct al_eth_ring *rx_ring, struct al_eth_pkt *hal_pkt,
    unsigned int descs, uint16_t *next_to_clean)
{
	struct mbuf *mbuf;
	struct al_eth_rx_buffer *rx_info =
	    &rx_ring->rx_buffer_info[*next_to_clean];
	unsigned int len;

	len = hal_pkt->bufs[0].len;
	device_printf_dbg(adapter->dev, "rx_info %p data %p\n", rx_info,
	    rx_info->m);

	if (rx_info->m == NULL) {
		*next_to_clean =
AL_ETH_RX_RING_IDX_NEXT(rx_ring, *next_to_clean); return (NULL); } mbuf = rx_info->m; mbuf->m_pkthdr.len = len; mbuf->m_len = len; mbuf->m_pkthdr.rcvif = rx_ring->netdev; mbuf->m_flags |= M_PKTHDR; if (len <= adapter->small_copy_len) { struct mbuf *smbuf; device_printf_dbg(adapter->dev, "rx small packet. len %d\n", len); AL_RX_LOCK(adapter); smbuf = m_gethdr(M_NOWAIT, MT_DATA); AL_RX_UNLOCK(adapter); if (__predict_false(smbuf == NULL)) { device_printf(adapter->dev, "smbuf is NULL\n"); return (NULL); } smbuf->m_data = smbuf->m_data + AL_IP_ALIGNMENT_OFFSET; memcpy(smbuf->m_data, mbuf->m_data + AL_IP_ALIGNMENT_OFFSET, len); smbuf->m_len = len; smbuf->m_pkthdr.rcvif = rx_ring->netdev; /* first desc of a non-ps chain */ smbuf->m_flags |= M_PKTHDR; smbuf->m_pkthdr.len = smbuf->m_len; *next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring, *next_to_clean); return (smbuf); } mbuf->m_data = mbuf->m_data + AL_IP_ALIGNMENT_OFFSET; /* Unmap the buffer */ bus_dmamap_unload(rx_ring->dma_buf_tag, rx_info->dma_map); rx_info->m = NULL; *next_to_clean = AL_ETH_RX_RING_IDX_NEXT(rx_ring, *next_to_clean); return (mbuf); } static void al_eth_rx_recv_work(void *arg, int pending) { struct al_eth_ring *rx_ring = arg; struct mbuf *mbuf; struct lro_entry *queued; unsigned int qid = rx_ring->ring_id; struct al_eth_pkt *hal_pkt = &rx_ring->hal_pkt; uint16_t next_to_clean = rx_ring->next_to_clean; uint32_t refill_required; uint32_t refill_actual; uint32_t do_if_input; if (napi != 0) { rx_ring->enqueue_is_running = 1; al_data_memory_barrier(); } do { unsigned int descs; descs = al_eth_pkt_rx(rx_ring->dma_q, hal_pkt); if (unlikely(descs == 0)) break; device_printf_dbg(rx_ring->dev, "rx_poll: q %d got packet " "from hal. descs %d\n", qid, descs); device_printf_dbg(rx_ring->dev, "rx_poll: q %d flags %x. 
" "l3 proto %d l4 proto %d\n", qid, hal_pkt->flags, hal_pkt->l3_proto_idx, hal_pkt->l4_proto_idx); /* ignore if detected dma or eth controller errors */ if ((hal_pkt->flags & (AL_ETH_RX_ERROR | AL_UDMA_CDESC_ERROR)) != 0) { device_printf(rx_ring->dev, "receive packet with error. " "flags = 0x%x\n", hal_pkt->flags); next_to_clean = AL_ETH_RX_RING_IDX_ADD(rx_ring, next_to_clean, descs); continue; } /* allocate mbuf and fill it */ mbuf = al_eth_rx_mbuf(rx_ring->adapter, rx_ring, hal_pkt, descs, &next_to_clean); /* exit if we failed to retrieve a buffer */ if (unlikely(mbuf == NULL)) { next_to_clean = AL_ETH_RX_RING_IDX_ADD(rx_ring, next_to_clean, descs); break; } if (__predict_true(if_getcapenable(rx_ring->netdev) & IFCAP_RXCSUM || if_getcapenable(rx_ring->netdev) & IFCAP_RXCSUM_IPV6)) { al_eth_rx_checksum(rx_ring->adapter, hal_pkt, mbuf); } mbuf->m_pkthdr.flowid = qid; M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE); /* * LRO is only for IP/TCP packets and TCP checksum of the packet * should be computed by hardware. 
*/ do_if_input = 1; if ((rx_ring->lro_enabled != 0) && ((mbuf->m_pkthdr.csum_flags & CSUM_IP_VALID) != 0) && hal_pkt->l4_proto_idx == AL_ETH_PROTO_ID_TCP) { /* * Send to the stack if: * - LRO not enabled, or * - no LRO resources, or * - lro enqueue fails */ if (rx_ring->lro.lro_cnt != 0) { if (tcp_lro_rx(&rx_ring->lro, mbuf, 0) == 0) do_if_input = 0; } } if (do_if_input) if_input(rx_ring->netdev, mbuf); } while (1); rx_ring->next_to_clean = next_to_clean; refill_required = al_udma_available_get(rx_ring->dma_q); refill_actual = al_eth_refill_rx_bufs(rx_ring->adapter, qid, refill_required); if (unlikely(refill_actual < refill_required)) { device_printf_dbg(rx_ring->dev, "%s: not filling rx queue %d\n", __func__, qid); } while (((queued = LIST_FIRST(&rx_ring->lro.lro_active)) != NULL)) { LIST_REMOVE(queued, next); tcp_lro_flush(&rx_ring->lro, queued); } if (napi != 0) { rx_ring->enqueue_is_running = 0; al_data_memory_barrier(); } /* unmask irq */ al_eth_irq_config(rx_ring->unmask_reg_offset, rx_ring->unmask_val); } static void al_eth_start_xmit(void *arg, int pending) { struct al_eth_ring *tx_ring = arg; struct mbuf *mbuf; if (napi != 0) { tx_ring->enqueue_is_running = 1; al_data_memory_barrier(); } while (1) { mtx_lock(&tx_ring->br_mtx); mbuf = drbr_dequeue(NULL, tx_ring->br); mtx_unlock(&tx_ring->br_mtx); if (mbuf == NULL) break; al_eth_xmit_mbuf(tx_ring, mbuf); } if (napi != 0) { tx_ring->enqueue_is_running = 0; al_data_memory_barrier(); while (1) { mtx_lock(&tx_ring->br_mtx); mbuf = drbr_dequeue(NULL, tx_ring->br); mtx_unlock(&tx_ring->br_mtx); if (mbuf == NULL) break; al_eth_xmit_mbuf(tx_ring, mbuf); } } } static int al_mq_start(if_t ifp, struct mbuf *m) { struct al_eth_adapter *adapter = if_getsoftc(ifp); struct al_eth_ring *tx_ring; int i; int ret; /* Which queue to use */ if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) i = m->m_pkthdr.flowid % adapter->num_tx_queues; else i = curcpu % adapter->num_tx_queues; if ((if_getdrvflags(ifp) & 
(IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) { return (EFAULT); } tx_ring = &adapter->tx_ring[i]; device_printf_dbg(adapter->dev, "dgb start() - assuming link is active, " "sending packet to queue %d\n", i); ret = drbr_enqueue(ifp, tx_ring->br, m); /* * For napi, if work is not running, schedule it. Always schedule * for casual (non-napi) packet handling. */ if ((napi == 0) || ((napi != 0) && (tx_ring->enqueue_is_running == 0))) taskqueue_enqueue(tx_ring->enqueue_tq, &tx_ring->enqueue_task); return (ret); } static void al_qflush(if_t ifp) { /* unused */ } static inline void al_eth_flow_ctrl_init(struct al_eth_adapter *adapter) { uint8_t default_flow_ctrl; default_flow_ctrl = AL_ETH_FLOW_CTRL_TX_PAUSE; default_flow_ctrl |= AL_ETH_FLOW_CTRL_RX_PAUSE; adapter->link_config.flow_ctrl_supported = default_flow_ctrl; } static int al_eth_flow_ctrl_config(struct al_eth_adapter *adapter) { struct al_eth_flow_control_params *flow_ctrl_params; uint8_t active = adapter->link_config.flow_ctrl_active; int i; flow_ctrl_params = &adapter->flow_ctrl_params; flow_ctrl_params->type = AL_ETH_FLOW_CONTROL_TYPE_LINK_PAUSE; flow_ctrl_params->obay_enable = ((active & AL_ETH_FLOW_CTRL_RX_PAUSE) != 0); flow_ctrl_params->gen_enable = ((active & AL_ETH_FLOW_CTRL_TX_PAUSE) != 0); flow_ctrl_params->rx_fifo_th_high = AL_ETH_FLOW_CTRL_RX_FIFO_TH_HIGH; flow_ctrl_params->rx_fifo_th_low = AL_ETH_FLOW_CTRL_RX_FIFO_TH_LOW; flow_ctrl_params->quanta = AL_ETH_FLOW_CTRL_QUANTA; flow_ctrl_params->quanta_th = AL_ETH_FLOW_CTRL_QUANTA_TH; /* map priority to queue index, queue id = priority/2 */ for (i = 0; i < AL_ETH_FWD_PRIO_TABLE_NUM; i++) flow_ctrl_params->prio_q_map[0][i] = 1 << (i >> 1); al_eth_flow_control_config(&adapter->hal_adapter, flow_ctrl_params); return (0); } static void al_eth_flow_ctrl_enable(struct al_eth_adapter *adapter) { /* * change the active configuration to the default / force by ethtool * and call to configure */ adapter->link_config.flow_ctrl_active = 
adapter->link_config.flow_ctrl_supported; al_eth_flow_ctrl_config(adapter); } static void al_eth_flow_ctrl_disable(struct al_eth_adapter *adapter) { adapter->link_config.flow_ctrl_active = 0; al_eth_flow_ctrl_config(adapter); } static int al_eth_hw_init(struct al_eth_adapter *adapter) { int rc; rc = al_eth_hw_init_adapter(adapter); if (rc != 0) return (rc); rc = al_eth_mac_config(&adapter->hal_adapter, adapter->mac_mode); if (rc < 0) { device_printf(adapter->dev, "%s failed to configure mac!\n", __func__); return (rc); } if ((adapter->mac_mode == AL_ETH_MAC_MODE_SGMII) || (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII && - adapter->phy_exist == FALSE)) { + adapter->phy_exist == false)) { rc = al_eth_mac_link_config(&adapter->hal_adapter, adapter->link_config.force_1000_base_x, adapter->link_config.autoneg, adapter->link_config.active_speed, adapter->link_config.active_duplex); if (rc != 0) { device_printf(adapter->dev, "%s failed to configure link parameters!\n", __func__); return (rc); } } rc = al_eth_mdio_config(&adapter->hal_adapter, - AL_ETH_MDIO_TYPE_CLAUSE_22, TRUE /* shared_mdio_if */, + AL_ETH_MDIO_TYPE_CLAUSE_22, AL_TRUE /* shared_mdio_if */, adapter->ref_clk_freq, adapter->mdio_freq); if (rc != 0) { device_printf(adapter->dev, "%s failed at mdio config!\n", __func__); return (rc); } al_eth_flow_ctrl_init(adapter); return (rc); } static int al_eth_hw_stop(struct al_eth_adapter *adapter) { al_eth_mac_stop(&adapter->hal_adapter); /* * wait till pending rx packets written and UDMA becomes idle, * the MAC has ~10KB fifo, 10us should be enough time for the * UDMA to write to the memory */ DELAY(10); al_eth_adapter_stop(&adapter->hal_adapter); adapter->flags |= AL_ETH_FLAG_RESET_REQUESTED; /* disable flow ctrl to avoid pause packets*/ al_eth_flow_ctrl_disable(adapter); return (0); } /* * al_eth_intr_intx_all - Legacy Interrupt Handler for all interrupts * @irq: interrupt number * @data: pointer to a network interface device structure */ static int 
al_eth_intr_intx_all(void *data) { struct al_eth_adapter *adapter = data; struct unit_regs __iomem *regs_base = (struct unit_regs __iomem *)adapter->udma_base; uint32_t reg; reg = al_udma_iofic_read_cause(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_A); if (likely(reg)) device_printf_dbg(adapter->dev, "%s group A cause %x\n", __func__, reg); if (unlikely(reg & AL_INT_GROUP_A_GROUP_D_SUM)) { struct al_iofic_grp_ctrl __iomem *sec_ints_base; uint32_t cause_d = al_udma_iofic_read_cause(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_D); sec_ints_base = ®s_base->gen.interrupt_regs.secondary_iofic_ctrl[0]; if (cause_d != 0) { device_printf_dbg(adapter->dev, "got interrupt from group D. cause %x\n", cause_d); cause_d = al_iofic_read_cause(sec_ints_base, AL_INT_GROUP_A); device_printf(adapter->dev, "secondary A cause %x\n", cause_d); cause_d = al_iofic_read_cause(sec_ints_base, AL_INT_GROUP_B); device_printf_dbg(adapter->dev, "secondary B cause %x\n", cause_d); } } if ((reg & AL_INT_GROUP_A_GROUP_B_SUM) != 0 ) { uint32_t cause_b = al_udma_iofic_read_cause(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B); int qid; device_printf_dbg(adapter->dev, "secondary B cause %x\n", cause_b); for (qid = 0; qid < adapter->num_rx_queues; qid++) { if (cause_b & (1 << qid)) { /* mask */ al_udma_iofic_mask( (struct unit_regs __iomem *)adapter->udma_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B, 1 << qid); } } } if ((reg & AL_INT_GROUP_A_GROUP_C_SUM) != 0) { uint32_t cause_c = al_udma_iofic_read_cause(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_C); int qid; device_printf_dbg(adapter->dev, "secondary C cause %x\n", cause_c); for (qid = 0; qid < adapter->num_tx_queues; qid++) { if ((cause_c & (1 << qid)) != 0) { al_udma_iofic_mask( (struct unit_regs __iomem *)adapter->udma_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_C, 1 << qid); } } } al_eth_tx_cmlp_irq_filter(adapter->tx_ring); return (0); } static int al_eth_intr_msix_all(void *data) { struct 
al_eth_adapter *adapter = data;

	device_printf_dbg(adapter->dev, "%s\n", __func__);
	return (0);
}

/* MSI-X management-vector handler stub: only logs the event. */
static int
al_eth_intr_msix_mgmt(void *data)
{
	struct al_eth_adapter *adapter = data;

	device_printf_dbg(adapter->dev, "%s\n", __func__);
	return (0);
}

/*
 * Allocate and program the MSI-X entry table: one management vector
 * plus one vector per rx queue and one per tx queue.  Entries start at
 * index 2 in the device's vector space.
 */
static int
al_eth_enable_msix(struct al_eth_adapter *adapter)
{
	int i, msix_vecs, rc, count;

	device_printf_dbg(adapter->dev, "%s\n", __func__);

	msix_vecs = 1 + adapter->num_rx_queues + adapter->num_tx_queues;

	device_printf_dbg(adapter->dev,
	    "Try to enable MSIX, vector numbers = %d\n", msix_vecs);

	/*
	 * NOTE(review): malloc(9) with M_WAITOK cannot return NULL, so the
	 * check below is dead code; kept byte-for-byte.
	 */
	adapter->msix_entries = malloc(msix_vecs*sizeof(*adapter->msix_entries),
	    M_IFAL, M_ZERO | M_WAITOK);

	if (adapter->msix_entries == NULL) {
		device_printf_dbg(adapter->dev, "failed to allocate"
		    " msix_entries %d\n", msix_vecs);
		rc = ENOMEM;
		goto exit;
	}

	/* management vector (GROUP_A) @2*/
	adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].entry = 2;
	adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector = 0;

	/* rx queues start @3 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		int irq_idx = AL_ETH_RXQ_IRQ_IDX(adapter, i);

		adapter->msix_entries[irq_idx].entry = 3 + i;
		adapter->msix_entries[irq_idx].vector = 0;
	}

	/* tx queues start @7 */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		int irq_idx = AL_ETH_TXQ_IRQ_IDX(adapter, i);

		adapter->msix_entries[irq_idx].entry =
		    3 + AL_ETH_MAX_HW_QUEUES + i;
		adapter->msix_entries[irq_idx].vector = 0;
	}

	count = msix_vecs + 2; /* entries start from 2 */
	rc = pci_alloc_msix(adapter->dev, &count);
	if (rc != 0) {
		device_printf_dbg(adapter->dev, "failed to allocate MSIX "
		    "vectors %d\n", msix_vecs+2);
		device_printf_dbg(adapter->dev, "ret = %d\n", rc);
		goto msix_entries_exit;
	}

	if (count != msix_vecs + 2) {
		device_printf_dbg(adapter->dev, "failed to allocate all MSIX "
		    "vectors %d, allocated %d\n", msix_vecs+2, count);
		rc = ENOSPC;
		goto msix_entries_exit;
	}

	for (i = 0; i < msix_vecs; i++)
		adapter->msix_entries[i].vector = 2 + 1 + i;

	device_printf_dbg(adapter->dev, "successfully enabled MSIX,"
	    " vectors %d\n",
msix_vecs); adapter->msix_vecs = msix_vecs; adapter->flags |= AL_ETH_FLAG_MSIX_ENABLED; goto exit; msix_entries_exit: adapter->msix_vecs = 0; free(adapter->msix_entries, M_IFAL); adapter->msix_entries = NULL; exit: return (rc); } static int al_eth_setup_int_mode(struct al_eth_adapter *adapter) { int i, rc; rc = al_eth_enable_msix(adapter); if (rc != 0) { device_printf(adapter->dev, "Failed to enable MSIX mode.\n"); return (rc); } adapter->irq_vecs = max(1, adapter->msix_vecs); /* single INTX mode */ if (adapter->msix_vecs == 0) { snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name, AL_ETH_IRQNAME_SIZE, "al-eth-intx-all@pci:%s", device_get_name(adapter->dev)); adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler = al_eth_intr_intx_all; /* IRQ vector will be resolved from device resources */ adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector = 0; adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter; device_printf(adapter->dev, "%s and vector %d \n", __func__, adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector); return (0); } /* single MSI-X mode */ if (adapter->msix_vecs == 1) { snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name, AL_ETH_IRQNAME_SIZE, "al-eth-msix-all@pci:%s", device_get_name(adapter->dev)); adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler = al_eth_intr_msix_all; adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector = adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector; adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter; return (0); } /* MSI-X per queue */ snprintf(adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].name, AL_ETH_IRQNAME_SIZE, "al-eth-msix-mgmt@pci:%s", device_get_name(adapter->dev)); adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].handler = al_eth_intr_msix_mgmt; adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].data = adapter; adapter->irq_tbl[AL_ETH_MGMT_IRQ_IDX].vector = adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector; for (i = 0; i < adapter->num_rx_queues; i++) { int irq_idx = AL_ETH_RXQ_IRQ_IDX(adapter, i); snprintf(adapter->irq_tbl[irq_idx].name, AL_ETH_IRQNAME_SIZE, 
"al-eth-rx-comp-%d@pci:%s", i, device_get_name(adapter->dev)); adapter->irq_tbl[irq_idx].handler = al_eth_rx_recv_irq_filter; adapter->irq_tbl[irq_idx].data = &adapter->rx_ring[i]; adapter->irq_tbl[irq_idx].vector = adapter->msix_entries[irq_idx].vector; } for (i = 0; i < adapter->num_tx_queues; i++) { int irq_idx = AL_ETH_TXQ_IRQ_IDX(adapter, i); snprintf(adapter->irq_tbl[irq_idx].name, AL_ETH_IRQNAME_SIZE, "al-eth-tx-comp-%d@pci:%s", i, device_get_name(adapter->dev)); adapter->irq_tbl[irq_idx].handler = al_eth_tx_cmlp_irq_filter; adapter->irq_tbl[irq_idx].data = &adapter->tx_ring[i]; adapter->irq_tbl[irq_idx].vector = adapter->msix_entries[irq_idx].vector; } return (0); } static void __al_eth_free_irq(struct al_eth_adapter *adapter) { struct al_eth_irq *irq; int i, rc; for (i = 0; i < adapter->irq_vecs; i++) { irq = &adapter->irq_tbl[i]; if (irq->requested != 0) { device_printf_dbg(adapter->dev, "tear down irq: %d\n", irq->vector); rc = bus_teardown_intr(adapter->dev, irq->res, irq->cookie); if (rc != 0) device_printf(adapter->dev, "failed to tear " "down irq: %d\n", irq->vector); } irq->requested = 0; } } static void al_eth_free_irq(struct al_eth_adapter *adapter) { struct al_eth_irq *irq; int i, rc; #ifdef CONFIG_RFS_ACCEL if (adapter->msix_vecs >= 1) { free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap); adapter->netdev->rx_cpu_rmap = NULL; } #endif __al_eth_free_irq(adapter); for (i = 0; i < adapter->irq_vecs; i++) { irq = &adapter->irq_tbl[i]; if (irq->res == NULL) continue; device_printf_dbg(adapter->dev, "release resource irq: %d\n", irq->vector); rc = bus_release_resource(adapter->dev, SYS_RES_IRQ, irq->vector, irq->res); irq->res = NULL; if (rc != 0) device_printf(adapter->dev, "dev has no parent while " "releasing res for irq: %d\n", irq->vector); } pci_release_msi(adapter->dev); adapter->flags &= ~AL_ETH_FLAG_MSIX_ENABLED; adapter->msix_vecs = 0; free(adapter->msix_entries, M_IFAL); adapter->msix_entries = NULL; } static int al_eth_request_irq(struct 
al_eth_adapter *adapter) { unsigned long flags; struct al_eth_irq *irq; int rc = 0, i, v; if ((adapter->flags & AL_ETH_FLAG_MSIX_ENABLED) != 0) flags = RF_ACTIVE; else flags = RF_ACTIVE | RF_SHAREABLE; for (i = 0; i < adapter->irq_vecs; i++) { irq = &adapter->irq_tbl[i]; if (irq->requested != 0) continue; irq->res = bus_alloc_resource_any(adapter->dev, SYS_RES_IRQ, &irq->vector, flags); if (irq->res == NULL) { device_printf(adapter->dev, "could not allocate " "irq vector=%d\n", irq->vector); rc = ENXIO; goto exit_res; } if ((rc = bus_setup_intr(adapter->dev, irq->res, INTR_TYPE_NET | INTR_MPSAFE, irq->handler, NULL, irq->data, &irq->cookie)) != 0) { device_printf(adapter->dev, "failed to register " "interrupt handler for irq %ju: %d\n", (uintmax_t)rman_get_start(irq->res), rc); goto exit_intr; } irq->requested = 1; } goto exit; exit_intr: v = i - 1; /* -1 because we omit the operation that failed */ while (v-- >= 0) { int bti; irq = &adapter->irq_tbl[v]; bti = bus_teardown_intr(adapter->dev, irq->res, irq->cookie); if (bti != 0) { device_printf(adapter->dev, "failed to tear " "down irq: %d\n", irq->vector); } irq->requested = 0; device_printf_dbg(adapter->dev, "exit_intr: releasing irq %d\n", irq->vector); } exit_res: v = i - 1; /* -1 because we omit the operation that failed */ while (v-- >= 0) { int brr; irq = &adapter->irq_tbl[v]; device_printf_dbg(adapter->dev, "exit_res: releasing resource" " for irq %d\n", irq->vector); brr = bus_release_resource(adapter->dev, SYS_RES_IRQ, irq->vector, irq->res); if (brr != 0) device_printf(adapter->dev, "dev has no parent while " "releasing res for irq: %d\n", irq->vector); irq->res = NULL; } exit: return (rc); } /** * al_eth_setup_tx_resources - allocate Tx resources (Descriptors) * @adapter: network interface device structure * @qid: queue index * * Return 0 on success, negative on failure **/ static int al_eth_setup_tx_resources(struct al_eth_adapter *adapter, int qid) { struct al_eth_ring *tx_ring = 
&adapter->tx_ring[qid]; device_t dev = tx_ring->dev; struct al_udma_q_params *q_params = &tx_ring->q_params; int size; int ret; if (adapter->up) return (0); size = sizeof(struct al_eth_tx_buffer) * tx_ring->sw_count; tx_ring->tx_buffer_info = malloc(size, M_IFAL, M_ZERO | M_WAITOK); if (tx_ring->tx_buffer_info == NULL) return (ENOMEM); tx_ring->descs_size = tx_ring->hw_count * sizeof(union al_udma_desc); q_params->size = tx_ring->hw_count; ret = al_dma_alloc_coherent(dev, &q_params->desc_phy_base_tag, (bus_dmamap_t *)&q_params->desc_phy_base_map, (bus_addr_t *)&q_params->desc_phy_base, (void**)&q_params->desc_base, tx_ring->descs_size); if (ret != 0) { device_printf(dev, "failed to al_dma_alloc_coherent," " ret = %d\n", ret); return (ENOMEM); } if (q_params->desc_base == NULL) return (ENOMEM); device_printf_dbg(dev, "Initializing ring queues %d\n", qid); /* Allocate Ring Queue */ mtx_init(&tx_ring->br_mtx, "AlRingMtx", NULL, MTX_DEF); tx_ring->br = buf_ring_alloc(AL_BR_SIZE, M_DEVBUF, M_WAITOK, &tx_ring->br_mtx); if (tx_ring->br == NULL) { device_printf(dev, "Critical Failure setting up buf ring\n"); return (ENOMEM); } /* Allocate taskqueues */ TASK_INIT(&tx_ring->enqueue_task, 0, al_eth_start_xmit, tx_ring); tx_ring->enqueue_tq = taskqueue_create_fast("al_tx_enque", M_NOWAIT, taskqueue_thread_enqueue, &tx_ring->enqueue_tq); taskqueue_start_threads(&tx_ring->enqueue_tq, 1, PI_NET, "%s txeq", device_get_nameunit(adapter->dev)); TASK_INIT(&tx_ring->cmpl_task, 0, al_eth_tx_cmpl_work, tx_ring); tx_ring->cmpl_tq = taskqueue_create_fast("al_tx_cmpl", M_NOWAIT, taskqueue_thread_enqueue, &tx_ring->cmpl_tq); taskqueue_start_threads(&tx_ring->cmpl_tq, 1, PI_REALTIME, "%s txcq", device_get_nameunit(adapter->dev)); /* Setup DMA descriptor areas. 
*/ ret = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ AL_TSO_SIZE, /* maxsize */ AL_ETH_PKT_MAX_BUFS, /* nsegments */ PAGE_SIZE, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &tx_ring->dma_buf_tag); if (ret != 0) { device_printf(dev,"Unable to allocate dma_buf_tag, ret = %d\n", ret); return (ret); } for (size = 0; size < tx_ring->sw_count; size++) { ret = bus_dmamap_create(tx_ring->dma_buf_tag, 0, &tx_ring->tx_buffer_info[size].dma_map); if (ret != 0) { device_printf(dev, "Unable to map DMA TX " "buffer memory [iter=%d]\n", size); return (ret); } } /* completion queue not used for tx */ q_params->cdesc_base = NULL; /* size in bytes of the udma completion ring descriptor */ q_params->cdesc_size = 8; tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; return (0); } /* * al_eth_free_tx_resources - Free Tx Resources per Queue * @adapter: network interface device structure * @qid: queue index * * Free all transmit software resources */ static void al_eth_free_tx_resources(struct al_eth_adapter *adapter, int qid) { struct al_eth_ring *tx_ring = &adapter->tx_ring[qid]; struct al_udma_q_params *q_params = &tx_ring->q_params; int size; /* At this point interrupts' handlers must be deactivated */ while (taskqueue_cancel(tx_ring->cmpl_tq, &tx_ring->cmpl_task, NULL)) taskqueue_drain(tx_ring->cmpl_tq, &tx_ring->cmpl_task); taskqueue_free(tx_ring->cmpl_tq); while (taskqueue_cancel(tx_ring->enqueue_tq, &tx_ring->enqueue_task, NULL)) { taskqueue_drain(tx_ring->enqueue_tq, &tx_ring->enqueue_task); } taskqueue_free(tx_ring->enqueue_tq); if (tx_ring->br != NULL) { drbr_flush(adapter->netdev, tx_ring->br); buf_ring_free(tx_ring->br, M_DEVBUF); } for (size = 0; size < tx_ring->sw_count; size++) { m_freem(tx_ring->tx_buffer_info[size].m); tx_ring->tx_buffer_info[size].m = NULL; bus_dmamap_unload(tx_ring->dma_buf_tag, 
tx_ring->tx_buffer_info[size].dma_map); bus_dmamap_destroy(tx_ring->dma_buf_tag, tx_ring->tx_buffer_info[size].dma_map); } bus_dma_tag_destroy(tx_ring->dma_buf_tag); free(tx_ring->tx_buffer_info, M_IFAL); tx_ring->tx_buffer_info = NULL; mtx_destroy(&tx_ring->br_mtx); /* if not set, then don't free */ if (q_params->desc_base == NULL) return; al_dma_free_coherent(q_params->desc_phy_base_tag, q_params->desc_phy_base_map, q_params->desc_base); q_params->desc_base = NULL; } /* * al_eth_free_all_tx_resources - Free Tx Resources for All Queues * @adapter: board private structure * * Free all transmit software resources */ static void al_eth_free_all_tx_resources(struct al_eth_adapter *adapter) { int i; for (i = 0; i < adapter->num_tx_queues; i++) if (adapter->tx_ring[i].q_params.desc_base) al_eth_free_tx_resources(adapter, i); } /* * al_eth_setup_rx_resources - allocate Rx resources (Descriptors) * @adapter: network interface device structure * @qid: queue index * * Returns 0 on success, negative on failure */ static int al_eth_setup_rx_resources(struct al_eth_adapter *adapter, unsigned int qid) { struct al_eth_ring *rx_ring = &adapter->rx_ring[qid]; device_t dev = rx_ring->dev; struct al_udma_q_params *q_params = &rx_ring->q_params; int size; int ret; size = sizeof(struct al_eth_rx_buffer) * rx_ring->sw_count; /* alloc extra element so in rx path we can always prefetch rx_info + 1 */ size += 1; rx_ring->rx_buffer_info = malloc(size, M_IFAL, M_ZERO | M_WAITOK); if (rx_ring->rx_buffer_info == NULL) return (ENOMEM); rx_ring->descs_size = rx_ring->hw_count * sizeof(union al_udma_desc); q_params->size = rx_ring->hw_count; ret = al_dma_alloc_coherent(dev, &q_params->desc_phy_base_tag, &q_params->desc_phy_base_map, (bus_addr_t *)&q_params->desc_phy_base, (void**)&q_params->desc_base, rx_ring->descs_size); if ((q_params->desc_base == NULL) || (ret != 0)) return (ENOMEM); /* size in bytes of the udma completion ring descriptor */ q_params->cdesc_size = 16; rx_ring->cdescs_size = 
rx_ring->hw_count * q_params->cdesc_size; ret = al_dma_alloc_coherent(dev, &q_params->cdesc_phy_base_tag, &q_params->cdesc_phy_base_map, (bus_addr_t *)&q_params->cdesc_phy_base, (void**)&q_params->cdesc_base, rx_ring->cdescs_size); if ((q_params->cdesc_base == NULL) || (ret != 0)) return (ENOMEM); /* Allocate taskqueues */ NET_TASK_INIT(&rx_ring->enqueue_task, 0, al_eth_rx_recv_work, rx_ring); rx_ring->enqueue_tq = taskqueue_create_fast("al_rx_enque", M_NOWAIT, taskqueue_thread_enqueue, &rx_ring->enqueue_tq); taskqueue_start_threads(&rx_ring->enqueue_tq, 1, PI_NET, "%s rxeq", device_get_nameunit(adapter->dev)); /* Setup DMA descriptor areas. */ ret = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ AL_TSO_SIZE, /* maxsize */ 1, /* nsegments */ AL_TSO_SIZE, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &rx_ring->dma_buf_tag); if (ret != 0) { device_printf(dev,"Unable to allocate RX dma_buf_tag\n"); return (ret); } for (size = 0; size < rx_ring->sw_count; size++) { ret = bus_dmamap_create(rx_ring->dma_buf_tag, 0, &rx_ring->rx_buffer_info[size].dma_map); if (ret != 0) { device_printf(dev,"Unable to map DMA RX buffer memory\n"); return (ret); } } /* Zero out the descriptor ring */ memset(q_params->cdesc_base, 0, rx_ring->cdescs_size); /* Create LRO for the ring */ if ((if_getcapenable(adapter->netdev) & IFCAP_LRO) != 0) { int err = tcp_lro_init(&rx_ring->lro); if (err != 0) { device_printf(adapter->dev, "LRO[%d] Initialization failed!\n", qid); } else { device_printf_dbg(adapter->dev, "RX Soft LRO[%d] Initialized\n", qid); - rx_ring->lro_enabled = TRUE; + rx_ring->lro_enabled = true; rx_ring->lro.ifp = adapter->netdev; } } rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; return (0); } /* * al_eth_free_rx_resources - Free Rx Resources * @adapter: network interface device structure * @qid: queue index * * Free 
all receive software resources */ static void al_eth_free_rx_resources(struct al_eth_adapter *adapter, unsigned int qid) { struct al_eth_ring *rx_ring = &adapter->rx_ring[qid]; struct al_udma_q_params *q_params = &rx_ring->q_params; int size; /* At this point interrupts' handlers must be deactivated */ while (taskqueue_cancel(rx_ring->enqueue_tq, &rx_ring->enqueue_task, NULL)) { taskqueue_drain(rx_ring->enqueue_tq, &rx_ring->enqueue_task); } taskqueue_free(rx_ring->enqueue_tq); for (size = 0; size < rx_ring->sw_count; size++) { m_freem(rx_ring->rx_buffer_info[size].m); rx_ring->rx_buffer_info[size].m = NULL; bus_dmamap_unload(rx_ring->dma_buf_tag, rx_ring->rx_buffer_info[size].dma_map); bus_dmamap_destroy(rx_ring->dma_buf_tag, rx_ring->rx_buffer_info[size].dma_map); } bus_dma_tag_destroy(rx_ring->dma_buf_tag); free(rx_ring->rx_buffer_info, M_IFAL); rx_ring->rx_buffer_info = NULL; /* if not set, then don't free */ if (q_params->desc_base == NULL) return; al_dma_free_coherent(q_params->desc_phy_base_tag, q_params->desc_phy_base_map, q_params->desc_base); q_params->desc_base = NULL; /* if not set, then don't free */ if (q_params->cdesc_base == NULL) return; al_dma_free_coherent(q_params->cdesc_phy_base_tag, q_params->cdesc_phy_base_map, q_params->cdesc_base); q_params->cdesc_phy_base = 0; /* Free LRO resources */ tcp_lro_free(&rx_ring->lro); } /* * al_eth_free_all_rx_resources - Free Rx Resources for All Queues * @adapter: board private structure * * Free all receive software resources */ static void al_eth_free_all_rx_resources(struct al_eth_adapter *adapter) { int i; for (i = 0; i < adapter->num_rx_queues; i++) if (adapter->rx_ring[i].q_params.desc_base != 0) al_eth_free_rx_resources(adapter, i); } /* * al_eth_setup_all_rx_resources - allocate all queues Rx resources * @adapter: board private structure * * Return 0 on success, negative on failure */ static int al_eth_setup_all_rx_resources(struct al_eth_adapter *adapter) { int i, rc = 0; for (i = 0; i < 
adapter->num_rx_queues; i++) { rc = al_eth_setup_rx_resources(adapter, i); if (rc == 0) continue; device_printf(adapter->dev, "Allocation for Rx Queue %u failed\n", i); goto err_setup_rx; } return (0); err_setup_rx: /* rewind the index freeing the rings as we go */ while (i--) al_eth_free_rx_resources(adapter, i); return (rc); } /* * al_eth_setup_all_tx_resources - allocate all queues Tx resources * @adapter: private structure * * Return 0 on success, negative on failure */ static int al_eth_setup_all_tx_resources(struct al_eth_adapter *adapter) { int i, rc = 0; for (i = 0; i < adapter->num_tx_queues; i++) { rc = al_eth_setup_tx_resources(adapter, i); if (rc == 0) continue; device_printf(adapter->dev, "Allocation for Tx Queue %u failed\n", i); goto err_setup_tx; } return (0); err_setup_tx: /* rewind the index freeing the rings as we go */ while (i--) al_eth_free_tx_resources(adapter, i); return (rc); } static void al_eth_disable_int_sync(struct al_eth_adapter *adapter) { /* disable forwarding interrupts from eth through pci end point */ if ((adapter->board_type == ALPINE_FPGA_NIC) || (adapter->board_type == ALPINE_NIC)) { al_eth_forward_int_config((uint32_t*)adapter->internal_pcie_base + AL_REG_OFFSET_FORWARD_INTR, AL_DIS_FORWARD_INTR); } /* mask hw interrupts */ al_eth_interrupts_mask(adapter); } static void al_eth_interrupts_unmask(struct al_eth_adapter *adapter) { uint32_t group_a_mask = AL_INT_GROUP_A_GROUP_D_SUM; /* enable group D summery */ uint32_t group_b_mask = (1 << adapter->num_rx_queues) - 1;/* bit per Rx q*/ uint32_t group_c_mask = (1 << adapter->num_tx_queues) - 1;/* bit per Tx q*/ uint32_t group_d_mask = 3 << 8; struct unit_regs __iomem *regs_base = (struct unit_regs __iomem *)adapter->udma_base; if (adapter->int_mode == AL_IOFIC_MODE_LEGACY) group_a_mask |= AL_INT_GROUP_A_GROUP_B_SUM | AL_INT_GROUP_A_GROUP_C_SUM | AL_INT_GROUP_A_GROUP_D_SUM; al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_A, group_a_mask); 
al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B, group_b_mask); al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_C, group_c_mask); al_udma_iofic_unmask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_D, group_d_mask); } static void al_eth_interrupts_mask(struct al_eth_adapter *adapter) { struct unit_regs __iomem *regs_base = (struct unit_regs __iomem *)adapter->udma_base; /* mask all interrupts */ al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_A, AL_MASK_GROUP_A_INT); al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_B, AL_MASK_GROUP_B_INT); al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_C, AL_MASK_GROUP_C_INT); al_udma_iofic_mask(regs_base, AL_UDMA_IOFIC_LEVEL_PRIMARY, AL_INT_GROUP_D, AL_MASK_GROUP_D_INT); } static int al_eth_configure_int_mode(struct al_eth_adapter *adapter) { enum al_iofic_mode int_mode; uint32_t m2s_errors_disable = AL_M2S_MASK_INIT; uint32_t m2s_aborts_disable = AL_M2S_MASK_INIT; uint32_t s2m_errors_disable = AL_S2M_MASK_INIT; uint32_t s2m_aborts_disable = AL_S2M_MASK_INIT; /* single INTX mode */ if (adapter->msix_vecs == 0) int_mode = AL_IOFIC_MODE_LEGACY; else if (adapter->msix_vecs > 1) int_mode = AL_IOFIC_MODE_MSIX_PER_Q; else { device_printf(adapter->dev, "udma doesn't support single MSI-X mode yet.\n"); return (EIO); } if (adapter->board_type != ALPINE_INTEGRATED) { m2s_errors_disable |= AL_M2S_S2M_MASK_NOT_INT; m2s_errors_disable |= AL_M2S_S2M_MASK_NOT_INT; s2m_aborts_disable |= AL_M2S_S2M_MASK_NOT_INT; s2m_aborts_disable |= AL_M2S_S2M_MASK_NOT_INT; } if (al_udma_iofic_config((struct unit_regs __iomem *)adapter->udma_base, int_mode, m2s_errors_disable, m2s_aborts_disable, s2m_errors_disable, s2m_aborts_disable)) { device_printf(adapter->dev, "al_udma_unit_int_config failed!.\n"); return (EIO); } adapter->int_mode = int_mode; device_printf_dbg(adapter->dev, "using %s interrupt mode\n", int_mode == 
AL_IOFIC_MODE_LEGACY ? "INTx" : int_mode == AL_IOFIC_MODE_MSIX_PER_Q ? "MSI-X per Queue" : "Unknown"); /* set interrupt moderation resolution to 15us */ al_iofic_moder_res_config(&((struct unit_regs *)(adapter->udma_base))->gen.interrupt_regs.main_iofic, AL_INT_GROUP_B, 15); al_iofic_moder_res_config(&((struct unit_regs *)(adapter->udma_base))->gen.interrupt_regs.main_iofic, AL_INT_GROUP_C, 15); /* by default interrupt coalescing is disabled */ adapter->tx_usecs = 0; adapter->rx_usecs = 0; return (0); } /* * ethtool_rxfh_indir_default - get default value for RX flow hash indirection * @index: Index in RX flow hash indirection table * @n_rx_rings: Number of RX rings to use * * This function provides the default policy for RX flow hash indirection. */ static inline uint32_t ethtool_rxfh_indir_default(uint32_t index, uint32_t n_rx_rings) { return (index % n_rx_rings); } static void* al_eth_update_stats(struct al_eth_adapter *adapter) { struct al_eth_mac_stats *mac_stats = &adapter->mac_stats; if (adapter->up == 0) return (NULL); al_eth_mac_stats_get(&adapter->hal_adapter, mac_stats); return (NULL); } static uint64_t al_get_counter(if_t ifp, ift_counter cnt) { struct al_eth_adapter *adapter; struct al_eth_mac_stats *mac_stats; uint64_t rv; adapter = if_getsoftc(ifp); mac_stats = &adapter->mac_stats; switch (cnt) { case IFCOUNTER_IPACKETS: return (mac_stats->aFramesReceivedOK); /* including pause frames */ case IFCOUNTER_OPACKETS: return (mac_stats->aFramesTransmittedOK); case IFCOUNTER_IBYTES: return (mac_stats->aOctetsReceivedOK); case IFCOUNTER_OBYTES: return (mac_stats->aOctetsTransmittedOK); case IFCOUNTER_IMCASTS: return (mac_stats->ifInMulticastPkts); case IFCOUNTER_OMCASTS: return (mac_stats->ifOutMulticastPkts); case IFCOUNTER_COLLISIONS: return (0); case IFCOUNTER_IQDROPS: return (mac_stats->etherStatsDropEvents); case IFCOUNTER_IERRORS: rv = mac_stats->ifInErrors + mac_stats->etherStatsUndersizePkts + /* good but short */ mac_stats->etherStatsFragments + /* 
short and bad*/ mac_stats->etherStatsJabbers + /* with crc errors */ mac_stats->etherStatsOversizePkts + mac_stats->aFrameCheckSequenceErrors + mac_stats->aAlignmentErrors; return (rv); case IFCOUNTER_OERRORS: return (mac_stats->ifOutErrors); default: return (if_get_counter_default(ifp, cnt)); } } static u_int al_count_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { unsigned char *mac; mac = LLADDR(sdl); /* default mc address inside mac address */ if (mac[3] != 0 && mac[4] != 0 && mac[5] != 1) return (1); else return (0); } static u_int al_program_addr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { struct al_eth_adapter *adapter = arg; al_eth_mac_table_unicast_add(adapter, AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + 1 + cnt, 1); return (1); } /* * Unicast, Multicast and Promiscuous mode set * * The set_rx_mode entry point is called whenever the unicast or multicast * address lists or the network interface flags are updated. This routine is * responsible for configuring the hardware for proper unicast, multicast, * promiscuous mode, and all-multi behavior. */ static void al_eth_set_rx_mode(struct al_eth_adapter *adapter) { if_t ifp = adapter->netdev; int mc, uc; uint8_t i; /* XXXGL: why generic count won't work? */ mc = if_foreach_llmaddr(ifp, al_count_maddr, NULL); uc = if_lladdr_count(ifp); if ((if_getflags(ifp) & IFF_PROMISC) != 0) { al_eth_mac_table_promiscuous_set(adapter, true); } else { if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) { /* This interface is in all-multicasts mode (used by multicast routers). 
*/ al_eth_mac_table_all_multicast_add(adapter, AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX, 1); } else { if (mc == 0) { al_eth_mac_table_entry_clear(adapter, AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX); } else { al_eth_mac_table_all_multicast_add(adapter, AL_ETH_MAC_TABLE_ALL_MULTICAST_IDX, 1); } } if (uc != 0) { i = AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + 1; if (uc > AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT) { /* * In this case there are more addresses then * entries in the mac table - set promiscuous */ al_eth_mac_table_promiscuous_set(adapter, true); return; } /* clear the last configuration */ while (i < (AL_ETH_MAC_TABLE_UNICAST_IDX_BASE + AL_ETH_MAC_TABLE_UNICAST_MAX_COUNT)) { al_eth_mac_table_entry_clear(adapter, i); i++; } /* set new addresses */ if_foreach_lladdr(ifp, al_program_addr, adapter); } al_eth_mac_table_promiscuous_set(adapter, false); } } static void al_eth_config_rx_fwd(struct al_eth_adapter *adapter) { struct al_eth_fwd_ctrl_table_entry entry; int i; /* let priority be equal to pbits */ for (i = 0; i < AL_ETH_FWD_PBITS_TABLE_NUM; i++) al_eth_fwd_pbits_table_set(&adapter->hal_adapter, i, i); /* map priority to queue index, queue id = priority/2 */ for (i = 0; i < AL_ETH_FWD_PRIO_TABLE_NUM; i++) al_eth_fwd_priority_table_set(&adapter->hal_adapter, i, i >> 1); entry.prio_sel = AL_ETH_CTRL_TABLE_PRIO_SEL_VAL_0; entry.queue_sel_1 = AL_ETH_CTRL_TABLE_QUEUE_SEL_1_THASH_TABLE; entry.queue_sel_2 = AL_ETH_CTRL_TABLE_QUEUE_SEL_2_NO_PRIO; entry.udma_sel = AL_ETH_CTRL_TABLE_UDMA_SEL_MAC_TABLE; - entry.filter = FALSE; + entry.filter = false; - al_eth_ctrl_table_def_set(&adapter->hal_adapter, FALSE, &entry); + al_eth_ctrl_table_def_set(&adapter->hal_adapter, AL_FALSE, &entry); /* * By default set the mac table to forward all unicast packets to our * MAC address and all broadcast. all the rest will be dropped. 
*/ al_eth_mac_table_unicast_add(adapter, AL_ETH_MAC_TABLE_UNICAST_IDX_BASE, 1); al_eth_mac_table_broadcast_add(adapter, AL_ETH_MAC_TABLE_BROADCAST_IDX, 1); al_eth_mac_table_promiscuous_set(adapter, false); /* set toeplitz hash keys */ for (i = 0; i < sizeof(adapter->toeplitz_hash_key); i++) *((uint8_t*)adapter->toeplitz_hash_key + i) = (uint8_t)random(); for (i = 0; i < AL_ETH_RX_HASH_KEY_NUM; i++) al_eth_hash_key_set(&adapter->hal_adapter, i, htonl(adapter->toeplitz_hash_key[i])); for (i = 0; i < AL_ETH_RX_RSS_TABLE_SIZE; i++) { adapter->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, AL_ETH_NUM_QUEUES); al_eth_set_thash_table_entry(adapter, i, 0, adapter->rss_ind_tbl[i]); } al_eth_fsm_table_init(adapter); } static void al_eth_req_rx_buff_size(struct al_eth_adapter *adapter, int size) { /* * Determine the correct mbuf pool * for doing jumbo frames * Try from the smallest up to maximum supported */ adapter->rx_mbuf_sz = MCLBYTES; if (size > 2048) { if (adapter->max_rx_buff_alloc_size > 2048) adapter->rx_mbuf_sz = MJUMPAGESIZE; else return; } if (size > 4096) { if (adapter->max_rx_buff_alloc_size > 4096) adapter->rx_mbuf_sz = MJUM9BYTES; else return; } if (size > 9216) { if (adapter->max_rx_buff_alloc_size > 9216) adapter->rx_mbuf_sz = MJUM16BYTES; else return; } } static int al_eth_change_mtu(struct al_eth_adapter *adapter, int new_mtu) { int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN; al_eth_req_rx_buff_size(adapter, new_mtu); device_printf_dbg(adapter->dev, "set MTU to %d\n", new_mtu); al_eth_rx_pkt_limit_config(&adapter->hal_adapter, AL_ETH_MIN_FRAME_LEN, max_frame); al_eth_tso_mss_config(&adapter->hal_adapter, 0, new_mtu - 100); return (0); } static int al_eth_check_mtu(struct al_eth_adapter *adapter, int new_mtu) { int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN; if ((new_mtu < AL_ETH_MIN_FRAME_LEN) || (max_frame > AL_ETH_MAX_FRAME_LEN)) { return (EINVAL); } return (0); } static int 
al_eth_udma_queue_enable(struct al_eth_adapter *adapter, enum al_udma_type type, int qid) { int rc = 0; char *name = (type == UDMA_TX) ? "Tx" : "Rx"; struct al_udma_q_params *q_params; if (type == UDMA_TX) q_params = &adapter->tx_ring[qid].q_params; else q_params = &adapter->rx_ring[qid].q_params; rc = al_eth_queue_config(&adapter->hal_adapter, type, qid, q_params); if (rc < 0) { device_printf(adapter->dev, "config %s queue %u failed\n", name, qid); return (rc); } return (rc); } static int al_eth_udma_queues_enable_all(struct al_eth_adapter *adapter) { int i; for (i = 0; i < adapter->num_tx_queues; i++) al_eth_udma_queue_enable(adapter, UDMA_TX, i); for (i = 0; i < adapter->num_rx_queues; i++) al_eth_udma_queue_enable(adapter, UDMA_RX, i); return (0); } static void al_eth_up_complete(struct al_eth_adapter *adapter) { al_eth_configure_int_mode(adapter); al_eth_config_rx_fwd(adapter); al_eth_change_mtu(adapter, if_getmtu(adapter->netdev)); al_eth_udma_queues_enable_all(adapter); al_eth_refill_all_rx_bufs(adapter); al_eth_interrupts_unmask(adapter); /* enable forwarding interrupts from eth through pci end point */ if ((adapter->board_type == ALPINE_FPGA_NIC) || (adapter->board_type == ALPINE_NIC)) { al_eth_forward_int_config((uint32_t*)adapter->internal_pcie_base + AL_REG_OFFSET_FORWARD_INTR, AL_EN_FORWARD_INTR); } al_eth_flow_ctrl_enable(adapter); mtx_lock(&adapter->stats_mtx); callout_reset(&adapter->stats_callout, hz, al_tick_stats, (void*)adapter); mtx_unlock(&adapter->stats_mtx); al_eth_mac_start(&adapter->hal_adapter); } static int al_media_update(if_t ifp) { struct al_eth_adapter *adapter = if_getsoftc(ifp); if ((if_getflags(ifp) & IFF_UP) != 0) mii_mediachg(adapter->mii); return (0); } static void al_media_status(if_t ifp, struct ifmediareq *ifmr) { struct al_eth_adapter *sc = if_getsoftc(ifp); struct mii_data *mii; if (sc->mii == NULL) { ifmr->ifm_active = IFM_ETHER | IFM_NONE; ifmr->ifm_status = 0; return; } mii = sc->mii; mii_pollstat(mii); ifmr->ifm_active 
= mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } static void al_tick(void *arg) { struct al_eth_adapter *adapter = arg; mii_tick(adapter->mii); /* Schedule another timeout one second from now */ callout_schedule(&adapter->wd_callout, hz); } static void al_tick_stats(void *arg) { struct al_eth_adapter *adapter = arg; al_eth_update_stats(adapter); callout_schedule(&adapter->stats_callout, hz); } static int al_eth_up(struct al_eth_adapter *adapter) { if_t ifp = adapter->netdev; int rc; if (adapter->up) return (0); if ((adapter->flags & AL_ETH_FLAG_RESET_REQUESTED) != 0) { al_eth_function_reset(adapter); adapter->flags &= ~AL_ETH_FLAG_RESET_REQUESTED; } if_sethwassist(ifp, 0); if ((if_getcapenable(ifp) & IFCAP_TSO) != 0) if_sethwassistbits(ifp, CSUM_TSO, 0); if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0) if_sethwassistbits(ifp, (CSUM_TCP | CSUM_UDP), 0); if ((if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6) != 0) if_sethwassistbits(ifp, (CSUM_TCP_IPV6 | CSUM_UDP_IPV6), 0); al_eth_serdes_init(adapter); rc = al_eth_hw_init(adapter); if (rc != 0) goto err_hw_init_open; rc = al_eth_setup_int_mode(adapter); if (rc != 0) { device_printf(adapter->dev, "%s failed at setup interrupt mode!\n", __func__); goto err_setup_int; } /* allocate transmit descriptors */ rc = al_eth_setup_all_tx_resources(adapter); if (rc != 0) goto err_setup_tx; /* allocate receive descriptors */ rc = al_eth_setup_all_rx_resources(adapter); if (rc != 0) goto err_setup_rx; rc = al_eth_request_irq(adapter); if (rc != 0) goto err_req_irq; al_eth_up_complete(adapter); adapter->up = true; if (adapter->mac_mode == AL_ETH_MAC_MODE_10GbE_Serial) if_link_state_change(adapter->netdev, LINK_STATE_UP); if (adapter->mac_mode == AL_ETH_MAC_MODE_RGMII) { mii_mediachg(adapter->mii); /* Schedule watchdog timeout */ mtx_lock(&adapter->wd_mtx); callout_reset(&adapter->wd_callout, hz, al_tick, adapter); mtx_unlock(&adapter->wd_mtx); mii_pollstat(adapter->mii); } return (rc); err_req_irq: 
al_eth_free_all_rx_resources(adapter); err_setup_rx: al_eth_free_all_tx_resources(adapter); err_setup_tx: al_eth_free_irq(adapter); err_setup_int: al_eth_hw_stop(adapter); err_hw_init_open: al_eth_function_reset(adapter); return (rc); } static int al_shutdown(device_t dev) { struct al_eth_adapter *adapter = device_get_softc(dev); al_eth_down(adapter); return (0); } static void al_eth_down(struct al_eth_adapter *adapter) { device_printf_dbg(adapter->dev, "al_eth_down: begin\n"); adapter->up = false; mtx_lock(&adapter->wd_mtx); callout_stop(&adapter->wd_callout); mtx_unlock(&adapter->wd_mtx); al_eth_disable_int_sync(adapter); mtx_lock(&adapter->stats_mtx); callout_stop(&adapter->stats_callout); mtx_unlock(&adapter->stats_mtx); al_eth_free_irq(adapter); al_eth_hw_stop(adapter); al_eth_free_all_tx_resources(adapter); al_eth_free_all_rx_resources(adapter); } static int al_ioctl(if_t ifp, u_long command, caddr_t data) { struct al_eth_adapter *adapter = if_getsoftc(ifp); struct ifreq *ifr = (struct ifreq *)data; int error = 0; switch (command) { case SIOCSIFMTU: { error = al_eth_check_mtu(adapter, ifr->ifr_mtu); if (error != 0) { device_printf(adapter->dev, "ioctl wrong mtu %u\n", if_getmtu(adapter->netdev)); break; } if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); if_setmtu(adapter->netdev, ifr->ifr_mtu); al_init(adapter); break; } case SIOCSIFFLAGS: if ((if_getflags(ifp) & IFF_UP) != 0) { if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { if (((if_getflags(ifp) ^ adapter->if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { device_printf_dbg(adapter->dev, "ioctl promisc/allmulti\n"); al_eth_set_rx_mode(adapter); } } else { error = al_eth_up(adapter); if (error == 0) if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); } } else { if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { al_eth_down(adapter); if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); } } adapter->if_flags = if_getflags(ifp); break; case SIOCADDMULTI: case SIOCDELMULTI: if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { 
device_printf_dbg(adapter->dev, "ioctl add/del multi before\n"); al_eth_set_rx_mode(adapter); #ifdef DEVICE_POLLING if ((if_getcapenable(ifp) & IFCAP_POLLING) == 0) #endif } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: if (adapter->mii != NULL) error = ifmedia_ioctl(ifp, ifr, &adapter->mii->mii_media, command); else error = ifmedia_ioctl(ifp, ifr, &adapter->media, command); break; case SIOCSIFCAP: { int mask, reinit; reinit = 0; mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); #ifdef DEVICE_POLLING if ((mask & IFCAP_POLLING) != 0) { if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) { if (error != 0) return (error); if_setcapenablebit(ifp, IFCAP_POLLING, 0); } else { error = ether_poll_deregister(ifp); /* Enable interrupt even in error case */ if_setcapenablebit(ifp, 0, IFCAP_POLLING); } } #endif if ((mask & IFCAP_HWCSUM) != 0) { /* apply to both rx and tx */ if_togglecapenable(ifp, IFCAP_HWCSUM); reinit = 1; } if ((mask & IFCAP_HWCSUM_IPV6) != 0) { if_togglecapenable(ifp, IFCAP_HWCSUM_IPV6); reinit = 1; } if ((mask & IFCAP_TSO) != 0) { if_togglecapenable(ifp, IFCAP_TSO); reinit = 1; } if ((mask & IFCAP_LRO) != 0) { if_togglecapenable(ifp, IFCAP_LRO); } if ((mask & IFCAP_VLAN_HWTAGGING) != 0) { if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING); reinit = 1; } if ((mask & IFCAP_VLAN_HWFILTER) != 0) { if_togglecapenable(ifp, IFCAP_VLAN_HWFILTER); reinit = 1; } if ((mask & IFCAP_VLAN_HWTSO) != 0) { if_togglecapenable(ifp, IFCAP_VLAN_HWTSO); reinit = 1; } if ((reinit != 0) && ((if_getdrvflags(ifp) & IFF_DRV_RUNNING)) != 0) { al_init(adapter); } break; } default: error = ether_ioctl(ifp, command, data); break; } return (error); } static int al_is_device_supported(device_t dev) { uint16_t pci_vendor_id = pci_get_vendor(dev); uint16_t pci_device_id = pci_get_device(dev); return (pci_vendor_id == PCI_VENDOR_ID_ANNAPURNA_LABS && (pci_device_id == PCI_DEVICE_ID_AL_ETH || pci_device_id == PCI_DEVICE_ID_AL_ETH_ADVANCED || pci_device_id == PCI_DEVICE_ID_AL_ETH_NIC || pci_device_id == 
PCI_DEVICE_ID_AL_ETH_FPGA_NIC)); } /* Time in mSec to keep trying to read / write from MDIO in case of error */ #define MDIO_TIMEOUT_MSEC 100 #define MDIO_PAUSE_MSEC 10 static int al_miibus_readreg(device_t dev, int phy, int reg) { struct al_eth_adapter *adapter = device_get_softc(dev); uint16_t value = 0; int rc; int timeout = MDIO_TIMEOUT_MSEC; while (timeout > 0) { rc = al_eth_mdio_read(&adapter->hal_adapter, adapter->phy_addr, -1, reg, &value); if (rc == 0) return (value); device_printf_dbg(adapter->dev, "mdio read failed. try again in 10 msec\n"); timeout -= MDIO_PAUSE_MSEC; pause("readred pause", MDIO_PAUSE_MSEC); } if (rc != 0) device_printf(adapter->dev, "MDIO read failed on timeout\n"); return (value); } static int al_miibus_writereg(device_t dev, int phy, int reg, int value) { struct al_eth_adapter *adapter = device_get_softc(dev); int rc; int timeout = MDIO_TIMEOUT_MSEC; while (timeout > 0) { rc = al_eth_mdio_write(&adapter->hal_adapter, adapter->phy_addr, -1, reg, value); if (rc == 0) return (0); device_printf(adapter->dev, "mdio write failed. 
try again in 10 msec\n"); timeout -= MDIO_PAUSE_MSEC; pause("miibus writereg", MDIO_PAUSE_MSEC); } if (rc != 0) device_printf(adapter->dev, "MDIO write failed on timeout\n"); return (rc); } static void al_miibus_statchg(device_t dev) { struct al_eth_adapter *adapter = device_get_softc(dev); device_printf_dbg(adapter->dev, "al_miibus_statchg: state has changed!\n"); device_printf_dbg(adapter->dev, "al_miibus_statchg: active = 0x%x status = 0x%x\n", adapter->mii->mii_media_active, adapter->mii->mii_media_status); if (adapter->up == 0) return; if ((adapter->mii->mii_media_status & IFM_AVALID) != 0) { if (adapter->mii->mii_media_status & IFM_ACTIVE) { device_printf(adapter->dev, "link is UP\n"); if_link_state_change(adapter->netdev, LINK_STATE_UP); } else { device_printf(adapter->dev, "link is DOWN\n"); if_link_state_change(adapter->netdev, LINK_STATE_DOWN); } } } static void al_miibus_linkchg(device_t dev) { struct al_eth_adapter *adapter = device_get_softc(dev); uint8_t duplex = 0; uint8_t speed = 0; if (adapter->mii == NULL) return; if ((if_getflags(adapter->netdev) & IFF_UP) == 0) return; /* Ignore link changes when link is not ready */ if ((adapter->mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) != (IFM_AVALID | IFM_ACTIVE)) { return; } if ((adapter->mii->mii_media_active & IFM_FDX) != 0) duplex = 1; speed = IFM_SUBTYPE(adapter->mii->mii_media_active); if (speed == IFM_10_T) { al_eth_mac_link_config(&adapter->hal_adapter, 0, 1, AL_10BASE_T_SPEED, duplex); return; } if (speed == IFM_100_TX) { al_eth_mac_link_config(&adapter->hal_adapter, 0, 1, AL_100BASE_TX_SPEED, duplex); return; } if (speed == IFM_1000_T) { al_eth_mac_link_config(&adapter->hal_adapter, 0, 1, AL_1000BASE_T_SPEED, duplex); return; } device_printf(adapter->dev, "ERROR: unknown MII media active 0x%08x\n", adapter->mii->mii_media_active); } diff --git a/sys/dev/al_eth/al_eth.h b/sys/dev/al_eth/al_eth.h index 1f8d4528b545..9579aca55685 100644 --- a/sys/dev/al_eth/al_eth.h +++ 
b/sys/dev/al_eth/al_eth.h @@ -1,366 +1,366 @@ /*- * Copyright (c) 2015,2016 Annapurna Labs Ltd. and affiliates * All rights reserved. * * Developed by Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef __AL_ETH_H__ #define __AL_ETH_H__ #include "al_init_eth_lm.h" #include "al_hal_eth.h" #include "al_hal_udma_iofic.h" #include "al_hal_udma_debug.h" #include "al_serdes.h" enum board_t { ALPINE_INTEGRATED = 0, ALPINE_NIC = 1, ALPINE_FPGA_NIC = 2, }; #define AL_ETH_MAX_HW_QUEUES 4 #define AL_ETH_NUM_QUEUES 4 #define AL_ETH_MAX_MSIX_VEC (1 + 2 * AL_ETH_MAX_HW_QUEUES) #define AL_ETH_DEFAULT_TX_SW_DESCS (512) #define AL_ETH_DEFAULT_TX_HW_DESCS (512) #define AL_ETH_DEFAULT_RX_DESCS (512) #if ((AL_ETH_DEFAULT_TX_SW_DESCS / 4) < (AL_ETH_PKT_MAX_BUFS + 2)) #define AL_ETH_TX_WAKEUP_THRESH (AL_ETH_DEFAULT_TX_SW_DESCS / 4) #else #define AL_ETH_TX_WAKEUP_THRESH (AL_ETH_PKT_MAX_BUFS + 2) #endif #define NET_IP_ALIGN 2 #define AL_ETH_DEFAULT_SMALL_PACKET_LEN (128 - NET_IP_ALIGN) #define AL_ETH_HEADER_COPY_SIZE (128 - NET_IP_ALIGN) #define AL_ETH_DEFAULT_MAX_RX_BUFF_ALLOC_SIZE 9216 /* * Minimum the buffer size to 600 to avoid situation the mtu will be changed * from too little buffer to very big one and then the number of buffer per * packet could reach the maximum AL_ETH_PKT_MAX_BUFS */ #define AL_ETH_DEFAULT_MIN_RX_BUFF_ALLOC_SIZE 600 #define AL_ETH_DEFAULT_FORCE_1000_BASEX FALSE #define AL_ETH_DEFAULT_LINK_POLL_INTERVAL 100 #define AL_ETH_FIRST_LINK_POLL_INTERVAL 1 #define AL_ETH_NAME_MAX_LEN 20 #define AL_ETH_IRQNAME_SIZE 40 #define AL_ETH_DEFAULT_MDIO_FREQ_KHZ 2500 #define AL_ETH_MDIO_FREQ_1000_KHZ 1000 struct al_eth_irq { driver_filter_t *handler; void *data; unsigned int vector; uint8_t requested; char name[AL_ETH_IRQNAME_SIZE]; struct resource *res; void *cookie; }; struct al_eth_tx_buffer { struct mbuf *m; struct al_eth_pkt hal_pkt; bus_dmamap_t dma_map; unsigned int tx_descs; }; struct al_eth_rx_buffer { struct mbuf *m; unsigned int data_size; bus_dmamap_t dma_map; struct al_buf al_buf; }; struct al_eth_ring { device_t dev; struct al_eth_adapter *adapter; /* Used to get rx packets from hal */ struct al_eth_pkt hal_pkt; /* Udma queue handler */ 
struct al_udma_q *dma_q; uint32_t ring_id; uint16_t next_to_use; uint16_t next_to_clean; /* The offset of the interrupt unmask register */ uint32_t *unmask_reg_offset; /* * The value to write to the above register to * unmask the interrupt of this ring */ uint32_t unmask_val; struct al_eth_meta_data hal_meta; /* Contex of tx packet */ struct al_eth_tx_buffer *tx_buffer_info; /* Contex of rx packet */ struct al_eth_rx_buffer *rx_buffer_info; /* Number of tx/rx_buffer_info's entries */ int sw_count; /* Number of hw descriptors */ int hw_count; /* Size (in bytes) of hw descriptors */ size_t descs_size; /* Size (in bytes) of hw completion descriptors, used for rx */ size_t cdescs_size; struct ifnet *netdev; struct al_udma_q_params q_params; struct buf_ring *br; struct mtx br_mtx; struct task enqueue_task; struct taskqueue *enqueue_tq; volatile uint32_t enqueue_is_running; struct task cmpl_task; struct taskqueue *cmpl_tq; volatile uint32_t cmpl_is_running; uint32_t lro_enabled; struct lro_ctrl lro; bus_dma_tag_t dma_buf_tag; volatile uint32_t stall; }; #define AL_ETH_TX_RING_IDX_NEXT(tx_ring, idx) (((idx) + 1) & (AL_ETH_DEFAULT_TX_SW_DESCS - 1)) #define AL_ETH_RX_RING_IDX_NEXT(rx_ring, idx) (((idx) + 1) & (AL_ETH_DEFAULT_RX_DESCS - 1)) #define AL_ETH_RX_RING_IDX_ADD(rx_ring, idx, n) (((idx) + (n)) & (AL_ETH_DEFAULT_RX_DESCS - 1)) /* flow control configuration */ #define AL_ETH_FLOW_CTRL_RX_FIFO_TH_HIGH 0x160 #define AL_ETH_FLOW_CTRL_RX_FIFO_TH_LOW 0x90 #define AL_ETH_FLOW_CTRL_QUANTA 0xffff #define AL_ETH_FLOW_CTRL_QUANTA_TH 0x8000 #define AL_ETH_FLOW_CTRL_AUTONEG 1 #define AL_ETH_FLOW_CTRL_RX_PAUSE 2 #define AL_ETH_FLOW_CTRL_TX_PAUSE 4 /* link configuration for 1G port */ struct al_eth_link_config { int old_link; /* Describes what we actually have. 
*/ int active_duplex; int active_speed; /* current flow control status */ uint8_t flow_ctrl_active; /* supported configuration (can be changed from ethtool) */ uint8_t flow_ctrl_supported; /* the following are not relevant to RGMII */ - boolean_t force_1000_base_x; - boolean_t autoneg; + bool force_1000_base_x; + bool autoneg; }; /* SFP detection event */ enum al_eth_sfp_detect_evt { /* No change (no connect, disconnect, or new SFP module */ AL_ETH_SFP_DETECT_EVT_NO_CHANGE, /* SFP module connected */ AL_ETH_SFP_DETECT_EVT_CONNECTED, /* SFP module disconnected */ AL_ETH_SFP_DETECT_EVT_DISCONNECTED, /* SFP module replaced */ AL_ETH_SFP_DETECT_EVT_CHANGED, }; /* SFP detection status */ struct al_eth_sfp_detect_stat { /* Status is valid (i.e. rest of fields are valid) */ - boolean_t valid; - boolean_t connected; + bool valid; + bool connected; uint8_t sfp_10g; uint8_t sfp_1g; uint8_t sfp_cable_tech; - boolean_t lt_en; - boolean_t an_en; + bool lt_en; + bool an_en; enum al_eth_mac_mode mac_mode; }; struct al_eth_retimer_params { - boolean_t exist; + bool exist; uint8_t bus_id; uint8_t i2c_addr; enum al_eth_retimer_channel channel; }; struct msix_entry { int entry; int vector; }; /* board specific private data structure */ struct al_eth_adapter { enum board_t board_type; device_t miibus; struct mii_data *mii; uint16_t dev_id; uint8_t rev_id; device_t dev; struct ifnet *netdev; struct ifmedia media; struct resource *udma_res; struct resource *mac_res; struct resource *ec_res; int if_flags; struct callout wd_callout; struct mtx wd_mtx; struct callout stats_callout; struct mtx stats_mtx; /* this is for intx mode */ void *irq_cookie; struct resource *irq_res; /* * Some features need tri-state capability, * thus the additional *_CAPABLE flags. 
*/ uint32_t flags; #define AL_ETH_FLAG_MSIX_CAPABLE (uint32_t)(1 << 1) #define AL_ETH_FLAG_MSIX_ENABLED (uint32_t)(1 << 2) #define AL_ETH_FLAG_IN_NETPOLL (uint32_t)(1 << 3) #define AL_ETH_FLAG_MQ_CAPABLE (uint32_t)(1 << 4) #define AL_ETH_FLAG_SRIOV_CAPABLE (uint32_t)(1 << 5) #define AL_ETH_FLAG_SRIOV_ENABLED (uint32_t)(1 << 6) #define AL_ETH_FLAG_RESET_REQUESTED (uint32_t)(1 << 7) struct al_hal_eth_adapter hal_adapter; /* * Rx packets that shorter that this len will be copied to the mbuf */ unsigned int small_copy_len; /* Maximum size for rx buffer */ unsigned int max_rx_buff_alloc_size; uint32_t rx_mbuf_sz; /* Tx fast path data */ int num_tx_queues; /* Rx fast path data */ int num_rx_queues; /* TX */ struct al_eth_ring tx_ring[AL_ETH_NUM_QUEUES]; /* RX */ struct al_eth_ring rx_ring[AL_ETH_NUM_QUEUES]; enum al_iofic_mode int_mode; #define AL_ETH_MGMT_IRQ_IDX 0 #define AL_ETH_RXQ_IRQ_IDX(adapter, q) (1 + (q)) #define AL_ETH_TXQ_IRQ_IDX(adapter, q) (1 + (adapter)->num_rx_queues + (q)) struct al_eth_irq irq_tbl[AL_ETH_MAX_MSIX_VEC]; struct msix_entry *msix_entries; int msix_vecs; int irq_vecs; unsigned int tx_usecs, rx_usecs; /* interrupt coalescing */ unsigned int tx_ring_count; unsigned int tx_descs_count; unsigned int rx_ring_count; unsigned int rx_descs_count; /* RSS */ uint32_t toeplitz_hash_key[AL_ETH_RX_HASH_KEY_NUM]; #define AL_ETH_RX_RSS_TABLE_SIZE AL_ETH_RX_THASH_TABLE_SIZE uint8_t rss_ind_tbl[AL_ETH_RX_RSS_TABLE_SIZE]; uint32_t msg_enable; struct al_eth_mac_stats mac_stats; enum al_eth_mac_mode mac_mode; - boolean_t mac_mode_set; /* Relevant only when 'auto_speed' is set */ + bool mac_mode_set; /* Relevant only when 'auto_speed' is set */ uint8_t mac_addr[ETHER_ADDR_LEN]; /* mdio and phy*/ - boolean_t phy_exist; + bool phy_exist; struct mii_bus *mdio_bus; struct phy_device *phydev; uint8_t phy_addr; struct al_eth_link_config link_config; /* HAL layer data */ int id_number; char name[AL_ETH_NAME_MAX_LEN]; void *internal_pcie_base; /* use for ALPINE_NIC 
devices */ void *udma_base; void *ec_base; void *mac_base; struct al_eth_flow_control_params flow_ctrl_params; struct al_eth_adapter_params eth_hal_params; struct task link_status_task; uint32_t link_poll_interval; /* task interval in mSec */ - boolean_t serdes_init; + bool serdes_init; struct al_serdes_grp_obj serdes_obj; uint8_t serdes_grp; uint8_t serdes_lane; - boolean_t an_en; /* run kr auto-negotiation */ - boolean_t lt_en; /* run kr link-training */ + bool an_en; /* run kr auto-negotiation */ + bool lt_en; /* run kr link-training */ - boolean_t sfp_detection_needed; /* true if need to run sfp detection */ - boolean_t auto_speed; /* true if allowed to change SerDes speed configuration */ + bool sfp_detection_needed; /* true if need to run sfp detection */ + bool auto_speed; /* true if allowed to change SerDes speed configuration */ uint8_t i2c_adapter_id; /* identifier for the i2c adapter to use to access SFP+ module */ enum al_eth_ref_clk_freq ref_clk_freq; /* reference clock frequency */ unsigned int mdio_freq; /* MDIO frequency [Khz] */ - boolean_t up; + bool up; - boolean_t last_link; - boolean_t last_establish_failed; + bool last_link; + bool last_establish_failed; struct al_eth_lm_context lm_context; - boolean_t use_lm; + bool use_lm; - boolean_t dont_override_serdes; /* avoid overriding serdes parameters + bool dont_override_serdes; /* avoid overriding serdes parameters to preset static values */ struct mtx serdes_config_lock; struct mtx if_rx_lock; uint32_t wol; struct al_eth_retimer_params retimer; bool phy_fixup_needed; enum al_eth_lm_max_speed max_speed; }; #endif /* !(AL_ETH_H) */ diff --git a/sys/dev/al_eth/al_init_eth_kr.c b/sys/dev/al_eth/al_init_eth_kr.c index fba6b6154a1f..db055adda8c0 100644 --- a/sys/dev/al_eth/al_init_eth_kr.c +++ b/sys/dev/al_eth/al_init_eth_kr.c @@ -1,839 +1,839 @@ /*- * Copyright (c) 2015,2016 Annapurna Labs Ltd. and affiliates * All rights reserved. * * Developed by Semihalf. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "al_init_eth_kr.h" #include "al_serdes.h" /** * Ethernet * @{ * @file al_init_eth_kr.c * * @brief auto-negotiation and link training algorithms and state machines * * The link training algorithm implemented in this file going over the * coefficients and looking for the best eye measurement possible for every one * of them. it's using state machine to move between the different states. * the state machine has 3 parts: * - preparation - waiting till the link partner (lp) will be ready and * change his state to preset. * - measurement (per coefficient) - issue decrement for the coefficient * under control till the eye measurement not increasing * and remains in the optimum. 
* - completion - indicate the receiver is ready and wait for the lp to * finish his work. */ /* TODO: fix with more reasonable numbers */ /* timeout in mSec before auto-negotiation will be terminated */ #define AL_ETH_KR_AN_TIMEOUT (500) #define AL_ETH_KR_EYE_MEASURE_TIMEOUT (100) /* timeout in uSec before the process will be terminated */ #define AL_ETH_KR_FRAME_LOCK_TIMEOUT (500 * 1000) #define AL_ETH_KR_LT_DONE_TIMEOUT (500 * 1000) /* number of times the receiver and transmitter tasks will be called before the * algorithm will be terminated */ #define AL_ETH_KR_LT_MAX_ROUNDS (50000) /* mac algorithm state machine */ enum al_eth_kr_mac_lt_state { TX_INIT = 0, /* start of all */ WAIT_BEGIN, /* wait for initial training lock */ DO_PRESET, /* issue PRESET to link partner */ DO_HOLD, /* issue HOLD to link partner */ /* preparation is done, start testing the coefficient. */ QMEASURE, /* EyeQ measurement. */ QCHECK, /* Check if measurement shows best value. */ DO_NEXT_TRY, /* issue DEC command to coeff for next measurement. */ END_STEPS, /* perform last steps to go back to optimum. */ END_STEPS_HOLD, /* perform last steps HOLD command. */ COEFF_DONE, /* done with the current coefficient updates. * Check if another should be done. */ /* end of training to all coefficients */ SET_READY, /* indicate local receiver ready */ TX_DONE /* transmit process completed, training can end. */ }; static const char * const al_eth_kr_mac_sm_name[] = { "TX_INIT", "WAIT_BEGIN", "DO_PRESET", "DO_HOLD", "QMEASURE", "QCHECK", "DO_NEXT_TRY", "END_STEPS", "END_STEPS_HOLD", "COEFF_DONE", "SET_READY", "TX_DONE" }; /* Constants used for the measurement. */ enum al_eth_kr_coef { AL_ETH_KR_COEF_C_MINUS, AL_ETH_KR_COEF_C_ZERO, AL_ETH_KR_COEF_C_PLUS, }; /* * test coefficients from COEFF_TO_MANIPULATE to COEFF_TO_MANIPULATE_LAST. 
*/ #define COEFF_TO_MANIPULATE AL_ETH_KR_COEF_C_MINUS #define COEFF_TO_MANIPULATE_LAST AL_ETH_KR_COEF_C_MINUS #define QARRAY_SIZE 3 /**< how many entries we want in our history array. */ struct al_eth_kr_data { struct al_hal_eth_adapter *adapter; struct al_serdes_grp_obj *serdes_obj; enum al_serdes_lane lane; /* Receiver side data */ struct al_eth_kr_status_report_data status_report; /* report to response */ struct al_eth_kr_coef_up_data last_lpcoeff; /* last coeff received */ /* Transmitter side data */ enum al_eth_kr_mac_lt_state algo_state; /* Statemachine. */ unsigned int qarray[QARRAY_SIZE]; /* EyeQ measurements history */ /* How many entries in the array are valid for compares yet. */ unsigned int qarray_cnt; enum al_eth_kr_coef curr_coeff; /* * Status of coefficient during the last * DEC/INC command (before issuing HOLD again). */ unsigned int coeff_status_step; unsigned int end_steps_cnt; /* Number of end steps needed */ }; static int al_eth_kr_an_run(struct al_eth_kr_data *kr_data, struct al_eth_an_adv *an_adv, struct al_eth_an_adv *an_partner_adv) { int rc; - boolean_t page_received = FALSE; - boolean_t an_completed = FALSE; - boolean_t error = FALSE; + al_bool page_received = AL_FALSE; + al_bool an_completed = AL_FALSE; + al_bool error = AL_FALSE; int timeout = AL_ETH_KR_AN_TIMEOUT; rc = al_eth_kr_an_init(kr_data->adapter, an_adv); if (rc != 0) { al_err("%s %s autonegotiation init failed\n", kr_data->adapter->name, __func__); return (rc); } rc = al_eth_kr_an_start(kr_data->adapter, AL_ETH_AN__LT_LANE_0, - FALSE, TRUE); + AL_FALSE, AL_TRUE); if (rc != 0) { al_err("%s %s autonegotiation enable failed\n", kr_data->adapter->name, __func__); return (rc); } do { DELAY(10000); timeout -= 10; if (timeout <= 0) { al_info("%s %s autonegotiation failed on timeout\n", kr_data->adapter->name, __func__); return (ETIMEDOUT); } al_eth_kr_an_status_check(kr_data->adapter, &page_received, &an_completed, &error); - } while (page_received == FALSE); + } while (page_received 
== AL_FALSE); if (error != 0) { al_info("%s %s autonegotiation failed (status error)\n", kr_data->adapter->name, __func__); return (EIO); } al_eth_kr_an_read_adv(kr_data->adapter, an_partner_adv); al_dbg("%s %s autonegotiation completed. error = %d\n", kr_data->adapter->name, __func__, error); return (0); } /***************************** receiver side *********************************/ static enum al_eth_kr_cl72_cstate al_eth_lt_coeff_set(struct al_eth_kr_data *kr_data, enum al_serdes_tx_deemph_param param, uint32_t op) { enum al_eth_kr_cl72_cstate status = 0; switch (op) { case AL_PHY_KR_COEF_UP_HOLD: /* no need to update the serdes - return not updated*/ status = C72_CSTATE_NOT_UPDATED; break; case AL_PHY_KR_COEF_UP_INC: status = C72_CSTATE_UPDATED; if (kr_data->serdes_obj->tx_deemph_inc( kr_data->serdes_obj, kr_data->lane, param) == 0) status = C72_CSTATE_MAX; break; case AL_PHY_KR_COEF_UP_DEC: status = C72_CSTATE_UPDATED; if (kr_data->serdes_obj->tx_deemph_dec( kr_data->serdes_obj, kr_data->lane, param) == 0) status = C72_CSTATE_MIN; break; default: /* 3=reserved */ break; } return (status); } /* * Inspect the received coefficient update request and update all coefficients * in the serdes accordingly. */ static void al_eth_coeff_req_handle(struct al_eth_kr_data *kr_data, struct al_eth_kr_coef_up_data *lpcoeff) { struct al_eth_kr_status_report_data *report = &kr_data->status_report; /* First check for Init and Preset commands. */ if ((lpcoeff->preset != 0) || (lpcoeff->initialize) != 0) { kr_data->serdes_obj->tx_deemph_preset( kr_data->serdes_obj, kr_data->lane); /* * in case of preset c(0) should be set to maximum and both c(1) * and c(-1) should be updated */ report->c_minus = C72_CSTATE_UPDATED; report->c_plus = C72_CSTATE_UPDATED; report->c_zero = C72_CSTATE_MAX; return; } /* * in case preset and initialize are false need to perform per * coefficient action. 
*/ report->c_minus = al_eth_lt_coeff_set(kr_data, AL_SERDES_TX_DEEMP_C_MINUS, lpcoeff->c_minus); report->c_zero = al_eth_lt_coeff_set(kr_data, AL_SERDES_TX_DEEMP_C_ZERO, lpcoeff->c_zero); report->c_plus = al_eth_lt_coeff_set(kr_data, AL_SERDES_TX_DEEMP_C_PLUS, lpcoeff->c_plus); al_dbg("%s: c(0) = 0x%x c(-1) = 0x%x c(1) = 0x%x\n", __func__, report->c_zero, report->c_plus, report->c_minus); } static void al_eth_kr_lt_receiver_task_init(struct al_eth_kr_data *kr_data) { al_memset(&kr_data->last_lpcoeff, 0, sizeof(struct al_eth_kr_coef_up_data)); al_memset(&kr_data->status_report, 0, sizeof(struct al_eth_kr_status_report_data)); } -static boolean_t +static bool al_eth_lp_coeff_up_change(struct al_eth_kr_data *kr_data, struct al_eth_kr_coef_up_data *lpcoeff) { struct al_eth_kr_coef_up_data *last_lpcoeff = &kr_data->last_lpcoeff; if (al_memcmp(last_lpcoeff, lpcoeff, sizeof(struct al_eth_kr_coef_up_data)) == 0) { - return (FALSE); + return (false); } al_memcpy(last_lpcoeff, lpcoeff, sizeof(struct al_eth_kr_coef_up_data)); - return (TRUE); + return (true); } /* * Run the receiver task for one cycle. * The receiver task continuously inspects the received coefficient update * requests and acts upon. * * @return <0 if error occur */ static int al_eth_kr_lt_receiver_task_run(struct al_eth_kr_data *kr_data) { struct al_eth_kr_coef_up_data new_lpcoeff; /* * First inspect status of the link. It may have dropped frame lock as * the remote did some reconfiguration of its serdes. * Then we simply have nothing to do and return immediately as caller * will call us continuously until lock comes back. */ if (al_eth_kr_receiver_frame_lock_get(kr_data->adapter, AL_ETH_AN__LT_LANE_0) != 0) { return (0); } /* check if a new update command was received */ al_eth_lp_coeff_up_get(kr_data->adapter, AL_ETH_AN__LT_LANE_0, &new_lpcoeff); if (al_eth_lp_coeff_up_change(kr_data, &new_lpcoeff) != 0) { /* got some new coefficient update request. 
*/ al_eth_coeff_req_handle(kr_data, &new_lpcoeff); } return (0); } /******************************** transmitter side ***************************/ static int al_eth_kr_lt_transmitter_task_init(struct al_eth_kr_data *kr_data) { int i; int rc; unsigned int temp_val; for (i = 0; i < QARRAY_SIZE; i++) kr_data->qarray[i] = 0; kr_data->qarray_cnt = 0; kr_data->algo_state = TX_INIT; kr_data->curr_coeff = COEFF_TO_MANIPULATE; /* first coeff to test. */ kr_data->coeff_status_step = C72_CSTATE_NOT_UPDATED; kr_data->end_steps_cnt = QARRAY_SIZE-1; /* go back to first entry */ /* * Perform measure eye here to run the rx equalizer * for the first time to get init values */ rc = kr_data->serdes_obj->eye_measure_run( kr_data->serdes_obj, kr_data->lane, AL_ETH_KR_EYE_MEASURE_TIMEOUT, &temp_val); if (rc != 0) { al_warn("%s: Failed to run Rx equalizer (rc = 0x%x)\n", __func__, rc); return (rc); } return (0); } -static boolean_t +static bool al_eth_kr_lt_all_not_updated(struct al_eth_kr_status_report_data *report) { if ((report->c_zero == C72_CSTATE_NOT_UPDATED) && (report->c_minus == C72_CSTATE_NOT_UPDATED) && (report->c_plus == C72_CSTATE_NOT_UPDATED)) { - return (TRUE); + return (true); } - return (FALSE); + return (false); } static void al_eth_kr_lt_coef_set(struct al_eth_kr_coef_up_data *ldcoeff, enum al_eth_kr_coef coef, enum al_eth_kr_cl72_coef_op op) { switch (coef) { case AL_ETH_KR_COEF_C_MINUS: ldcoeff->c_minus = op; break; case AL_ETH_KR_COEF_C_PLUS: ldcoeff->c_plus = op; break; case AL_ETH_KR_COEF_C_ZERO: ldcoeff->c_zero = op; break; } } static enum al_eth_kr_cl72_cstate al_eth_kr_lt_coef_report_get(struct al_eth_kr_status_report_data *report, enum al_eth_kr_coef coef) { switch (coef) { case AL_ETH_KR_COEF_C_MINUS: return (report->c_minus); case AL_ETH_KR_COEF_C_PLUS: return (report->c_plus); case AL_ETH_KR_COEF_C_ZERO: return (report->c_zero); } return (0); } /* * Run the transmitter_task for one cycle. 
* * @return <0 if error occurs */ static int al_eth_kr_lt_transmitter_task_run(struct al_eth_kr_data *kr_data) { struct al_eth_kr_status_report_data report; unsigned int coeff_status_cur; struct al_eth_kr_coef_up_data ldcoeff = { 0, 0, 0, 0, 0 }; unsigned int val; int i; enum al_eth_kr_mac_lt_state nextstate; int rc = 0; /* * do nothing if currently there is no frame lock (which may happen * when remote updates its analogs). */ if (al_eth_kr_receiver_frame_lock_get(kr_data->adapter, AL_ETH_AN__LT_LANE_0) == 0) { return (0); } al_eth_lp_status_report_get(kr_data->adapter, AL_ETH_AN__LT_LANE_0, &report); /* extract curr status of the coefficient in use */ coeff_status_cur = al_eth_kr_lt_coef_report_get(&report, kr_data->curr_coeff); nextstate = kr_data->algo_state; /* default we stay in curr state; */ switch (kr_data->algo_state) { case TX_INIT: /* waiting for start */ if (al_eth_kr_startup_proto_prog_get(kr_data->adapter, AL_ETH_AN__LT_LANE_0) != 0) { /* training is on and frame lock */ nextstate = WAIT_BEGIN; } break; case WAIT_BEGIN: kr_data->qarray_cnt = 0; kr_data->curr_coeff = COEFF_TO_MANIPULATE; kr_data->coeff_status_step = C72_CSTATE_NOT_UPDATED; coeff_status_cur = C72_CSTATE_NOT_UPDATED; kr_data->end_steps_cnt = QARRAY_SIZE-1; /* Wait for not_updated for all coefficients from remote */ if (al_eth_kr_lt_all_not_updated(&report) != 0) { - ldcoeff.preset = TRUE; + ldcoeff.preset = AL_TRUE; nextstate = DO_PRESET; } break; case DO_PRESET: /* * Send PRESET and wait for updated for all * coefficients from remote */ if (al_eth_kr_lt_all_not_updated(&report) == 0) nextstate = DO_HOLD; else /* as long as the lp didn't response to the preset * we should continue sending it */ - ldcoeff.preset = TRUE; + ldcoeff.preset = AL_TRUE; break; case DO_HOLD: /* * clear the PRESET, issue HOLD command and wait for * hold handshake */ if (al_eth_kr_lt_all_not_updated(&report) != 0) nextstate = QMEASURE; break; case QMEASURE: /* makes a measurement and fills the new value into the 
array */ rc = kr_data->serdes_obj->eye_measure_run( kr_data->serdes_obj, kr_data->lane, AL_ETH_KR_EYE_MEASURE_TIMEOUT, &val); if (rc != 0) { al_warn("%s: Rx eye measurement failed\n", __func__); return (rc); } al_dbg("%s: Rx Measure eye returned 0x%x\n", __func__, val); /* put the new value into the array at the top. */ for (i = 0; i < QARRAY_SIZE-1; i++) kr_data->qarray[i] = kr_data->qarray[i+1]; kr_data->qarray[QARRAY_SIZE-1] = val; if (kr_data->qarray_cnt < QARRAY_SIZE) kr_data->qarray_cnt++; nextstate = QCHECK; break; case QCHECK: /* check if we reached the best link quality yet. */ if (kr_data->qarray_cnt < QARRAY_SIZE) { /* keep going until at least the history is * filled. check that we can keep going or if * coefficient has already reached minimum. */ if (kr_data->coeff_status_step == C72_CSTATE_MIN) nextstate = COEFF_DONE; else { /* * request a DECREMENT of the * coefficient under control */ al_eth_kr_lt_coef_set(&ldcoeff, kr_data->curr_coeff, AL_PHY_KR_COEF_UP_DEC); nextstate = DO_NEXT_TRY; } } else { /* * check if current value and last both are worse than * the 2nd last. This we take as an ending condition * assuming the minimum was reached two tries before * so we will now go back to that point. */ if ((kr_data->qarray[0] < kr_data->qarray[1]) && (kr_data->qarray[0] < kr_data->qarray[2])) { /* * request a INCREMENT of the * coefficient under control */ al_eth_kr_lt_coef_set(&ldcoeff, kr_data->curr_coeff, AL_PHY_KR_COEF_UP_INC); /* start going back to the maximum */ nextstate = END_STEPS; if (kr_data->end_steps_cnt > 0) kr_data->end_steps_cnt--; } else { if (kr_data->coeff_status_step == C72_CSTATE_MIN) { nextstate = COEFF_DONE; } else { /* * request a DECREMENT of the * coefficient under control */ al_eth_kr_lt_coef_set(&ldcoeff, kr_data->curr_coeff, AL_PHY_KR_COEF_UP_DEC); nextstate = DO_NEXT_TRY; } } } break; case DO_NEXT_TRY: /* * save the status when we issue the DEC step to the remote, * before the HOLD is done again. 
*/ kr_data->coeff_status_step = coeff_status_cur; if (coeff_status_cur != C72_CSTATE_NOT_UPDATED) nextstate = DO_HOLD; /* go to next measurement round */ else al_eth_kr_lt_coef_set(&ldcoeff, kr_data->curr_coeff, AL_PHY_KR_COEF_UP_DEC); break; /* * Coefficient iteration completed, go back to the optimum step * In this algorithm we assume 2 before curr was best hence need to do * two INC runs. */ case END_STEPS: if (coeff_status_cur != C72_CSTATE_NOT_UPDATED) nextstate = END_STEPS_HOLD; else al_eth_kr_lt_coef_set(&ldcoeff, kr_data->curr_coeff, AL_PHY_KR_COEF_UP_INC); break; case END_STEPS_HOLD: if (coeff_status_cur == C72_CSTATE_NOT_UPDATED) { if (kr_data->end_steps_cnt != 0) { /* * request a INCREMENT of the * coefficient under control */ al_eth_kr_lt_coef_set(&ldcoeff, kr_data->curr_coeff, AL_PHY_KR_COEF_UP_INC); /* go 2nd time - dec the end step count */ nextstate = END_STEPS; if (kr_data->end_steps_cnt > 0) kr_data->end_steps_cnt--; } else { nextstate = COEFF_DONE; } } break; case COEFF_DONE: /* * now this coefficient is done. * We can now either choose to finish here, * or keep going with another coefficient. */ if ((int)kr_data->curr_coeff < COEFF_TO_MANIPULATE_LAST) { int i; for (i = 0; i < QARRAY_SIZE; i++) kr_data->qarray[i] = 0; kr_data->qarray_cnt = 0; kr_data->end_steps_cnt = QARRAY_SIZE-1; kr_data->coeff_status_step = C72_CSTATE_NOT_UPDATED; kr_data->curr_coeff++; al_dbg("[%s]: doing next coefficient: %d ---\n\n", kr_data->adapter->name, kr_data->curr_coeff); nextstate = QMEASURE; } else { nextstate = SET_READY; } break; case SET_READY: /* * our receiver is ready for data. * no training will occur any more. */ - kr_data->status_report.receiver_ready = TRUE; + kr_data->status_report.receiver_ready = AL_TRUE; /* * in addition to the status we transmit, we also must tell our * local hardware state-machine that we are done, so the * training can eventually complete when the remote indicates * it is ready also. 
The hardware will then automatically * give control to the PCS layer completing training. */ al_eth_receiver_ready_set(kr_data->adapter, AL_ETH_AN__LT_LANE_0); nextstate = TX_DONE; break; case TX_DONE: break; /* nothing else to do */ default: nextstate = kr_data->algo_state; break; } /* * The status we want to transmit to remote. * Note that the status combines the receiver status of all coefficients * with the transmitter's rx ready status. */ if (kr_data->algo_state != nextstate) { al_dbg("[%s] [al_eth_kr_lt_transmit_run] STM changes %s -> %s: " " Qarray=%d/%d/%d\n", kr_data->adapter->name, al_eth_kr_mac_sm_name[kr_data->algo_state], al_eth_kr_mac_sm_name[nextstate], kr_data->qarray[0], kr_data->qarray[1], kr_data->qarray[2]); } kr_data->algo_state = nextstate; /* * write fields for transmission into hardware. * Important: this must be done always, as the receiver may have * received update commands and wants to return its status. */ al_eth_ld_coeff_up_set(kr_data->adapter, AL_ETH_AN__LT_LANE_0, &ldcoeff); al_eth_ld_status_report_set(kr_data->adapter, AL_ETH_AN__LT_LANE_0, &kr_data->status_report); return (0); } /*****************************************************************************/ static int al_eth_kr_run_lt(struct al_eth_kr_data *kr_data) { unsigned int cnt; int ret = 0; - boolean_t page_received = FALSE; - boolean_t an_completed = FALSE; - boolean_t error = FALSE; - boolean_t training_failure = FALSE; + al_bool page_received = AL_FALSE; + al_bool an_completed = AL_FALSE; + al_bool error = AL_FALSE; + al_bool training_failure = AL_FALSE; al_eth_kr_lt_initialize(kr_data->adapter, AL_ETH_AN__LT_LANE_0); if (al_eth_kr_lt_frame_lock_wait(kr_data->adapter, AL_ETH_AN__LT_LANE_0, - AL_ETH_KR_FRAME_LOCK_TIMEOUT) == TRUE) { + AL_ETH_KR_FRAME_LOCK_TIMEOUT) == AL_TRUE) { /* * when locked, for the first time initialize the receiver and * transmitter tasks to prepare it for detecting coefficient * update requests. 
*/ al_eth_kr_lt_receiver_task_init(kr_data); ret = al_eth_kr_lt_transmitter_task_init(kr_data); if (ret != 0) goto error; cnt = 0; do { ret = al_eth_kr_lt_receiver_task_run(kr_data); if (ret != 0) break; /* stop the link training */ ret = al_eth_kr_lt_transmitter_task_run(kr_data); if (ret != 0) break; /* stop the link training */ cnt++; DELAY(100); } while ((al_eth_kr_startup_proto_prog_get(kr_data->adapter, AL_ETH_AN__LT_LANE_0)) && (cnt <= AL_ETH_KR_LT_MAX_ROUNDS)); training_failure = al_eth_kr_training_status_fail_get(kr_data->adapter, AL_ETH_AN__LT_LANE_0); al_dbg("[%s] training ended after %d rounds, failed = %s\n", kr_data->adapter->name, cnt, (training_failure) ? "Yes" : "No"); if (training_failure || cnt > AL_ETH_KR_LT_MAX_ROUNDS) { al_warn("[%s] Training Fail: status: %s, timeout: %s\n", kr_data->adapter->name, (training_failure) ? "Failed" : "OK", (cnt > AL_ETH_KR_LT_MAX_ROUNDS) ? "Yes" : "No"); /* * note: link is now disabled, * until training becomes disabled (see below). */ ret = EIO; goto error; } } else { al_info("[%s] FAILED: did not achieve initial frame lock...\n", kr_data->adapter->name); ret = EIO; goto error; } /* * ensure to stop link training at the end to allow normal PCS * datapath to operate in case of training failure. 
*/ al_eth_kr_lt_stop(kr_data->adapter, AL_ETH_AN__LT_LANE_0); cnt = AL_ETH_KR_LT_DONE_TIMEOUT; - while (an_completed == FALSE) { + while (an_completed == AL_FALSE) { al_eth_kr_an_status_check(kr_data->adapter, &page_received, &an_completed, &error); DELAY(1); if ((cnt--) == 0) { al_info("%s: wait for an complete timeout!\n", __func__); ret = ETIMEDOUT; goto error; } } error: al_eth_kr_an_stop(kr_data->adapter); return (ret); } /* execute Autonegotiation process */ int al_eth_an_lt_execute(struct al_hal_eth_adapter *adapter, struct al_serdes_grp_obj *serdes_obj, enum al_serdes_lane lane, struct al_eth_an_adv *an_adv, struct al_eth_an_adv *partner_adv) { struct al_eth_kr_data kr_data; int rc; struct al_serdes_adv_rx_params rx_params; al_memset(&kr_data, 0, sizeof(struct al_eth_kr_data)); kr_data.adapter = adapter; kr_data.serdes_obj = serdes_obj; kr_data.lane = lane; /* * the link training progress will run rx equalization so need to make * sure rx parameters is not been override */ - rx_params.override = FALSE; + rx_params.override = AL_FALSE; kr_data.serdes_obj->rx_advanced_params_set( kr_data.serdes_obj, kr_data.lane, &rx_params); rc = al_eth_kr_an_run(&kr_data, an_adv, partner_adv); if (rc != 0) { al_eth_kr_lt_stop(adapter, AL_ETH_AN__LT_LANE_0); al_eth_kr_an_stop(adapter); al_dbg("%s: auto-negotiation failed!\n", __func__); return (rc); } if (partner_adv->technology != AL_ETH_AN_TECH_10GBASE_KR) { al_eth_kr_lt_stop(adapter, AL_ETH_AN__LT_LANE_0); al_eth_kr_an_stop(adapter); al_dbg("%s: link partner isn't 10GBASE_KR.\n", __func__); return (rc); } rc = al_eth_kr_run_lt(&kr_data); if (rc != 0) { al_eth_kr_lt_stop(adapter, AL_ETH_AN__LT_LANE_0); al_eth_kr_an_stop(adapter); al_dbg("%s: Link-training failed!\n", __func__); return (rc); } return (0); } diff --git a/sys/dev/al_eth/al_init_eth_lm.c b/sys/dev/al_eth/al_init_eth_lm.c index 5caea40e3bf7..4b4ebe3628a7 100644 --- a/sys/dev/al_eth/al_init_eth_lm.c +++ b/sys/dev/al_eth/al_init_eth_lm.c @@ -1,1533 +1,1533 @@ /*- 
* Copyright (c) 2015,2016 Annapurna Labs Ltd. and affiliates * All rights reserved. * * Developed by Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "al_init_eth_lm.h" #include "al_serdes.h" #include "al_hal_eth.h" #include "al_init_eth_kr.h" /** * @{ * @file al_init_eth_lm.c * * @brief ethernet link management common utilities * */ /* delay before checking link status with new serdes parameters (uSec) */ #define AL_ETH_LM_LINK_STATUS_DELAY 1000 /* delay before checking link status after reconfiguring the retimer (uSec) */ #define AL_ETH_LM_RETIMER_LINK_STATUS_DELAY 50000 #define AL_ETH_LM_EQ_ITERATIONS 15 #define AL_ETH_LM_MAX_DCGAIN 8 /* num of link training failures till serdes reset */ #define AL_ETH_LT_FAILURES_TO_RESET 10 #define MODULE_IDENTIFIER_IDX 0 #define MODULE_IDENTIFIER_SFP 0x3 #define MODULE_IDENTIFIER_QSFP 0xd #define SFP_PRESENT 0 #define SFP_NOT_PRESENT 1 /* SFP+ module */ #define SFP_I2C_HEADER_10G_IDX 3 #define SFP_I2C_HEADER_10G_DA_IDX 8 #define SFP_I2C_HEADER_10G_DA_LEN_IDX 18 #define SFP_I2C_HEADER_1G_IDX 6 #define SFP_I2C_HEADER_SIGNAL_RATE 12 /* Nominal signaling rate, units of 100MBd. 
*/ #define SFP_MIN_SIGNAL_RATE_25G 250 #define SFP_MIN_SIGNAL_RATE_10G 100 /* QSFP+ module */ #define QSFP_COMPLIANCE_CODE_IDX 131 /* 40GBASE-LR4 and 40GBASE-SR4 are optic modules */ #define QSFP_COMPLIANCE_CODE_OPTIC ((1 << 1) | (1 << 2)) #define QSFP_COMPLIANCE_CODE_DAC (1 << 3) #define QSFP_CABLE_LEN_IDX 146 /* TODO: need to check the necessary delay */ #define AL_ETH_LM_RETIMER_WAIT_FOR_LOCK 500 /* delay after retimer reset to lock (mSec) */ #define AL_ETH_LM_SERDES_WAIT_FOR_LOCK 50 /* delay after signal detect to lock (mSec) */ #define AL_ETH_LM_GEARBOX_RESET_DELAY 1000 /* (uSec) */ static const uint32_t al_eth_retimer_boost_addr[AL_ETH_RETIMER_CHANNEL_MAX][AL_ETH_RETIMER_TYPE_MAX] = { /* BR_210 | BR_410 */ /* AL_ETH_RETIMER_CHANNEL_A */ {0xf, 0x1a}, /* AL_ETH_RETIMER_CHANNEL_B */ {0x16, 0x18}, /* AL_ETH_RETIMER_CHANNEL_C */ {0x0, 0x16}, /* AL_ETH_RETIMER_CHANNEL_D */ {0x0, 0x14}, }; #define RETIMER_LENS_MAX 5 static const uint32_t al_eth_retimer_boost_lens[RETIMER_LENS_MAX] = {0, 1, 2, 3, 5}; static const uint32_t al_eth_retimer_boost_value[RETIMER_LENS_MAX + 1][AL_ETH_RETIMER_TYPE_MAX] = { /* BR_210 | BR_410 */ /* 0 */ {0x0, 0x0}, /* 1 */ {0x1, 0x1}, /* 2 */ {0x2, 0x1}, /* 3 */ {0x3, 0x3}, /* 5 */ {0x7, 0x3}, /* 5+ */{0xb, 0x7}, }; struct retimer_config_reg { uint8_t addr; uint8_t value; uint8_t mask; }; static struct retimer_config_reg retimer_ds25_25g_mode_tx_ch[] = { {.addr = 0x0A, .value = 0x0C, .mask = 0xff }, {.addr = 0x2F, .value = 0x54, .mask = 0xff }, {.addr = 0x31, .value = 0x20, .mask = 0xff }, {.addr = 0x1E, .value = 0xE9, .mask = 0xff }, {.addr = 0x1F, .value = 0x0B, .mask = 0xff }, {.addr = 0xA6, .value = 0x43, .mask = 0xff }, {.addr = 0x2A, .value = 0x5A, .mask = 0xff }, {.addr = 0x2B, .value = 0x0A, .mask = 0xff }, {.addr = 0x2C, .value = 0xF6, .mask = 0xff }, {.addr = 0x70, .value = 0x05, .mask = 0xff }, {.addr = 0x6A, .value = 0x21, .mask = 0xff }, {.addr = 0x35, .value = 0x0F, .mask = 0xff }, {.addr = 0x12, .value = 0x83, .mask = 0xff }, 
{.addr = 0x9C, .value = 0x24, .mask = 0xff }, {.addr = 0x98, .value = 0x00, .mask = 0xff }, {.addr = 0x42, .value = 0x50, .mask = 0xff }, {.addr = 0x44, .value = 0x90, .mask = 0xff }, {.addr = 0x45, .value = 0xC0, .mask = 0xff }, {.addr = 0x46, .value = 0xD0, .mask = 0xff }, {.addr = 0x47, .value = 0xD1, .mask = 0xff }, {.addr = 0x48, .value = 0xD5, .mask = 0xff }, {.addr = 0x49, .value = 0xD8, .mask = 0xff }, {.addr = 0x4A, .value = 0xEA, .mask = 0xff }, {.addr = 0x4B, .value = 0xF7, .mask = 0xff }, {.addr = 0x4C, .value = 0xFD, .mask = 0xff }, {.addr = 0x8E, .value = 0x00, .mask = 0xff }, {.addr = 0x3D, .value = 0x94, .mask = 0xff }, {.addr = 0x3F, .value = 0x40, .mask = 0xff }, {.addr = 0x3E, .value = 0x43, .mask = 0xff }, {.addr = 0x0A, .value = 0x00, .mask = 0xff }, }; static struct retimer_config_reg retimer_ds25_25g_mode_rx_ch[] = { {.addr = 0x0A, .value = 0x0C, .mask = 0xff}, {.addr = 0x2F, .value = 0x54, .mask = 0xff}, {.addr = 0x31, .value = 0x40, .mask = 0xff}, {.addr = 0x1E, .value = 0xE3, .mask = 0xff}, {.addr = 0x1F, .value = 0x0B, .mask = 0xff}, {.addr = 0xA6, .value = 0x43, .mask = 0xff}, {.addr = 0x2A, .value = 0x5A, .mask = 0xff}, {.addr = 0x2B, .value = 0x0A, .mask = 0xff}, {.addr = 0x2C, .value = 0xF6, .mask = 0xff}, {.addr = 0x70, .value = 0x05, .mask = 0xff}, {.addr = 0x6A, .value = 0x21, .mask = 0xff}, {.addr = 0x35, .value = 0x0F, .mask = 0xff}, {.addr = 0x12, .value = 0x83, .mask = 0xff}, {.addr = 0x9C, .value = 0x24, .mask = 0xff}, {.addr = 0x98, .value = 0x00, .mask = 0xff}, {.addr = 0x42, .value = 0x50, .mask = 0xff}, {.addr = 0x44, .value = 0x90, .mask = 0xff}, {.addr = 0x45, .value = 0xC0, .mask = 0xff}, {.addr = 0x46, .value = 0xD0, .mask = 0xff}, {.addr = 0x47, .value = 0xD1, .mask = 0xff}, {.addr = 0x48, .value = 0xD5, .mask = 0xff}, {.addr = 0x49, .value = 0xD8, .mask = 0xff}, {.addr = 0x4A, .value = 0xEA, .mask = 0xff}, {.addr = 0x4B, .value = 0xF7, .mask = 0xff}, {.addr = 0x4C, .value = 0xFD, .mask = 0xff}, {.addr = 0x8E, .value 
= 0x00, .mask = 0xff}, {.addr = 0x3D, .value = 0x94, .mask = 0xff}, {.addr = 0x3F, .value = 0x40, .mask = 0xff}, {.addr = 0x3E, .value = 0x43, .mask = 0xff}, {.addr = 0x0A, .value = 0x00, .mask = 0xff}, }; static struct retimer_config_reg retimer_ds25_10g_mode[] = { /* Assert CDR reset (6.3) */ {.addr = 0x0A, .value = 0x0C, .mask = 0x0C}, /* Select 10.3125Gbps standard rate mode (6.6) */ {.addr = 0x2F, .value = 0x00, .mask = 0xF0}, /* Enable loop filter auto-adjust */ {.addr = 0x1F, .value = 0x08, .mask = 0x08}, /* Set Adapt Mode 1 (6.13) */ {.addr = 0x31, .value = 0x20, .mask = 0x60}, /* Disable the DFE since most applications do not need it (6.18) */ {.addr = 0x1E, .value = 0x08, .mask = 0x08}, /* Release CDR reset (6.4) */ {.addr = 0x0A, .value = 0x00, .mask = 0x0C}, /* Enable FIR (6.12) */ {.addr = 0x3D, .value = 0x80, .mask = 0x80}, /* Set Main-cursor tap sign to positive (6.12) */ {.addr = 0x3D, .value = 0x00, .mask = 0x40}, /* Set Post-cursor tap sign to negative (6.12) */ {.addr = 0x3F, .value = 0x40, .mask = 0x40}, /* Set Pre-cursor tap sign to negative (6.12) */ {.addr = 0x3E, .value = 0x40, .mask = 0x40}, /* Set Main-cursor tap magnitude to 13 (6.12) */ {.addr = 0x3D, .value = 0x0D, .mask = 0x1F}, }; static int al_eth_lm_retimer_boost_config(struct al_eth_lm_context *lm_context); static int al_eth_lm_retimer_ds25_full_config(struct al_eth_lm_context *lm_context); -static al_bool al_eth_lm_retimer_ds25_signal_detect( +static bool al_eth_lm_retimer_ds25_signal_detect( struct al_eth_lm_context *lm_context, uint32_t channel); static int al_eth_lm_retimer_ds25_cdr_reset(struct al_eth_lm_context *lm_context, uint32_t channel); -static al_bool al_eth_lm_retimer_ds25_cdr_lock( +static bool al_eth_lm_retimer_ds25_cdr_lock( struct al_eth_lm_context *lm_context, uint32_t channel); static int al_eth_lm_retimer_25g_rx_adaptation(struct al_eth_lm_context *lm_context); struct al_eth_lm_retimer { int (*config)(struct al_eth_lm_context *lm_context); int (*reset)(struct 
al_eth_lm_context *lm_context, uint32_t channel); - int (*signal_detect)(struct al_eth_lm_context *lm_context, uint32_t channel); - int (*cdr_lock)(struct al_eth_lm_context *lm_context, uint32_t channel); + bool (*signal_detect)(struct al_eth_lm_context *lm_context, uint32_t channel); + bool (*cdr_lock)(struct al_eth_lm_context *lm_context, uint32_t channel); int (*rx_adaptation)(struct al_eth_lm_context *lm_context); }; static struct al_eth_lm_retimer retimer[] = { {.config = al_eth_lm_retimer_boost_config, .signal_detect = NULL, .reset = NULL, .cdr_lock = NULL, .rx_adaptation = NULL}, {.config = al_eth_lm_retimer_boost_config, .signal_detect = NULL, .reset = NULL, .cdr_lock = NULL, .rx_adaptation = NULL}, {.config = al_eth_lm_retimer_ds25_full_config, .signal_detect = al_eth_lm_retimer_ds25_signal_detect, .reset = al_eth_lm_retimer_ds25_cdr_reset, .cdr_lock = al_eth_lm_retimer_ds25_cdr_lock, .rx_adaptation = al_eth_lm_retimer_25g_rx_adaptation}, }; #define SFP_10G_DA_ACTIVE 0x8 #define SFP_10G_DA_PASSIVE 0x4 #define lm_debug(...) 
\ do { \ if (lm_context->debug) \ al_warn(__VA_ARGS__); \ else \ al_dbg(__VA_ARGS__); \ } while (0) static int al_eth_sfp_detect(struct al_eth_lm_context *lm_context, enum al_eth_lm_link_mode *new_mode) { int rc = 0; uint8_t sfp_10g; uint8_t sfp_1g; uint8_t sfp_cable_tech; uint8_t sfp_da_len; uint8_t signal_rate; do { rc = lm_context->i2c_read(lm_context->i2c_context, lm_context->sfp_bus_id, lm_context->sfp_i2c_addr, SFP_I2C_HEADER_10G_IDX, &sfp_10g); if (rc != 0) break; rc = lm_context->i2c_read(lm_context->i2c_context, lm_context->sfp_bus_id, lm_context->sfp_i2c_addr, SFP_I2C_HEADER_1G_IDX, &sfp_1g); if (rc != 0) break; rc = lm_context->i2c_read(lm_context->i2c_context, lm_context->sfp_bus_id, lm_context->sfp_i2c_addr, SFP_I2C_HEADER_10G_DA_IDX, &sfp_cable_tech); if (rc != 0) break; rc = lm_context->i2c_read(lm_context->i2c_context, lm_context->sfp_bus_id, lm_context->sfp_i2c_addr, SFP_I2C_HEADER_10G_DA_LEN_IDX, &sfp_da_len); if (rc != 0) break; rc = lm_context->i2c_read(lm_context->i2c_context, lm_context->sfp_bus_id, lm_context->sfp_i2c_addr, SFP_I2C_HEADER_SIGNAL_RATE, &signal_rate); } while (0); if (rc != 0) { if (rc == ETIMEDOUT) { /* ETIMEDOUT is returned when no SFP is connected */ if (lm_context->mode != AL_ETH_LM_MODE_DISCONNECTED) lm_debug("%s: SFP Disconnected\n", __func__); *new_mode = AL_ETH_LM_MODE_DISCONNECTED; } else { return (rc); } } else if ((sfp_cable_tech & (SFP_10G_DA_PASSIVE | SFP_10G_DA_ACTIVE)) != 0) { if ((signal_rate >= SFP_MIN_SIGNAL_RATE_25G) && ((lm_context->max_speed == AL_ETH_LM_MAX_SPEED_25G) || (lm_context->max_speed == AL_ETH_LM_MAX_SPEED_MAX))) *new_mode = AL_ETH_LM_MODE_25G; else if ((signal_rate >= SFP_MIN_SIGNAL_RATE_10G) && ((lm_context->max_speed == AL_ETH_LM_MAX_SPEED_10G) || (lm_context->max_speed == AL_ETH_LM_MAX_SPEED_MAX))) *new_mode = AL_ETH_LM_MODE_10G_DA; else *new_mode = AL_ETH_LM_MODE_1G; lm_debug("%s: %s DAC (%d M) detected (max signal rate %d)\n", __func__, (sfp_cable_tech & SFP_10G_DA_PASSIVE) ? 
"Passive" : "Active", sfp_da_len, signal_rate); /* for active direct attached need to use len 0 in the retimer configuration */ lm_context->da_len = (sfp_cable_tech & SFP_10G_DA_PASSIVE) ? sfp_da_len : 0; } else if (sfp_10g != 0) { lm_debug("%s: 10 SFP detected\n", __func__); *new_mode = AL_ETH_LM_MODE_10G_OPTIC; } else if (sfp_1g != 0) { lm_debug("%s: 1G SFP detected\n", __func__); *new_mode = AL_ETH_LM_MODE_1G; } else { al_warn("%s: unknown SFP inserted. eeprom content: 10G compliance 0x%x," " 1G compliance 0x%x, sfp+cable 0x%x. default to %s\n", __func__, sfp_10g, sfp_1g, sfp_cable_tech, al_eth_lm_mode_convert_to_str(lm_context->default_mode)); *new_mode = lm_context->default_mode; lm_context->da_len = lm_context->default_dac_len; } if ((lm_context->sfp_detect_force_mode) && (*new_mode != AL_ETH_LM_MODE_DISCONNECTED) && (*new_mode != lm_context->default_mode)) { al_warn("%s: Force mode to default (%s). mode based of the SFP EEPROM %s\n", __func__, al_eth_lm_mode_convert_to_str(lm_context->default_mode), al_eth_lm_mode_convert_to_str(*new_mode)); *new_mode = lm_context->default_mode; } lm_context->mode = *new_mode; return (0); } static int al_eth_qsfp_detect(struct al_eth_lm_context *lm_context, enum al_eth_lm_link_mode *new_mode) { int rc = 0; uint8_t qsfp_comp_code; uint8_t qsfp_da_len; do { rc = lm_context->i2c_read(lm_context->i2c_context, lm_context->sfp_bus_id, lm_context->sfp_i2c_addr, QSFP_COMPLIANCE_CODE_IDX, &qsfp_comp_code); if (rc != 0) break; rc = lm_context->i2c_read(lm_context->i2c_context, lm_context->sfp_bus_id, lm_context->sfp_i2c_addr, QSFP_CABLE_LEN_IDX, &qsfp_da_len); if (rc != 0) break; } while (0); if (rc != 0) { if (rc == ETIMEDOUT) { /* ETIMEDOUT is returned when no SFP is connected */ lm_debug("%s: SFP Disconnected\n", __func__); *new_mode = AL_ETH_LM_MODE_DISCONNECTED; } else { return (rc); } } else if ((qsfp_comp_code & QSFP_COMPLIANCE_CODE_DAC) != 0) { lm_debug("%s: 10G passive DAC (%d M) detected\n", __func__, qsfp_da_len); *new_mode 
= AL_ETH_LM_MODE_10G_DA; lm_context->da_len = qsfp_da_len; } else if ((qsfp_comp_code & QSFP_COMPLIANCE_CODE_OPTIC) != 0) { lm_debug("%s: 10G optic module detected\n", __func__); *new_mode = AL_ETH_LM_MODE_10G_OPTIC; } else { al_warn("%s: unknown QSFP inserted. eeprom content: 10G " "compliance 0x%x default to %s\n", __func__, qsfp_comp_code, al_eth_lm_mode_convert_to_str(lm_context->default_mode)); *new_mode = lm_context->default_mode; lm_context->da_len = lm_context->default_dac_len; } lm_context->mode = *new_mode; return (0); } static int al_eth_module_detect(struct al_eth_lm_context *lm_context, enum al_eth_lm_link_mode *new_mode) { int rc = 0; uint8_t module_idx; int sfp_present = SFP_PRESENT; if ((lm_context->gpio_get) && (lm_context->gpio_present != 0)) sfp_present = lm_context->gpio_get(lm_context->gpio_present); if (sfp_present == SFP_NOT_PRESENT) { lm_debug("%s: SFP not exist\n", __func__); *new_mode = AL_ETH_LM_MODE_DISCONNECTED; return 0; } rc = lm_context->i2c_read(lm_context->i2c_context, lm_context->sfp_bus_id, lm_context->sfp_i2c_addr, MODULE_IDENTIFIER_IDX, &module_idx); if (rc != 0) { if (rc == ETIMEDOUT) { /* ETIMEDOUT is returned when no SFP is connected */ if (lm_context->mode != AL_ETH_LM_MODE_DISCONNECTED) lm_debug("%s: SFP Disconnected\n", __func__); *new_mode = AL_ETH_LM_MODE_DISCONNECTED; return (0); } else { return (rc); } } if (module_idx == MODULE_IDENTIFIER_QSFP) return (al_eth_qsfp_detect(lm_context, new_mode)); else return (al_eth_sfp_detect(lm_context, new_mode)); return (0); } static struct al_serdes_adv_tx_params da_tx_params = { - .override = TRUE, + .override = AL_TRUE, .amp = 0x1, .total_driver_units = 0x13, .c_plus_1 = 0x2, .c_plus_2 = 0, .c_minus_1 = 0x2, .slew_rate = 0, }; static struct al_serdes_adv_rx_params da_rx_params = { - .override = TRUE, + .override = AL_TRUE, .dcgain = 0x4, .dfe_3db_freq = 0x4, .dfe_gain = 0x3, .dfe_first_tap_ctrl = 0x5, .dfe_secound_tap_ctrl = 0x1, .dfe_third_tap_ctrl = 0x8, .dfe_fourth_tap_ctrl = 
0x1, .low_freq_agc_gain = 0x7, .precal_code_sel = 0, .high_freq_agc_boost = 0x1d, }; static struct al_serdes_adv_tx_params optic_tx_params = { - .override = TRUE, + .override = AL_TRUE, .amp = 0x1, .total_driver_units = 0x13, .c_plus_1 = 0x2, .c_plus_2 = 0, .c_minus_1 = 0, .slew_rate = 0, }; static struct al_serdes_adv_rx_params optic_rx_params = { - .override = TRUE, + .override = AL_TRUE, .dcgain = 0x0, .dfe_3db_freq = 0x7, .dfe_gain = 0x0, .dfe_first_tap_ctrl = 0x0, .dfe_secound_tap_ctrl = 0x8, .dfe_third_tap_ctrl = 0x0, .dfe_fourth_tap_ctrl = 0x8, .low_freq_agc_gain = 0x7, .precal_code_sel = 0, .high_freq_agc_boost = 0x4, }; static void al_eth_serdes_static_tx_params_set(struct al_eth_lm_context *lm_context) { if (lm_context->tx_param_dirty == 0) return; if (lm_context->serdes_tx_params_valid != 0) { lm_context->tx_param_dirty = 0; - lm_context->tx_params_override.override = TRUE; + lm_context->tx_params_override.override = AL_TRUE; if ((lm_context->serdes_obj->tx_advanced_params_set) == 0) { al_err("tx_advanced_params_set is not supported for this serdes group\n"); return; } lm_context->serdes_obj->tx_advanced_params_set( lm_context->serdes_obj, lm_context->lane, &lm_context->tx_params_override); } else if (lm_context->static_values != 0) { lm_context->tx_param_dirty = 0; if ((lm_context->serdes_obj->tx_advanced_params_set) == 0) { al_err("tx_advanced_params_set is not supported for this serdes group\n"); return; } if ((lm_context->retimer_exist == 0) && (lm_context->mode == AL_ETH_LM_MODE_10G_DA)) lm_context->serdes_obj->tx_advanced_params_set( lm_context->serdes_obj, lm_context->lane, &da_tx_params); else lm_context->serdes_obj->tx_advanced_params_set( lm_context->serdes_obj, lm_context->lane, &optic_tx_params); } } static void al_eth_serdes_static_rx_params_set(struct al_eth_lm_context *lm_context) { if (lm_context->rx_param_dirty == 0) return; if (lm_context->serdes_rx_params_valid != 0) { lm_context->rx_param_dirty = 0; - 
lm_context->rx_params_override.override = TRUE; + lm_context->rx_params_override.override = AL_TRUE; if ((lm_context->serdes_obj->rx_advanced_params_set) == 0) { al_err("rx_advanced_params_set is not supported for this serdes group\n"); return; } lm_context->serdes_obj->rx_advanced_params_set( lm_context->serdes_obj, lm_context->lane, &lm_context->rx_params_override); } else if (lm_context->static_values != 0) { lm_context->rx_param_dirty = 0; if ((lm_context->serdes_obj->rx_advanced_params_set) == 0) { al_err("rx_advanced_params_set is not supported for this serdes group\n"); return; } if ((lm_context->retimer_exist == 0) && (lm_context->mode == AL_ETH_LM_MODE_10G_DA)) lm_context->serdes_obj->rx_advanced_params_set( lm_context->serdes_obj, lm_context->lane, &da_rx_params); else lm_context->serdes_obj->rx_advanced_params_set( lm_context->serdes_obj, lm_context->lane, &optic_rx_params); } } static int al_eth_rx_equal_run(struct al_eth_lm_context *lm_context) { struct al_serdes_adv_rx_params rx_params; int dcgain; int best_dcgain = -1; int i; int best_score = -1; int test_score = -1; - rx_params.override = FALSE; + rx_params.override = AL_FALSE; lm_context->serdes_obj->rx_advanced_params_set(lm_context->serdes_obj, lm_context->lane, &rx_params); lm_debug("score | dcgain | dfe3db | dfegain | tap1 | tap2 | tap3 | " "tap4 | low freq | high freq\n"); for (dcgain = 0; dcgain < AL_ETH_LM_MAX_DCGAIN; dcgain++) { lm_context->serdes_obj->dcgain_set( lm_context->serdes_obj, dcgain); test_score = lm_context->serdes_obj->rx_equalization( lm_context->serdes_obj, lm_context->lane); if (test_score < 0) { al_warn("serdes rx equalization failed on error\n"); return (test_score); } if (test_score > best_score) { best_score = test_score; best_dcgain = dcgain; } lm_context->serdes_obj->rx_advanced_params_get( lm_context->serdes_obj, lm_context->lane, &rx_params); lm_debug("%6d|%8x|%8x|%9x|%6x|%6x|%6x|%6x|%10x|%10x|\n", test_score, rx_params.dcgain, rx_params.dfe_3db_freq, 
rx_params.dfe_gain, rx_params.dfe_first_tap_ctrl, rx_params.dfe_secound_tap_ctrl, rx_params.dfe_third_tap_ctrl, rx_params.dfe_fourth_tap_ctrl, rx_params.low_freq_agc_gain, rx_params.high_freq_agc_boost); } lm_context->serdes_obj->dcgain_set( lm_context->serdes_obj, best_dcgain); best_score = -1; for(i = 0; i < AL_ETH_LM_EQ_ITERATIONS; i++) { test_score = lm_context->serdes_obj->rx_equalization( lm_context->serdes_obj, lm_context->lane); if (test_score < 0) { al_warn("serdes rx equalization failed on error\n"); return (test_score); } if (test_score > best_score) { best_score = test_score; lm_context->serdes_obj->rx_advanced_params_get( lm_context->serdes_obj, lm_context->lane, &rx_params); } } rx_params.precal_code_sel = 0; - rx_params.override = TRUE; + rx_params.override = AL_TRUE; lm_context->serdes_obj->rx_advanced_params_set( lm_context->serdes_obj, lm_context->lane, &rx_params); lm_debug("-------------------- best dcgain %d ------------------------------------\n", best_dcgain); lm_debug("%6d|%8x|%8x|%9x|%6x|%6x|%6x|%6x|%10x|%10x|\n", best_score, rx_params.dcgain, rx_params.dfe_3db_freq, rx_params.dfe_gain, rx_params.dfe_first_tap_ctrl, rx_params.dfe_secound_tap_ctrl, rx_params.dfe_third_tap_ctrl, rx_params.dfe_fourth_tap_ctrl, rx_params.low_freq_agc_gain, rx_params.high_freq_agc_boost); return (0); } static int al_eth_lm_retimer_boost_config(struct al_eth_lm_context *lm_context) { int i; int rc = 0; uint8_t boost = 0; uint32_t boost_addr = al_eth_retimer_boost_addr[lm_context->retimer_channel][lm_context->retimer_type]; if (lm_context->mode != AL_ETH_LM_MODE_10G_DA) { boost = al_eth_retimer_boost_value[0][lm_context->retimer_type]; } else { for (i = 0; i < RETIMER_LENS_MAX; i++) { if (lm_context->da_len <= al_eth_retimer_boost_lens[i]) { boost = al_eth_retimer_boost_value[i][lm_context->retimer_type]; break; } } if (i == RETIMER_LENS_MAX) boost = al_eth_retimer_boost_value[RETIMER_LENS_MAX][lm_context->retimer_type]; } lm_debug("config retimer boost in channel 
%d (addr %x) to 0x%x\n", lm_context->retimer_channel, boost_addr, boost); rc = lm_context->i2c_write(lm_context->i2c_context, lm_context->retimer_bus_id, lm_context->retimer_i2c_addr, boost_addr, boost); if (rc != 0) { al_err("%s: Error occurred (%d) while writing retimer " "configuration (bus-id %x i2c-addr %x)\n", __func__, rc, lm_context->retimer_bus_id, lm_context->retimer_i2c_addr); return (rc); } return (0); } /******************************************************************************* ************************** retimer DS25 *************************************** ******************************************************************************/ #define LM_DS25_CHANNEL_EN_REG 0xff #define LM_DS25_CHANNEL_EN_MASK 0x03 #define LM_DS25_CHANNEL_EN_VAL 0x01 #define LM_DS25_CHANNEL_SEL_REG 0xfc #define LM_DS25_CHANNEL_SEL_MASK 0xff #define LM_DS25_CDR_RESET_REG 0x0a #define LM_DS25_CDR_RESET_MASK 0x0c #define LM_DS25_CDR_RESET_ASSERT 0x0c #define LM_DS25_CDR_RESET_RELEASE 0x00 #define LM_DS25_SIGNAL_DETECT_REG 0x78 #define LM_DS25_SIGNAL_DETECT_MASK 0x20 #define LM_DS25_CDR_LOCK_REG 0x78 #define LM_DS25_CDR_LOCK_MASK 0x10 #define LM_DS25_DRV_PD_REG 0x15 #define LM_DS25_DRV_PD_MASK 0x08 static int al_eth_lm_retimer_ds25_write_reg(struct al_eth_lm_context *lm_context, uint8_t reg_addr, uint8_t reg_mask, uint8_t reg_value) { uint8_t reg; int rc; rc = lm_context->i2c_read(lm_context->i2c_context, lm_context->retimer_bus_id, lm_context->retimer_i2c_addr, reg_addr, ®); if (rc != 0) return (EIO); reg &= ~(reg_mask); reg |= reg_value; rc = lm_context->i2c_write(lm_context->i2c_context, lm_context->retimer_bus_id, lm_context->retimer_i2c_addr, reg_addr, reg); if (rc != 0) return (EIO); return (0); } static int al_eth_lm_retimer_ds25_channel_select(struct al_eth_lm_context *lm_context, uint8_t channel) { int rc = 0; /* Write to specific channel */ rc = al_eth_lm_retimer_ds25_write_reg(lm_context, LM_DS25_CHANNEL_EN_REG, LM_DS25_CHANNEL_EN_MASK, LM_DS25_CHANNEL_EN_VAL); if 
(rc != 0) return (rc); rc = al_eth_lm_retimer_ds25_write_reg(lm_context, LM_DS25_CHANNEL_SEL_REG, LM_DS25_CHANNEL_SEL_MASK, (1 << channel)); return (rc); } static int al_eth_lm_retimer_ds25_channel_config(struct al_eth_lm_context *lm_context, uint8_t channel, struct retimer_config_reg *config, uint8_t config_size) { uint8_t i; int rc; rc = al_eth_lm_retimer_ds25_channel_select(lm_context, channel); if (rc != 0) goto config_error; for (i = 0; i < config_size; i++) { rc = al_eth_lm_retimer_ds25_write_reg(lm_context, config[i].addr, config[i].mask, config[i].value); if (rc != 0) goto config_error; } lm_debug("%s: retimer channel config done for channel %d\n", __func__, channel); return (0); config_error: al_err("%s: failed to access to the retimer\n", __func__); return (rc); } static int al_eth_lm_retimer_ds25_cdr_reset(struct al_eth_lm_context *lm_context, uint32_t channel) { int rc; lm_debug("Perform CDR reset to channel %d\n", channel); rc = al_eth_lm_retimer_ds25_channel_select(lm_context, channel); if (rc) goto config_error; rc = al_eth_lm_retimer_ds25_write_reg(lm_context, LM_DS25_CDR_RESET_REG, LM_DS25_CDR_RESET_MASK, LM_DS25_CDR_RESET_ASSERT); if (rc) goto config_error; rc = al_eth_lm_retimer_ds25_write_reg(lm_context, LM_DS25_CDR_RESET_REG, LM_DS25_CDR_RESET_MASK, LM_DS25_CDR_RESET_RELEASE); if (rc) goto config_error; return 0; config_error: al_err("%s: failed to access to the retimer\n", __func__); return rc; } -static al_bool al_eth_lm_retimer_ds25_signal_detect(struct al_eth_lm_context *lm_context, +static bool al_eth_lm_retimer_ds25_signal_detect(struct al_eth_lm_context *lm_context, uint32_t channel) { int rc = 0; uint8_t reg; rc = al_eth_lm_retimer_ds25_channel_select(lm_context, channel); if (rc) goto config_error; rc = lm_context->i2c_read(lm_context->i2c_context, lm_context->retimer_bus_id, lm_context->retimer_i2c_addr, LM_DS25_SIGNAL_DETECT_REG, ®); if (rc) goto config_error; if (reg & LM_DS25_SIGNAL_DETECT_MASK) - return TRUE; + return true; - 
return FALSE; + return false; config_error: al_err("%s: failed to access to the retimer\n", __func__); - return FALSE; + return false; } -static al_bool al_eth_lm_retimer_ds25_cdr_lock(struct al_eth_lm_context *lm_context, +static bool al_eth_lm_retimer_ds25_cdr_lock(struct al_eth_lm_context *lm_context, uint32_t channel) { int rc = 0; uint8_t reg; rc = al_eth_lm_retimer_ds25_channel_select(lm_context, channel); if (rc) goto config_error; rc = lm_context->i2c_read(lm_context->i2c_context, lm_context->retimer_bus_id, lm_context->retimer_i2c_addr, LM_DS25_CDR_LOCK_REG, ®); if (rc) goto config_error; if (reg & LM_DS25_CDR_LOCK_MASK) - return TRUE; + return true; - return FALSE; + return false; config_error: al_err("%s: failed to access to the retimer\n", __func__); - return FALSE; + return false; } -static boolean_t al_eth_lm_wait_for_lock(struct al_eth_lm_context *lm_context, +static bool al_eth_lm_wait_for_lock(struct al_eth_lm_context *lm_context, uint32_t channel) { uint32_t timeout = AL_ETH_LM_RETIMER_WAIT_FOR_LOCK; - al_bool lock = AL_FALSE; + bool lock = false; - while ((timeout > 0) && (lock == FALSE)) { + while ((timeout > 0) && (!lock)) { al_msleep(10); timeout -= 10; lock = retimer[lm_context->retimer_type].cdr_lock(lm_context, channel); } lm_debug("%s: %s to achieve CDR lock in %d msec\n", __func__, (lock) ? 
"succeed" : "FAILED", (AL_ETH_LM_RETIMER_WAIT_FOR_LOCK - timeout)); return lock; } static void al_eth_lm_retimer_signal_lock_check(struct al_eth_lm_context *lm_context, uint32_t channel, - boolean_t *ready) + bool *ready) { - al_bool signal_detect = TRUE; - al_bool cdr_lock = TRUE; + bool signal_detect = true; + bool cdr_lock = true; if (retimer[lm_context->retimer_type].signal_detect) { if (!retimer[lm_context->retimer_type].signal_detect(lm_context, channel)) { lm_debug("no signal detected on retimer channel %d\n", channel); - signal_detect = AL_FALSE; + signal_detect = false; } else { if (retimer[lm_context->retimer_type].cdr_lock) { cdr_lock = retimer[lm_context->retimer_type].cdr_lock( lm_context, channel); if (!cdr_lock) { if (retimer[lm_context->retimer_type].reset) { retimer[lm_context->retimer_type].reset(lm_context, channel); cdr_lock = al_eth_lm_wait_for_lock(lm_context, channel); } } } } } al_info("%s: (channel %d) signal %d cdr lock %d\n", __func__, channel, signal_detect, (signal_detect) ? 
cdr_lock : 0); - *ready = ((cdr_lock == TRUE) && (signal_detect == TRUE)); + *ready = (cdr_lock && signal_detect); } static int al_eth_lm_retimer_ds25_full_config(struct al_eth_lm_context *lm_context) { int rc = 0; - al_bool ready; + bool ready; struct retimer_config_reg *config_tx; uint32_t config_tx_size; struct retimer_config_reg *config_rx; uint32_t config_rx_size; if (lm_context->mode == AL_ETH_LM_MODE_25G) { config_tx = retimer_ds25_25g_mode_tx_ch; config_tx_size = AL_ARR_SIZE(retimer_ds25_25g_mode_tx_ch); config_rx = retimer_ds25_25g_mode_rx_ch; config_rx_size = AL_ARR_SIZE(retimer_ds25_25g_mode_rx_ch); } else { config_tx = retimer_ds25_10g_mode; config_tx_size = AL_ARR_SIZE(retimer_ds25_10g_mode); config_rx = retimer_ds25_10g_mode; config_rx_size = AL_ARR_SIZE(retimer_ds25_10g_mode); } rc = al_eth_lm_retimer_ds25_channel_config(lm_context, lm_context->retimer_channel, config_rx, config_rx_size); if (rc) return rc; rc = al_eth_lm_retimer_ds25_channel_config(lm_context, lm_context->retimer_tx_channel, config_tx, config_tx_size); if (rc) return rc; if (lm_context->serdes_obj->type_get() == AL_SRDS_TYPE_25G) { lm_debug("%s: serdes 25G - perform tx and rx gearbox reset\n", __func__); - al_eth_gearbox_reset(lm_context->adapter, TRUE, TRUE); + al_eth_gearbox_reset(lm_context->adapter, AL_TRUE, AL_TRUE); DELAY(AL_ETH_LM_GEARBOX_RESET_DELAY); } al_eth_lm_retimer_signal_lock_check(lm_context, lm_context->retimer_tx_channel, &ready); if (!ready) { lm_debug("%s: Failed to lock tx channel!\n", __func__); return (1); } lm_debug("%s: retimer full configuration done\n", __func__); return rc; } static int al_eth_lm_retimer_25g_rx_adaptation(struct al_eth_lm_context *lm_context) { int rc = 0; - al_bool ready; + bool ready; al_eth_lm_retimer_signal_lock_check(lm_context, lm_context->retimer_channel, &ready); if (!ready) { lm_debug("%s: no signal detected on retimer Rx channel (%d)\n", __func__, lm_context->retimer_channel); return rc; } 
al_msleep(AL_ETH_LM_SERDES_WAIT_FOR_LOCK); return 0; } -static int al_eth_lm_check_for_link(struct al_eth_lm_context *lm_context, boolean_t *link_up) +static int al_eth_lm_check_for_link(struct al_eth_lm_context *lm_context, bool *link_up) { struct al_eth_link_status status; int ret = 0; al_eth_link_status_clear(lm_context->adapter); al_eth_link_status_get(lm_context->adapter, &status); if (status.link_up == AL_TRUE) { lm_debug("%s: >>>> Link state DOWN ==> UP\n", __func__); al_eth_led_set(lm_context->adapter, AL_TRUE); lm_context->link_state = AL_ETH_LM_LINK_UP; - *link_up = AL_TRUE; + *link_up = true; return 0; } else if (status.local_fault) { lm_context->link_state = AL_ETH_LM_LINK_DOWN; al_eth_led_set(lm_context->adapter, AL_FALSE); al_err("%s: Failed to establish link\n", __func__); ret = 1; } else { lm_debug("%s: >>>> Link state DOWN ==> DOWN_RF\n", __func__); lm_context->link_state = AL_ETH_LM_LINK_DOWN_RF; al_eth_led_set(lm_context->adapter, AL_FALSE); ret = 0; } - *link_up = AL_FALSE; + *link_up = false; return ret; } /*****************************************************************************/ /***************************** API functions *********************************/ /*****************************************************************************/ int al_eth_lm_init(struct al_eth_lm_context *lm_context, struct al_eth_lm_init_params *params) { lm_context->adapter = params->adapter; lm_context->serdes_obj = params->serdes_obj; lm_context->lane = params->lane; lm_context->sfp_detection = params->sfp_detection; lm_context->sfp_bus_id = params->sfp_bus_id; lm_context->sfp_i2c_addr = params->sfp_i2c_addr; lm_context->retimer_exist = params->retimer_exist; lm_context->retimer_type = params->retimer_type; lm_context->retimer_bus_id = params->retimer_bus_id; lm_context->retimer_i2c_addr = params->retimer_i2c_addr; lm_context->retimer_channel = params->retimer_channel; lm_context->retimer_tx_channel = params->retimer_tx_channel; lm_context->default_mode = 
params->default_mode; lm_context->default_dac_len = params->default_dac_len; lm_context->link_training = params->link_training; lm_context->rx_equal = params->rx_equal; lm_context->static_values = params->static_values; lm_context->i2c_read = params->i2c_read; lm_context->i2c_write = params->i2c_write; lm_context->i2c_context = params->i2c_context; lm_context->get_random_byte = params->get_random_byte; /* eeprom_read must be provided if sfp_detection is true */ - al_assert((lm_context->sfp_detection == FALSE) || + al_assert((lm_context->sfp_detection == false) || (lm_context->i2c_read != NULL)); - al_assert((lm_context->retimer_exist == FALSE) || + al_assert((lm_context->retimer_exist == false) || (lm_context->i2c_write != NULL)); lm_context->local_adv.selector_field = 1; lm_context->local_adv.capability = 0; lm_context->local_adv.remote_fault = 0; lm_context->local_adv.acknowledge = 0; lm_context->local_adv.next_page = 0; lm_context->local_adv.technology = AL_ETH_AN_TECH_10GBASE_KR; lm_context->local_adv.fec_capability = params->kr_fec_enable; lm_context->mode = AL_ETH_LM_MODE_DISCONNECTED; - lm_context->serdes_tx_params_valid = FALSE; - lm_context->serdes_rx_params_valid = FALSE; + lm_context->serdes_tx_params_valid = false; + lm_context->serdes_rx_params_valid = false; lm_context->rx_param_dirty = 1; lm_context->tx_param_dirty = 1; lm_context->gpio_get = params->gpio_get; lm_context->gpio_present = params->gpio_present; lm_context->max_speed = params->max_speed; lm_context->sfp_detect_force_mode = params->sfp_detect_force_mode; lm_context->lm_pause = params->lm_pause; lm_context->led_config = params->led_config; - lm_context->retimer_configured = FALSE; + lm_context->retimer_configured = false; lm_context->link_state = AL_ETH_LM_LINK_DOWN; return (0); } int al_eth_lm_link_detection(struct al_eth_lm_context *lm_context, - boolean_t *link_fault, enum al_eth_lm_link_mode *old_mode, + bool *link_fault, enum al_eth_lm_link_mode *old_mode, enum al_eth_lm_link_mode 
*new_mode) { int err; struct al_eth_link_status status; al_assert(lm_context != NULL); al_assert(old_mode != NULL); al_assert(new_mode != NULL); /** * if Link management is disabled, report no link fault in case the link was up * before and set new mode to disconnected to avoid calling to link establish * if the link wasn't up. */ if (lm_context->lm_pause != NULL) { - boolean_t lm_pause = lm_context->lm_pause(lm_context->i2c_context); - if (lm_pause == TRUE) { + bool lm_pause = lm_context->lm_pause(lm_context->i2c_context); + if (lm_pause == true) { *new_mode = AL_ETH_LM_MODE_DISCONNECTED; if (link_fault != NULL) { if (lm_context->link_state == AL_ETH_LM_LINK_UP) - *link_fault = FALSE; + *link_fault = false; else - *link_fault = TRUE; + *link_fault = true; } return 0; } } *old_mode = lm_context->mode; *new_mode = lm_context->mode; if (link_fault != NULL) - *link_fault = TRUE; + *link_fault = true; switch (lm_context->link_state) { case AL_ETH_LM_LINK_UP: al_eth_link_status_get(lm_context->adapter, &status); if (status.link_up) { if (link_fault != NULL) - *link_fault = FALSE; + *link_fault = false; - al_eth_led_set(lm_context->adapter, TRUE); + al_eth_led_set(lm_context->adapter, AL_TRUE); return (0); } else if (status.local_fault) { lm_debug("%s: >>>> Link state UP ==> DOWN\n", __func__); lm_context->link_state = AL_ETH_LM_LINK_DOWN; } else { lm_debug("%s: >>>> Link state UP ==> DOWN_RF\n", __func__); lm_context->link_state = AL_ETH_LM_LINK_DOWN_RF; } break; case AL_ETH_LM_LINK_DOWN_RF: al_eth_link_status_get(lm_context->adapter, &status); if (status.local_fault) { lm_debug("%s: >>>> Link state DOWN_RF ==> DOWN\n", __func__); lm_context->link_state = AL_ETH_LM_LINK_DOWN; break; - } else if (status.remote_fault == FALSE) { + } else if (status.remote_fault == AL_FALSE) { lm_debug("%s: >>>> Link state DOWN_RF ==> UP\n", __func__); lm_context->link_state = AL_ETH_LM_LINK_UP; } /* in case of remote fault only no need to check SFP again */ return (0); case 
AL_ETH_LM_LINK_DOWN: break; }; - al_eth_led_set(lm_context->adapter, FALSE); + al_eth_led_set(lm_context->adapter, AL_FALSE); if (lm_context->sfp_detection) { err = al_eth_module_detect(lm_context, new_mode); if (err != 0) { al_err("module_detection failed!\n"); return (err); } lm_context->mode = *new_mode; } else { lm_context->mode = lm_context->default_mode; *new_mode = lm_context->mode; } if (*old_mode != *new_mode) { al_info("%s: New SFP mode detected %s -> %s\n", __func__, al_eth_lm_mode_convert_to_str(*old_mode), al_eth_lm_mode_convert_to_str(*new_mode)); lm_context->rx_param_dirty = 1; lm_context->tx_param_dirty = 1; - lm_context->new_port = TRUE; + lm_context->new_port = true; if ((*new_mode != AL_ETH_LM_MODE_DISCONNECTED) && (lm_context->led_config)) { struct al_eth_lm_led_config_data data = {0}; switch (*new_mode) { case AL_ETH_LM_MODE_10G_OPTIC: case AL_ETH_LM_MODE_10G_DA: data.speed = AL_ETH_LM_LED_CONFIG_10G; break; case AL_ETH_LM_MODE_1G: data.speed = AL_ETH_LM_LED_CONFIG_1G; break; case AL_ETH_LM_MODE_25G: data.speed = AL_ETH_LM_LED_CONFIG_25G; break; default: al_err("%s: unknown LM mode!\n", __func__); }; lm_context->led_config(lm_context->i2c_context, &data); } } return (0); } int -al_eth_lm_link_establish(struct al_eth_lm_context *lm_context, boolean_t *link_up) +al_eth_lm_link_establish(struct al_eth_lm_context *lm_context, bool *link_up) { - boolean_t signal_detected; + bool signal_detected; int ret = 0; switch (lm_context->link_state) { case AL_ETH_LM_LINK_UP: - *link_up = TRUE; + *link_up = true; lm_debug("%s: return link up\n", __func__); return (0); case AL_ETH_LM_LINK_DOWN_RF: - *link_up = FALSE; + *link_up = false; lm_debug("%s: return link down (DOWN_RF)\n", __func__); return (0); case AL_ETH_LM_LINK_DOWN: break; }; /** * At this point we will get LM disable only if changed to disable after link detection * finished. in this case link will not be established until LM will be enable again. 
*/ if (lm_context->lm_pause) { - boolean_t lm_pause = lm_context->lm_pause(lm_context->i2c_context); - if (lm_pause == TRUE) { - *link_up = FALSE; + bool lm_pause = lm_context->lm_pause(lm_context->i2c_context); + if (lm_pause == true) { + *link_up = false; return (0); } } if ((lm_context->new_port) && (lm_context->retimer_exist)) { al_eth_serdes_static_rx_params_set(lm_context); al_eth_serdes_static_tx_params_set(lm_context); #if 0 al_eth_lm_retimer_config(lm_context); DELAY(AL_ETH_LM_RETIMER_LINK_STATUS_DELAY); #endif if (retimer[lm_context->retimer_type].config(lm_context)) { al_info("%s: failed to configure the retimer\n", __func__); - *link_up = FALSE; + *link_up = false; return (1); } - lm_context->new_port = FALSE; + lm_context->new_port = false; DELAY(1000); } if (lm_context->retimer_exist) { if (retimer[lm_context->retimer_type].rx_adaptation) { ret = retimer[lm_context->retimer_type].rx_adaptation(lm_context); if (ret != 0) { lm_debug("retimer rx is not ready\n"); - *link_up = FALSE; + *link_up = false; return (0); } } } signal_detected = lm_context->serdes_obj->signal_is_detected( lm_context->serdes_obj, lm_context->lane); - if (signal_detected == FALSE) { + if (signal_detected == false) { /* if no signal detected there is nothing to do */ lm_debug("serdes signal is down\n"); - *link_up = AL_FALSE; + *link_up = false; return 0; } if (lm_context->serdes_obj->type_get() == AL_SRDS_TYPE_25G) { lm_debug("%s: serdes 25G - perform rx gearbox reset\n", __func__); - al_eth_gearbox_reset(lm_context->adapter, FALSE, TRUE); + al_eth_gearbox_reset(lm_context->adapter, AL_FALSE, AL_TRUE); DELAY(AL_ETH_LM_GEARBOX_RESET_DELAY); } if (lm_context->retimer_exist) { DELAY(AL_ETH_LM_RETIMER_LINK_STATUS_DELAY); ret = al_eth_lm_check_for_link(lm_context, link_up); if (ret == 0) { lm_debug("%s: link is up with retimer\n", __func__); return 0; } return ret; } if ((lm_context->mode == AL_ETH_LM_MODE_10G_DA) && (lm_context->link_training)) { 
lm_context->local_adv.transmitted_nonce = lm_context->get_random_byte(); lm_context->local_adv.transmitted_nonce &= 0x1f; ret = al_eth_an_lt_execute(lm_context->adapter, lm_context->serdes_obj, lm_context->lane, &lm_context->local_adv, &lm_context->partner_adv); lm_context->rx_param_dirty = 1; lm_context->tx_param_dirty = 1; if (ret == 0) { al_info("%s: link training finished successfully\n", __func__); lm_context->link_training_failures = 0; ret = al_eth_lm_check_for_link(lm_context, link_up); if (ret == 0) { lm_debug("%s: link is up with LT\n", __func__); return (0); } } lm_context->link_training_failures++; if (lm_context->link_training_failures > AL_ETH_LT_FAILURES_TO_RESET) { lm_debug("%s: failed to establish LT %d times. reset serdes\n", __func__, AL_ETH_LT_FAILURES_TO_RESET); lm_context->serdes_obj->pma_hard_reset_lane( lm_context->serdes_obj, lm_context->lane, - TRUE); + AL_TRUE); lm_context->serdes_obj->pma_hard_reset_lane( lm_context->serdes_obj, lm_context->lane, - FALSE); + AL_FALSE); lm_context->link_training_failures = 0; } } al_eth_serdes_static_tx_params_set(lm_context); if ((lm_context->mode == AL_ETH_LM_MODE_10G_DA) && (lm_context->rx_equal)) { ret = al_eth_rx_equal_run(lm_context); if (ret == 0) { DELAY(AL_ETH_LM_LINK_STATUS_DELAY); ret = al_eth_lm_check_for_link(lm_context, link_up); if (ret == 0) { lm_debug("%s: link is up with Rx Equalization\n", __func__); return (0); } } } al_eth_serdes_static_rx_params_set(lm_context); DELAY(AL_ETH_LM_LINK_STATUS_DELAY); ret = al_eth_lm_check_for_link(lm_context, link_up); if (ret == 0) { lm_debug("%s: link is up with static parameters\n", __func__); return (0); } - *link_up = FALSE; + *link_up = false; return (1); } int al_eth_lm_static_parameters_override(struct al_eth_lm_context *lm_context, struct al_serdes_adv_tx_params *tx_params, struct al_serdes_adv_rx_params *rx_params) { if (tx_params != NULL) { lm_context->tx_params_override = *tx_params; lm_context->tx_param_dirty = 1; - 
lm_context->serdes_tx_params_valid = TRUE; + lm_context->serdes_tx_params_valid = true; } if (rx_params != NULL) { lm_context->rx_params_override = *rx_params; lm_context->rx_param_dirty = 1; - lm_context->serdes_rx_params_valid = TRUE; + lm_context->serdes_rx_params_valid = true; } return (0); } int al_eth_lm_static_parameters_override_disable(struct al_eth_lm_context *lm_context, - boolean_t tx_params, boolean_t rx_params) + bool tx_params, bool rx_params) { - if (tx_params != 0) - lm_context->serdes_tx_params_valid = FALSE; - if (rx_params != 0) - lm_context->serdes_tx_params_valid = FALSE; + if (tx_params) + lm_context->serdes_tx_params_valid = false; + if (rx_params) + lm_context->serdes_tx_params_valid = false; return (0); } int al_eth_lm_static_parameters_get(struct al_eth_lm_context *lm_context, struct al_serdes_adv_tx_params *tx_params, struct al_serdes_adv_rx_params *rx_params) { if (tx_params != NULL) { if (lm_context->serdes_tx_params_valid) *tx_params = lm_context->tx_params_override; else lm_context->serdes_obj->tx_advanced_params_get( lm_context->serdes_obj, lm_context->lane, tx_params); } if (rx_params != NULL) { if (lm_context->serdes_rx_params_valid) *rx_params = lm_context->rx_params_override; else lm_context->serdes_obj->rx_advanced_params_get( lm_context->serdes_obj, lm_context->lane, rx_params); } return (0); } const char * al_eth_lm_mode_convert_to_str(enum al_eth_lm_link_mode val) { switch (val) { case AL_ETH_LM_MODE_DISCONNECTED: return ("AL_ETH_LM_MODE_DISCONNECTED"); case AL_ETH_LM_MODE_10G_OPTIC: return ("AL_ETH_LM_MODE_10G_OPTIC"); case AL_ETH_LM_MODE_10G_DA: return ("AL_ETH_LM_MODE_10G_DA"); case AL_ETH_LM_MODE_1G: return ("AL_ETH_LM_MODE_1G"); case AL_ETH_LM_MODE_25G: return ("AL_ETH_LM_MODE_25G"); } return ("N/A"); } void al_eth_lm_debug_mode_set(struct al_eth_lm_context *lm_context, - boolean_t enable) + bool enable) { lm_context->debug = enable; } diff --git a/sys/dev/al_eth/al_init_eth_lm.h b/sys/dev/al_eth/al_init_eth_lm.h index 
9e2563f4f46f..15becd4c3679 100644 --- a/sys/dev/al_eth/al_init_eth_lm.h +++ b/sys/dev/al_eth/al_init_eth_lm.h @@ -1,375 +1,375 @@ /*- * Copyright (c) 2015,2016 Annapurna Labs Ltd. and affiliates * All rights reserved. * * Developed by Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ /** * Ethernet * @{ * @file al_init_eth_lm.h * * @brief ethernet link management common utilities * * Common operation example: * @code * int main() * { * struct al_eth_lm_context lm_context; * struct al_eth_lm_init_params lm_params; * enum al_eth_lm_link_mode old_mode; * enum al_eth_lm_link_mode new_mode; * al_bool fault; * al_bool link_up; * int rc = 0; * * lm_params.adapter = hal_adapter; * lm_params.serdes_obj = serdes; * lm_params.grp = grp; * lm_params.lane = lane; * lm_params.sfp_detection = true; * lm_params.link_training = true; * lm_params.rx_equal = true * lm_params.static_values = true; * lm_params.kr_fec_enable = false; * lm_params.eeprom_read = &my_eeprom_read; * lm_params.eeprom_context = context; * lm_params.get_random_byte = &my_rand_byte; * lm_params.default_mode = AL_ETH_LM_MODE_10G_DA; * * al_eth_lm_init(&lm_context, &lm_params); * * rc = al_eth_lm_link_detection(&lm_context, &fault, &old_mode, &new_mode); * if (fault == false) * return; // in this case the link is still up * * if (rc) { * printf("link detection failed on error\n"); * return; * } * * if (old_mode != new_mode) { * // perform serdes configuration if needed * * // mac stop / start / config if needed * } * * spin_lock(lock); * rc = al_eth_lm_link_establish($lm_context, &link_up); * spin_unlock(lock); * if (rc) { * printf("establish link failed\n"); * return; * } * * if (link_up) * printf("Link established successfully\n"); * else * printf("No signal found. 
probably the link partner is disconnected\n"); * } * @endcode * */ #ifndef __AL_INIT_ETH_LM_H__ #define __AL_INIT_ETH_LM_H__ #include #include #include "al_init_eth_kr.h" enum al_eth_lm_link_mode { AL_ETH_LM_MODE_DISCONNECTED, AL_ETH_LM_MODE_10G_OPTIC, AL_ETH_LM_MODE_10G_DA, AL_ETH_LM_MODE_1G, AL_ETH_LM_MODE_25G, }; enum al_eth_lm_max_speed { AL_ETH_LM_MAX_SPEED_MAX, AL_ETH_LM_MAX_SPEED_25G, AL_ETH_LM_MAX_SPEED_10G, AL_ETH_LM_MAX_SPEED_1G, }; enum al_eth_lm_link_state { AL_ETH_LM_LINK_DOWN, AL_ETH_LM_LINK_DOWN_RF, AL_ETH_LM_LINK_UP, }; enum al_eth_lm_led_config_speed { AL_ETH_LM_LED_CONFIG_1G, AL_ETH_LM_LED_CONFIG_10G, AL_ETH_LM_LED_CONFIG_25G, }; struct al_eth_lm_led_config_data { enum al_eth_lm_led_config_speed speed; }; struct al_eth_lm_context { struct al_hal_eth_adapter *adapter; struct al_serdes_grp_obj *serdes_obj; enum al_serdes_lane lane; uint32_t link_training_failures; - boolean_t tx_param_dirty; - boolean_t serdes_tx_params_valid; + bool tx_param_dirty; + bool serdes_tx_params_valid; struct al_serdes_adv_tx_params tx_params_override; - boolean_t rx_param_dirty; - boolean_t serdes_rx_params_valid; + bool rx_param_dirty; + bool serdes_rx_params_valid; struct al_serdes_adv_rx_params rx_params_override; struct al_eth_an_adv local_adv; struct al_eth_an_adv partner_adv; enum al_eth_lm_link_mode mode; uint8_t da_len; - boolean_t debug; + bool debug; /* configurations */ - boolean_t sfp_detection; + bool sfp_detection; uint8_t sfp_bus_id; uint8_t sfp_i2c_addr; enum al_eth_lm_link_mode default_mode; uint8_t default_dac_len; - boolean_t link_training; - boolean_t rx_equal; - boolean_t static_values; + bool link_training; + bool rx_equal; + bool static_values; - boolean_t retimer_exist; + bool retimer_exist; enum al_eth_retimer_type retimer_type; uint8_t retimer_bus_id; uint8_t retimer_i2c_addr; enum al_eth_retimer_channel retimer_channel; /* services */ int (*i2c_read)(void *handle, uint8_t bus_id, uint8_t i2c_addr, uint8_t reg_addr, uint8_t *val); int 
(*i2c_write)(void *handle, uint8_t bus_id, uint8_t i2c_addr, uint8_t reg_addr, uint8_t val); void *i2c_context; uint8_t (*get_random_byte)(void); int (*gpio_get)(unsigned int gpio); uint32_t gpio_present; enum al_eth_retimer_channel retimer_tx_channel; - boolean_t retimer_configured; + bool retimer_configured; enum al_eth_lm_max_speed max_speed; - boolean_t sfp_detect_force_mode; + bool sfp_detect_force_mode; enum al_eth_lm_link_state link_state; - boolean_t new_port; + bool new_port; - boolean_t (*lm_pause)(void *handle); + bool (*lm_pause)(void *handle); void (*led_config)(void *handle, struct al_eth_lm_led_config_data *data); }; struct al_eth_lm_init_params { /* pointer to HAL context */ struct al_hal_eth_adapter *adapter; /* pointer to serdes object */ struct al_serdes_grp_obj *serdes_obj; /* serdes lane for this port */ enum al_serdes_lane lane; /* * set to true to perform sfp detection if the link is down. * when set to true, eeprom_read below should NOT be NULL. */ - boolean_t sfp_detection; + bool sfp_detection; /* i2c bus id of the SFP for this port */ uint8_t sfp_bus_id; /* i2c addr of the SFP for this port */ uint8_t sfp_i2c_addr; /* * default mode, and dac length will be used in case sfp_detection * is not set or in case the detection failed. */ enum al_eth_lm_link_mode default_mode; uint8_t default_dac_len; /* the i2c bus id and addr of the retimer in case it exist */ uint8_t retimer_bus_id; uint8_t retimer_i2c_addr; /* retimer channel connected to this port */ enum al_eth_retimer_channel retimer_channel; enum al_eth_retimer_channel retimer_tx_channel; /* retimer type if exist */ enum al_eth_retimer_type retimer_type; /* * the following parameters control what mechanisms to run * on link_establish with the following steps: * - if retimer_exist is set, the retimer will be configured based on DA len. * - if link_training is set and DA detected run link training. 
if succeed return 0 * - if rx_equal is set serdes equalization will be run to configure the rx parameters. * - if static_values is set, tx and rx values will be set based on static values. */ - boolean_t retimer_exist; - boolean_t link_training; - boolean_t rx_equal; - boolean_t static_values; + bool retimer_exist; + bool link_training; + bool rx_equal; + bool static_values; /* enable / disable fec capabilities in AN */ - boolean_t kr_fec_enable; + bool kr_fec_enable; /* * pointer to function that's read 1 byte from eeprom * in case no eeprom is connected should return -ETIMEDOUT */ int (*i2c_read)(void *handle, uint8_t bus_id, uint8_t i2c_addr, uint8_t reg_addr, uint8_t *val); int (*i2c_write)(void *handle, uint8_t bus_id, uint8_t i2c_addr, uint8_t reg_addr, uint8_t val); void *i2c_context; /* pointer to function that return 1 rand byte */ uint8_t (*get_random_byte)(void); /* pointer to function that gets GPIO value - if NULL gpio present won't be used */ int (*gpio_get)(unsigned int gpio); /* gpio number connected to the SFP present pin */ uint32_t gpio_present; enum al_eth_lm_max_speed max_speed; /* in case force mode is true - the default mode will be set regardless to * the SFP EEPROM content */ - boolean_t sfp_detect_force_mode; + bool sfp_detect_force_mode; /* lm pause callback - in case it return true the LM will try to preserve * the current link status and will not try to establish new link (and will not * access to i2c bus) */ - boolean_t (*lm_pause)(void *handle); + bool (*lm_pause)(void *handle); /* config ethernet LEDs according to data. can be NULL if no configuration needed */ void (*led_config)(void *handle, struct al_eth_lm_led_config_data *data); }; /** * initialize link management context and set configuration * * @param lm_context pointer to link management context * @param params parameters passed from upper layer * * @return 0 in case of success. otherwise on failure. 
*/ int al_eth_lm_init(struct al_eth_lm_context *lm_context, struct al_eth_lm_init_params *params); /** * perform link status check. in case link is down perform sfp detection * * @param lm_context pointer to link management context * @param link_fault indicate if the link is down * @param old_mode the last working mode * @param new_mode the new mode detected in this call * * @return 0 in case of success. otherwise on failure. */ int al_eth_lm_link_detection(struct al_eth_lm_context *lm_context, - boolean_t *link_fault, enum al_eth_lm_link_mode *old_mode, + bool *link_fault, enum al_eth_lm_link_mode *old_mode, enum al_eth_lm_link_mode *new_mode); /** * run LT, rx equalization and static values override according to configuration * This function MUST be called inside a lock as it using common serdes registers * * @param lm_context pointer to link management context * @param link_up set to true in case link is establish successfully * * @return < 0 in case link was failed to be established */ int al_eth_lm_link_establish(struct al_eth_lm_context *lm_context, - boolean_t *link_up); + bool *link_up); /** * override the default static parameters * * @param lm_context pointer to link management context * @param tx_params pointer to new tx params * @param rx_params pointer to new rx params * * @return 0 in case of success. otherwise on failure. **/ int al_eth_lm_static_parameters_override(struct al_eth_lm_context *lm_context, struct al_serdes_adv_tx_params *tx_params, struct al_serdes_adv_rx_params *rx_params); /** * disable serdes parameters override * * @param lm_context pointer to link management context * @param tx_params set to true to disable override of tx params * @param rx_params set to true to disable override of rx params * * @return 0 in case of success. otherwise on failure. 
**/ int al_eth_lm_static_parameters_override_disable(struct al_eth_lm_context *lm_context, - boolean_t tx_params, boolean_t rx_params); + bool tx_params, bool rx_params); /** * get the static parameters that are being used * if the parameters was override - return the override values * else return the current values of the parameters * * @param lm_context pointer to link management context * @param tx_params pointer to new tx params * @param rx_params pointer to new rx params * * @return 0 in case of success. otherwise on failure. */ int al_eth_lm_static_parameters_get(struct al_eth_lm_context *lm_context, struct al_serdes_adv_tx_params *tx_params, struct al_serdes_adv_rx_params *rx_params); /** * convert link management mode to string * * @param val link management mode * * @return string of the mode */ const char *al_eth_lm_mode_convert_to_str(enum al_eth_lm_link_mode val); /** * print all debug messages * * @param lm_context pointer to link management context * @param enable set to true to enable debug mode */ void al_eth_lm_debug_mode_set(struct al_eth_lm_context *lm_context, - boolean_t enable); + bool enable); #endif