diff --git a/sys/dev/igc/if_igc.c b/sys/dev/igc/if_igc.c index c78edab5e38a..006fecdab7b0 100644 --- a/sys/dev/igc/if_igc.c +++ b/sys/dev/igc/if_igc.c @@ -1,2808 +1,2912 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2016 Nicole Graziano * All rights reserved. * Copyright (c) 2021 Rubicon Communications, LLC (Netgate) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include "if_igc.h" #include #include #ifdef RSS #include #include #endif /********************************************************************* * PCI Device ID Table * * Used by probe to select devices to load on * Last entry must be all 0s * * { Vendor ID, Device ID, String } *********************************************************************/ static const pci_vendor_info_t igc_vendor_info_array[] = { /* Intel(R) PRO/1000 Network Connection - igc */ PVID(0x8086, IGC_DEV_ID_I225_LM, "Intel(R) Ethernet Controller I225-LM"), PVID(0x8086, IGC_DEV_ID_I225_V, "Intel(R) Ethernet Controller I225-V"), PVID(0x8086, IGC_DEV_ID_I225_K, "Intel(R) Ethernet Controller I225-K"), PVID(0x8086, IGC_DEV_ID_I225_I, "Intel(R) Ethernet Controller I225-I"), PVID(0x8086, IGC_DEV_ID_I220_V, "Intel(R) Ethernet Controller I220-V"), PVID(0x8086, IGC_DEV_ID_I225_K2, "Intel(R) Ethernet Controller I225-K(2)"), PVID(0x8086, IGC_DEV_ID_I225_LMVP, "Intel(R) Ethernet Controller I225-LMvP(2)"), PVID(0x8086, IGC_DEV_ID_I226_K, "Intel(R) Ethernet Controller I226-K"), PVID(0x8086, IGC_DEV_ID_I226_LMVP, "Intel(R) Ethernet Controller I226-LMvP"), PVID(0x8086, IGC_DEV_ID_I225_IT, "Intel(R) Ethernet Controller I225-IT(2)"), PVID(0x8086, IGC_DEV_ID_I226_LM, "Intel(R) Ethernet Controller I226-LM"), PVID(0x8086, IGC_DEV_ID_I226_V, "Intel(R) Ethernet Controller I226-V"), PVID(0x8086, IGC_DEV_ID_I226_IT, "Intel(R) Ethernet Controller I226-IT"), PVID(0x8086, IGC_DEV_ID_I221_V, "Intel(R) Ethernet Controller I221-V"), PVID(0x8086, IGC_DEV_ID_I226_BLANK_NVM, "Intel(R) Ethernet Controller I226(blankNVM)"), PVID(0x8086, IGC_DEV_ID_I225_BLANK_NVM, "Intel(R) Ethernet Controller I225(blankNVM)"), /* required last entry */ PVID_END }; /********************************************************************* * Function prototypes *********************************************************************/ static void *igc_register(device_t dev); static int 
igc_if_attach_pre(if_ctx_t ctx); static int igc_if_attach_post(if_ctx_t ctx); static int igc_if_detach(if_ctx_t ctx); static int igc_if_shutdown(if_ctx_t ctx); static int igc_if_suspend(if_ctx_t ctx); static int igc_if_resume(if_ctx_t ctx); static int igc_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets); static int igc_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets); static void igc_if_queues_free(if_ctx_t ctx); static uint64_t igc_if_get_counter(if_ctx_t, ift_counter); static void igc_if_init(if_ctx_t ctx); static void igc_if_stop(if_ctx_t ctx); static void igc_if_media_status(if_ctx_t, struct ifmediareq *); static int igc_if_media_change(if_ctx_t ctx); static int igc_if_mtu_set(if_ctx_t ctx, uint32_t mtu); static void igc_if_timer(if_ctx_t ctx, uint16_t qid); static void igc_if_watchdog_reset(if_ctx_t ctx); static bool igc_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event); static void igc_identify_hardware(if_ctx_t ctx); static int igc_allocate_pci_resources(if_ctx_t ctx); static void igc_free_pci_resources(if_ctx_t ctx); static void igc_reset(if_ctx_t ctx); static int igc_setup_interface(if_ctx_t ctx); static int igc_setup_msix(if_ctx_t ctx); static void igc_initialize_transmit_unit(if_ctx_t ctx); static void igc_initialize_receive_unit(if_ctx_t ctx); static void igc_if_intr_enable(if_ctx_t ctx); static void igc_if_intr_disable(if_ctx_t ctx); static int igc_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid); static int igc_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid); static void igc_if_multi_set(if_ctx_t ctx); static void igc_if_update_admin_status(if_ctx_t ctx); static void igc_if_debug(if_ctx_t ctx); static void igc_update_stats_counters(struct igc_adapter *); static void igc_add_hw_stats(struct igc_adapter *adapter); static int igc_if_set_promisc(if_ctx_t ctx, int flags); static void igc_setup_vlan_hw_support(if_ctx_t ctx); +static void igc_fw_version(struct igc_adapter *); +static void igc_sbuf_fw_version(struct igc_fw_version *, struct sbuf *); +static void igc_print_fw_version(struct igc_adapter *); +static int igc_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS); static int igc_sysctl_nvm_info(SYSCTL_HANDLER_ARGS); static void igc_print_nvm_info(struct igc_adapter *); static int igc_sysctl_debug_info(SYSCTL_HANDLER_ARGS); static int igc_get_rs(SYSCTL_HANDLER_ARGS); static void igc_print_debug_info(struct igc_adapter *); static int igc_is_valid_ether_addr(u8 *); /* Management and WOL Support */ static void igc_get_hw_control(struct igc_adapter *); static void igc_release_hw_control(struct igc_adapter *); static void igc_get_wakeup(if_ctx_t ctx); static void igc_enable_wakeup(if_ctx_t ctx); int igc_intr(void *arg); /* MSI-X handlers */ static int igc_if_msix_intr_assign(if_ctx_t, int); static int igc_msix_link(void *); static void igc_handle_link(void *context); static int igc_set_flowcntl(SYSCTL_HANDLER_ARGS); static int igc_sysctl_eee(SYSCTL_HANDLER_ARGS); static int igc_get_regs(SYSCTL_HANDLER_ARGS); static void igc_configure_queues(struct igc_adapter *adapter); /********************************************************************* * FreeBSD Device Interface Entry Points *********************************************************************/ static device_method_t igc_methods[] = { /* Device interface */ DEVMETHOD(device_register, igc_register), DEVMETHOD(device_probe, iflib_device_probe), DEVMETHOD(device_attach, iflib_device_attach), DEVMETHOD(device_detach, 
iflib_device_detach), DEVMETHOD(device_shutdown, iflib_device_shutdown), DEVMETHOD(device_suspend, iflib_device_suspend), DEVMETHOD(device_resume, iflib_device_resume), DEVMETHOD_END }; static driver_t igc_driver = { "igc", igc_methods, sizeof(struct igc_adapter), }; DRIVER_MODULE(igc, pci, igc_driver, 0, 0); MODULE_DEPEND(igc, pci, 1, 1, 1); MODULE_DEPEND(igc, ether, 1, 1, 1); MODULE_DEPEND(igc, iflib, 1, 1, 1); IFLIB_PNP_INFO(pci, igc, igc_vendor_info_array); static device_method_t igc_if_methods[] = { DEVMETHOD(ifdi_attach_pre, igc_if_attach_pre), DEVMETHOD(ifdi_attach_post, igc_if_attach_post), DEVMETHOD(ifdi_detach, igc_if_detach), DEVMETHOD(ifdi_shutdown, igc_if_shutdown), DEVMETHOD(ifdi_suspend, igc_if_suspend), DEVMETHOD(ifdi_resume, igc_if_resume), DEVMETHOD(ifdi_init, igc_if_init), DEVMETHOD(ifdi_stop, igc_if_stop), DEVMETHOD(ifdi_msix_intr_assign, igc_if_msix_intr_assign), DEVMETHOD(ifdi_intr_enable, igc_if_intr_enable), DEVMETHOD(ifdi_intr_disable, igc_if_intr_disable), DEVMETHOD(ifdi_tx_queues_alloc, igc_if_tx_queues_alloc), DEVMETHOD(ifdi_rx_queues_alloc, igc_if_rx_queues_alloc), DEVMETHOD(ifdi_queues_free, igc_if_queues_free), DEVMETHOD(ifdi_update_admin_status, igc_if_update_admin_status), DEVMETHOD(ifdi_multi_set, igc_if_multi_set), DEVMETHOD(ifdi_media_status, igc_if_media_status), DEVMETHOD(ifdi_media_change, igc_if_media_change), DEVMETHOD(ifdi_mtu_set, igc_if_mtu_set), DEVMETHOD(ifdi_promisc_set, igc_if_set_promisc), DEVMETHOD(ifdi_timer, igc_if_timer), DEVMETHOD(ifdi_watchdog_reset, igc_if_watchdog_reset), DEVMETHOD(ifdi_get_counter, igc_if_get_counter), DEVMETHOD(ifdi_rx_queue_intr_enable, igc_if_rx_queue_intr_enable), DEVMETHOD(ifdi_tx_queue_intr_enable, igc_if_tx_queue_intr_enable), DEVMETHOD(ifdi_debug, igc_if_debug), DEVMETHOD(ifdi_needs_restart, igc_if_needs_restart), DEVMETHOD_END }; static driver_t igc_if_driver = { "igc_if", igc_if_methods, sizeof(struct igc_adapter) }; /********************************************************************* * Tunable default values. 
*********************************************************************/ /* Allow common code without TSO */ #ifndef CSUM_TSO #define CSUM_TSO 0 #endif static SYSCTL_NODE(_hw, OID_AUTO, igc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "igc driver parameters"); static int igc_disable_crc_stripping = 0; SYSCTL_INT(_hw_igc, OID_AUTO, disable_crc_stripping, CTLFLAG_RDTUN, &igc_disable_crc_stripping, 0, "Disable CRC Stripping"); static int igc_smart_pwr_down = false; SYSCTL_INT(_hw_igc, OID_AUTO, smart_pwr_down, CTLFLAG_RDTUN, &igc_smart_pwr_down, 0, "Set to true to leave smart power down enabled on newer adapters"); /* Controls whether promiscuous also shows bad packets */ static int igc_debug_sbp = true; SYSCTL_INT(_hw_igc, OID_AUTO, sbp, CTLFLAG_RDTUN, &igc_debug_sbp, 0, "Show bad packets in promiscuous mode"); /* Energy efficient ethernet - default to OFF */ static int igc_eee_setting = 1; SYSCTL_INT(_hw_igc, OID_AUTO, eee_setting, CTLFLAG_RDTUN, &igc_eee_setting, 0, "Enable Energy Efficient Ethernet"); /* ** Tuneable Interrupt rate */ static int igc_max_interrupt_rate = 20000; SYSCTL_INT(_hw_igc, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN, &igc_max_interrupt_rate, 0, "Maximum interrupts per second"); extern struct if_txrx igc_txrx; static struct if_shared_ctx igc_sctx_init = { .isc_magic = IFLIB_MAGIC, .isc_q_align = PAGE_SIZE, .isc_tx_maxsize = IGC_TSO_SIZE + sizeof(struct ether_vlan_header), .isc_tx_maxsegsize = PAGE_SIZE, .isc_tso_maxsize = IGC_TSO_SIZE + sizeof(struct ether_vlan_header), .isc_tso_maxsegsize = IGC_TSO_SEG_SIZE, .isc_rx_maxsize = MAX_JUMBO_FRAME_SIZE, .isc_rx_nsegments = 1, .isc_rx_maxsegsize = MJUM9BYTES, .isc_nfl = 1, .isc_nrxqs = 1, .isc_ntxqs = 1, .isc_admin_intrcnt = 1, .isc_vendor_info = igc_vendor_info_array, .isc_driver_version = "1", .isc_driver = &igc_if_driver, .isc_flags = IFLIB_NEED_SCRATCH | IFLIB_TSO_INIT_IP | IFLIB_NEED_ZERO_CSUM, .isc_nrxd_min = {IGC_MIN_RXD}, .isc_ntxd_min = {IGC_MIN_TXD}, .isc_nrxd_max = {IGC_MAX_RXD}, .isc_ntxd_max = {IGC_MAX_TXD}, .isc_nrxd_default = {IGC_DEFAULT_RXD}, .isc_ntxd_default = {IGC_DEFAULT_TXD}, }; /***************************************************************** * * Dump Registers * ****************************************************************/ #define IGC_REGS_LEN 739 static int igc_get_regs(SYSCTL_HANDLER_ARGS) { struct igc_adapter *adapter = (struct igc_adapter *)arg1; struct igc_hw *hw = &adapter->hw; struct sbuf *sb; u32 *regs_buff; int rc; regs_buff = malloc(sizeof(u32) * IGC_REGS_LEN, M_DEVBUF, M_WAITOK); memset(regs_buff, 0, IGC_REGS_LEN * sizeof(u32)); rc = sysctl_wire_old_buffer(req, 0); MPASS(rc == 0); if (rc != 0) { free(regs_buff, M_DEVBUF); return (rc); } sb = sbuf_new_for_sysctl(NULL, NULL, 32*400, req); MPASS(sb != NULL); if (sb == NULL) { free(regs_buff, M_DEVBUF); return (ENOMEM); } /* General Registers */ regs_buff[0] = IGC_READ_REG(hw, IGC_CTRL); regs_buff[1] = IGC_READ_REG(hw, IGC_STATUS); regs_buff[2] = IGC_READ_REG(hw, IGC_CTRL_EXT); regs_buff[3] = IGC_READ_REG(hw, IGC_ICR); regs_buff[4] = IGC_READ_REG(hw, IGC_RCTL); regs_buff[5] = IGC_READ_REG(hw, IGC_RDLEN(0)); regs_buff[6] = IGC_READ_REG(hw, IGC_RDH(0)); regs_buff[7] = IGC_READ_REG(hw, IGC_RDT(0)); regs_buff[8] = IGC_READ_REG(hw, IGC_RXDCTL(0)); regs_buff[9] = IGC_READ_REG(hw, IGC_RDBAL(0)); regs_buff[10] = IGC_READ_REG(hw, IGC_RDBAH(0)); regs_buff[11] = IGC_READ_REG(hw, IGC_TCTL); regs_buff[12] = IGC_READ_REG(hw, IGC_TDBAL(0)); regs_buff[13] = IGC_READ_REG(hw, IGC_TDBAH(0)); regs_buff[14] = IGC_READ_REG(hw, IGC_TDLEN(0)); regs_buff[15] = 
IGC_READ_REG(hw, IGC_TDH(0)); regs_buff[16] = IGC_READ_REG(hw, IGC_TDT(0)); regs_buff[17] = IGC_READ_REG(hw, IGC_TXDCTL(0)); sbuf_printf(sb, "General Registers\n"); sbuf_printf(sb, "\tCTRL\t %08x\n", regs_buff[0]); sbuf_printf(sb, "\tSTATUS\t %08x\n", regs_buff[1]); sbuf_printf(sb, "\tCTRL_EXIT\t %08x\n\n", regs_buff[2]); sbuf_printf(sb, "Interrupt Registers\n"); sbuf_printf(sb, "\tICR\t %08x\n\n", regs_buff[3]); sbuf_printf(sb, "RX Registers\n"); sbuf_printf(sb, "\tRCTL\t %08x\n", regs_buff[4]); sbuf_printf(sb, "\tRDLEN\t %08x\n", regs_buff[5]); sbuf_printf(sb, "\tRDH\t %08x\n", regs_buff[6]); sbuf_printf(sb, "\tRDT\t %08x\n", regs_buff[7]); sbuf_printf(sb, "\tRXDCTL\t %08x\n", regs_buff[8]); sbuf_printf(sb, "\tRDBAL\t %08x\n", regs_buff[9]); sbuf_printf(sb, "\tRDBAH\t %08x\n\n", regs_buff[10]); sbuf_printf(sb, "TX Registers\n"); sbuf_printf(sb, "\tTCTL\t %08x\n", regs_buff[11]); sbuf_printf(sb, "\tTDBAL\t %08x\n", regs_buff[12]); sbuf_printf(sb, "\tTDBAH\t %08x\n", regs_buff[13]); sbuf_printf(sb, "\tTDLEN\t %08x\n", regs_buff[14]); sbuf_printf(sb, "\tTDH\t %08x\n", regs_buff[15]); sbuf_printf(sb, "\tTDT\t %08x\n", regs_buff[16]); sbuf_printf(sb, "\tTXDCTL\t %08x\n", regs_buff[17]); sbuf_printf(sb, "\tTDFH\t %08x\n", regs_buff[18]); sbuf_printf(sb, "\tTDFT\t %08x\n", regs_buff[19]); sbuf_printf(sb, "\tTDFHS\t %08x\n", regs_buff[20]); sbuf_printf(sb, "\tTDFPC\t %08x\n\n", regs_buff[21]); free(regs_buff, M_DEVBUF); #ifdef DUMP_DESCS { if_softc_ctx_t scctx = adapter->shared; struct rx_ring *rxr = &rx_que->rxr; struct tx_ring *txr = &tx_que->txr; int ntxd = scctx->isc_ntxd[0]; int nrxd = scctx->isc_nrxd[0]; int j; for (j = 0; j < nrxd; j++) { u32 staterr = le32toh(rxr->rx_base[j].wb.upper.status_error); u32 length = le32toh(rxr->rx_base[j].wb.upper.length); sbuf_printf(sb, "\tReceive Descriptor Address %d: %08" PRIx64 " Error:%d Length:%d\n", j, rxr->rx_base[j].read.buffer_addr, staterr, length); } for (j = 0; j < min(ntxd, 256); j++) { unsigned int *ptr = (unsigned int *)&txr->tx_base[j]; sbuf_printf(sb, "\tTXD[%03d] [0]: %08x [1]: %08x [2]: %08x [3]: %08x eop: %d DD=%d\n", j, ptr[0], ptr[1], ptr[2], ptr[3], buf->eop, buf->eop != -1 ? txr->tx_base[buf->eop].upper.fields.status & IGC_TXD_STAT_DD : 0); } } #endif rc = sbuf_finish(sb); sbuf_delete(sb); return(rc); } static void * igc_register(device_t dev) { return (&igc_sctx_init); } static int igc_set_num_queues(if_ctx_t ctx) { int maxqueues; maxqueues = 4; return (maxqueues); } #define IGC_CAPS \ IFCAP_HWCSUM | IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | \ IFCAP_VLAN_HWCSUM | IFCAP_WOL | IFCAP_TSO4 | IFCAP_LRO | \ IFCAP_VLAN_HWTSO | IFCAP_JUMBO_MTU | IFCAP_HWCSUM_IPV6 | IFCAP_TSO6 /********************************************************************* * Device initialization routine * * The attach entry point is called when the driver is being loaded. * This routine identifies the type of hardware, allocates all resources * and initializes the hardware. 
* * return 0 on success, positive on failure *********************************************************************/ static int igc_if_attach_pre(if_ctx_t ctx) { struct igc_adapter *adapter; if_softc_ctx_t scctx; device_t dev; struct igc_hw *hw; int error = 0; INIT_DEBUGOUT("igc_if_attach_pre: begin"); dev = iflib_get_dev(ctx); adapter = iflib_get_softc(ctx); adapter->ctx = adapter->osdep.ctx = ctx; adapter->dev = adapter->osdep.dev = dev; scctx = adapter->shared = iflib_get_softc_ctx(ctx); adapter->media = iflib_get_media(ctx); hw = &adapter->hw; /* SYSCTL stuff */ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "nvm", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0, igc_sysctl_nvm_info, "I", "NVM Information"); + SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), + SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), + OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD, + adapter, 0, igc_sysctl_print_fw_version, "A", + "Prints FW/NVM Versions"); + SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0, igc_sysctl_debug_info, "I", "Debug Information"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0, igc_set_flowcntl, "I", "Flow Control"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "reg_dump", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, 0, igc_get_regs, "A", "Dump Registers"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "rs_dump", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0, igc_get_rs, "I", "Dump RS indexes"); /* Determine hardware and mac info */ igc_identify_hardware(ctx); scctx->isc_tx_nsegments = IGC_MAX_SCATTER; scctx->isc_nrxqsets_max = scctx->isc_ntxqsets_max = igc_set_num_queues(ctx); if (bootverbose) device_printf(dev, "attach_pre capping queues at %d\n", scctx->isc_ntxqsets_max); scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] * sizeof(union igc_adv_tx_desc), IGC_DBA_ALIGN); scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] * sizeof(union igc_adv_rx_desc), IGC_DBA_ALIGN); scctx->isc_txd_size[0] = sizeof(union igc_adv_tx_desc); scctx->isc_rxd_size[0] = sizeof(union igc_adv_rx_desc); scctx->isc_txrx = &igc_txrx; scctx->isc_tx_tso_segments_max = IGC_MAX_SCATTER; scctx->isc_tx_tso_size_max = IGC_TSO_SIZE; scctx->isc_tx_tso_segsize_max = IGC_TSO_SEG_SIZE; scctx->isc_capabilities = scctx->isc_capenable = IGC_CAPS; scctx->isc_tx_csum_flags = CSUM_TCP | CSUM_UDP | CSUM_TSO | CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_SCTP | CSUM_IP6_SCTP; /* ** Some new devices, as with ixgbe, now may ** use a different BAR, so we need to keep ** track of which is used. 
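	** Descriptive note on the check below: if the BAR register at that
	** offset reads back as zero, that BAR is not implemented, and the
	** code advances to the next BAR offset (+4 bytes in PCI config
	** space) to find the one holding the MSI-X tables.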
*/ scctx->isc_msix_bar = PCIR_BAR(IGC_MSIX_BAR); if (pci_read_config(dev, scctx->isc_msix_bar, 4) == 0) scctx->isc_msix_bar += 4; /* Setup PCI resources */ if (igc_allocate_pci_resources(ctx)) { device_printf(dev, "Allocation of PCI resources failed\n"); error = ENXIO; goto err_pci; } /* Do Shared Code initialization */ error = igc_setup_init_funcs(hw, true); if (error) { device_printf(dev, "Setup of Shared code failed, error %d\n", error); error = ENXIO; goto err_pci; } igc_setup_msix(ctx); igc_get_bus_info(hw); hw->mac.autoneg = DO_AUTO_NEG; hw->phy.autoneg_wait_to_complete = false; hw->phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; /* Copper options */ if (hw->phy.media_type == igc_media_type_copper) { hw->phy.mdix = AUTO_ALL_MODES; } /* * Set the frame limits assuming * standard ethernet sized frames. */ scctx->isc_max_frame_size = adapter->hw.mac.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE; /* Allocate multicast array memory. */ adapter->mta = malloc(sizeof(u8) * ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT); if (adapter->mta == NULL) { device_printf(dev, "Can not allocate multicast setup array\n"); error = ENOMEM; goto err_late; } /* Check SOL/IDER usage */ if (igc_check_reset_block(hw)) device_printf(dev, "PHY reset is blocked" " due to SOL/IDER session.\n"); /* Sysctl for setting Energy Efficient Ethernet */ adapter->hw.dev_spec._i225.eee_disable = igc_eee_setting; SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "eee_control", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0, igc_sysctl_eee, "I", "Disable Energy Efficient Ethernet"); /* ** Start from a known state, this is ** important in reading the nvm and ** mac from that. */ igc_reset_hw(hw); /* Make sure we have a good EEPROM before we read from it */ if (igc_validate_nvm_checksum(hw) < 0) { /* ** Some PCI-E parts fail the first check due to ** the link being in sleep state, call it again, ** if it fails a second time its a real issue. 
*/ if (igc_validate_nvm_checksum(hw) < 0) { device_printf(dev, "The EEPROM Checksum Is Not Valid\n"); error = EIO; goto err_late; } } /* Copy the permanent MAC address out of the EEPROM */ if (igc_read_mac_addr(hw) < 0) { device_printf(dev, "EEPROM read error while reading MAC" " address\n"); error = EIO; goto err_late; } if (!igc_is_valid_ether_addr(hw->mac.addr)) { device_printf(dev, "Invalid MAC address\n"); error = EIO; goto err_late; } + /* Save the EEPROM/NVM versions */ + igc_fw_version(adapter); + + igc_print_fw_version(adapter); + /* * Get Wake-on-Lan and Management info for later use */ igc_get_wakeup(ctx); /* Enable only WOL MAGIC by default */ scctx->isc_capenable &= ~IFCAP_WOL; if (adapter->wol != 0) scctx->isc_capenable |= IFCAP_WOL_MAGIC; iflib_set_mac(ctx, hw->mac.addr); return (0); err_late: igc_release_hw_control(adapter); err_pci: igc_free_pci_resources(ctx); free(adapter->mta, M_DEVBUF); return (error); } static int igc_if_attach_post(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); struct igc_hw *hw = &adapter->hw; int error = 0; /* Setup OS specific network interface */ error = igc_setup_interface(ctx); if (error != 0) { goto err_late; } igc_reset(ctx); /* Initialize statistics */ igc_update_stats_counters(adapter); hw->mac.get_link_status = true; igc_if_update_admin_status(ctx); igc_add_hw_stats(adapter); /* the driver can now take control from firmware */ igc_get_hw_control(adapter); INIT_DEBUGOUT("igc_if_attach_post: end"); return (error); err_late: igc_release_hw_control(adapter); igc_free_pci_resources(ctx); igc_if_queues_free(ctx); free(adapter->mta, M_DEVBUF); return (error); } /********************************************************************* * Device removal routine * * The detach entry point is called when the driver is being removed. * This routine stops the adapter and deallocates all the resources * that were allocated for driver operation. * * return 0 on success, positive on failure *********************************************************************/ static int igc_if_detach(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); INIT_DEBUGOUT("igc_if_detach: begin"); igc_phy_hw_reset(&adapter->hw); igc_release_hw_control(adapter); igc_free_pci_resources(ctx); return (0); } /********************************************************************* * * Shutdown entry point * **********************************************************************/ static int igc_if_shutdown(if_ctx_t ctx) { return igc_if_suspend(ctx); } /* * Suspend/resume device methods. */ static int igc_if_suspend(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); igc_release_hw_control(adapter); igc_enable_wakeup(ctx); return (0); } static int igc_if_resume(if_ctx_t ctx) { igc_if_init(ctx); return(0); } static int igc_if_mtu_set(if_ctx_t ctx, uint32_t mtu) { int max_frame_size; struct igc_adapter *adapter = iflib_get_softc(ctx); if_softc_ctx_t scctx = iflib_get_softc_ctx(ctx); IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)"); /* 9K Jumbo Frame size */ max_frame_size = 9234; if (mtu > max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN) { return (EINVAL); } scctx->isc_max_frame_size = adapter->hw.mac.max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; return (0); } /********************************************************************* * Init entry point * * This routine is used in two ways. It is used by the stack as * init entry point in network interface structure. 
It is also used * by the driver as a hw/sw initialization routine to get to a * consistent state. * **********************************************************************/ static void igc_if_init(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); if_softc_ctx_t scctx = adapter->shared; if_t ifp = iflib_get_ifp(ctx); struct igc_tx_queue *tx_que; int i; INIT_DEBUGOUT("igc_if_init: begin"); /* Get the latest mac address, User can use a LAA */ bcopy(if_getlladdr(ifp), adapter->hw.mac.addr, ETHER_ADDR_LEN); /* Put the address into the Receive Address Array */ igc_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); /* Initialize the hardware */ igc_reset(ctx); igc_if_update_admin_status(ctx); for (i = 0, tx_que = adapter->tx_queues; i < adapter->tx_num_queues; i++, tx_que++) { struct tx_ring *txr = &tx_que->txr; txr->tx_rs_cidx = txr->tx_rs_pidx; /* Initialize the last processed descriptor to be the end of * the ring, rather than the start, so that we avoid an * off-by-one error when calculating how many descriptors are * done in the credits_update function. */ txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1; } /* Setup VLAN support, basic and offload if available */ IGC_WRITE_REG(&adapter->hw, IGC_VET, ETHERTYPE_VLAN); /* Prepare transmit descriptors and buffers */ igc_initialize_transmit_unit(ctx); /* Setup Multicast table */ igc_if_multi_set(ctx); adapter->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx); igc_initialize_receive_unit(ctx); /* Set up VLAN support */ igc_setup_vlan_hw_support(ctx); /* Don't lose promiscuous settings */ igc_if_set_promisc(ctx, if_getflags(ifp)); igc_clear_hw_cntrs_base_generic(&adapter->hw); if (adapter->intr_type == IFLIB_INTR_MSIX) /* Set up queue routing */ igc_configure_queues(adapter); /* this clears any pending interrupts */ IGC_READ_REG(&adapter->hw, IGC_ICR); IGC_WRITE_REG(&adapter->hw, IGC_ICS, IGC_ICS_LSC); /* the driver can now take control from firmware */ igc_get_hw_control(adapter); /* Set Energy Efficient Ethernet */ igc_set_eee_i225(&adapter->hw, true, true, true); } /********************************************************************* * * Fast Legacy/MSI Combined Interrupt Service routine * *********************************************************************/ int igc_intr(void *arg) { struct igc_adapter *adapter = arg; if_ctx_t ctx = adapter->ctx; u32 reg_icr; reg_icr = IGC_READ_REG(&adapter->hw, IGC_ICR); /* Hot eject? */ if (reg_icr == 0xffffffff) return FILTER_STRAY; /* Definitely not our interrupt. */ if (reg_icr == 0x0) return FILTER_STRAY; if ((reg_icr & IGC_ICR_INT_ASSERTED) == 0) return FILTER_STRAY; /* * Only MSI-X interrupts have one-shot behavior by taking advantage * of the EIAC register. Thus, explicitly disable interrupts. This * also works around the MSI message reordering errata on certain * systems. 
*/ IFDI_INTR_DISABLE(ctx); /* Link status change */ if (reg_icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) igc_handle_link(ctx); if (reg_icr & IGC_ICR_RXO) adapter->rx_overruns++; return (FILTER_SCHEDULE_THREAD); } static int igc_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid) { struct igc_adapter *adapter = iflib_get_softc(ctx); struct igc_rx_queue *rxq = &adapter->rx_queues[rxqid]; IGC_WRITE_REG(&adapter->hw, IGC_EIMS, rxq->eims); return (0); } static int igc_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid) { struct igc_adapter *adapter = iflib_get_softc(ctx); struct igc_tx_queue *txq = &adapter->tx_queues[txqid]; IGC_WRITE_REG(&adapter->hw, IGC_EIMS, txq->eims); return (0); } /********************************************************************* * * MSI-X RX Interrupt Service routine * **********************************************************************/ static int igc_msix_que(void *arg) { struct igc_rx_queue *que = arg; ++que->irqs; return (FILTER_SCHEDULE_THREAD); } /********************************************************************* * * MSI-X Link Fast Interrupt Service routine * **********************************************************************/ static int igc_msix_link(void *arg) { struct igc_adapter *adapter = arg; u32 reg_icr; ++adapter->link_irq; MPASS(adapter->hw.back != NULL); reg_icr = IGC_READ_REG(&adapter->hw, IGC_ICR); if (reg_icr & IGC_ICR_RXO) adapter->rx_overruns++; if (reg_icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) { igc_handle_link(adapter->ctx); } IGC_WRITE_REG(&adapter->hw, IGC_IMS, IGC_IMS_LSC); IGC_WRITE_REG(&adapter->hw, IGC_EIMS, adapter->link_mask); return (FILTER_HANDLED); } static void igc_handle_link(void *context) { if_ctx_t ctx = context; struct igc_adapter *adapter = iflib_get_softc(ctx); adapter->hw.mac.get_link_status = true; iflib_admin_intr_deferred(ctx); } /********************************************************************* * * Media Ioctl callback * * This routine is called whenever the user queries the status of * the interface using ifconfig. * **********************************************************************/ static void igc_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr) { struct igc_adapter *adapter = iflib_get_softc(ctx); INIT_DEBUGOUT("igc_if_media_status: begin"); iflib_admin_intr_deferred(ctx); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (!adapter->link_active) { return; } ifmr->ifm_status |= IFM_ACTIVE; switch (adapter->link_speed) { case 10: ifmr->ifm_active |= IFM_10_T; break; case 100: ifmr->ifm_active |= IFM_100_TX; break; case 1000: ifmr->ifm_active |= IFM_1000_T; break; case 2500: ifmr->ifm_active |= IFM_2500_T; break; } if (adapter->link_duplex == FULL_DUPLEX) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; } /********************************************************************* * * Media Ioctl callback * * This routine is called when the user changes speed/duplex using * media/mediopt option with ifconfig. 
* **********************************************************************/ static int igc_if_media_change(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); struct ifmedia *ifm = iflib_get_media(ctx); INIT_DEBUGOUT("igc_if_media_change: begin"); if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); adapter->hw.mac.autoneg = DO_AUTO_NEG; switch (IFM_SUBTYPE(ifm->ifm_media)) { case IFM_AUTO: adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; break; case IFM_2500_T: adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL; break; case IFM_1000_T: adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; break; case IFM_100_TX: if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) adapter->hw.phy.autoneg_advertised = ADVERTISE_100_FULL; else adapter->hw.phy.autoneg_advertised = ADVERTISE_100_HALF; break; case IFM_10_T: if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) adapter->hw.phy.autoneg_advertised = ADVERTISE_10_FULL; else adapter->hw.phy.autoneg_advertised = ADVERTISE_10_HALF; break; default: device_printf(adapter->dev, "Unsupported media type\n"); } igc_if_init(ctx); return (0); } static int igc_if_set_promisc(if_ctx_t ctx, int flags) { struct igc_adapter *adapter = iflib_get_softc(ctx); if_t ifp = iflib_get_ifp(ctx); u32 reg_rctl; int mcnt = 0; reg_rctl = IGC_READ_REG(&adapter->hw, IGC_RCTL); reg_rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_UPE); if (flags & IFF_ALLMULTI) mcnt = MAX_NUM_MULTICAST_ADDRESSES; else mcnt = min(if_llmaddr_count(ifp), MAX_NUM_MULTICAST_ADDRESSES); /* Don't disable if in MAX groups */ if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) reg_rctl &= (~IGC_RCTL_MPE); IGC_WRITE_REG(&adapter->hw, IGC_RCTL, reg_rctl); if (flags & IFF_PROMISC) { reg_rctl |= (IGC_RCTL_UPE | IGC_RCTL_MPE); /* Turn this on if you want to see bad packets */ if (igc_debug_sbp) reg_rctl |= IGC_RCTL_SBP; IGC_WRITE_REG(&adapter->hw, IGC_RCTL, reg_rctl); } else if (flags & IFF_ALLMULTI) { reg_rctl |= IGC_RCTL_MPE; reg_rctl &= ~IGC_RCTL_UPE; IGC_WRITE_REG(&adapter->hw, IGC_RCTL, reg_rctl); } return (0); } static u_int igc_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int idx) { u8 *mta = arg; if (idx == MAX_NUM_MULTICAST_ADDRESSES) return (0); bcopy(LLADDR(sdl), &mta[idx * ETHER_ADDR_LEN], ETHER_ADDR_LEN); return (1); } /********************************************************************* * Multicast Update * * This routine is called whenever multicast address list is updated. 
* **********************************************************************/ static void igc_if_multi_set(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); if_t ifp = iflib_get_ifp(ctx); u8 *mta; /* Multicast array memory */ u32 reg_rctl = 0; int mcnt = 0; IOCTL_DEBUGOUT("igc_set_multi: begin"); mta = adapter->mta; bzero(mta, sizeof(u8) * ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES); mcnt = if_foreach_llmaddr(ifp, igc_copy_maddr, mta); reg_rctl = IGC_READ_REG(&adapter->hw, IGC_RCTL); if (if_getflags(ifp) & IFF_PROMISC) { reg_rctl |= (IGC_RCTL_UPE | IGC_RCTL_MPE); /* Turn this on if you want to see bad packets */ if (igc_debug_sbp) reg_rctl |= IGC_RCTL_SBP; } else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES || if_getflags(ifp) & IFF_ALLMULTI) { reg_rctl |= IGC_RCTL_MPE; reg_rctl &= ~IGC_RCTL_UPE; } else reg_rctl &= ~(IGC_RCTL_UPE | IGC_RCTL_MPE); if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) igc_update_mc_addr_list(&adapter->hw, mta, mcnt); IGC_WRITE_REG(&adapter->hw, IGC_RCTL, reg_rctl); } /********************************************************************* * Timer routine * * This routine schedules igc_if_update_admin_status() to check for * link status and to gather statistics as well as to perform some * controller-specific hardware patting. * **********************************************************************/ static void igc_if_timer(if_ctx_t ctx, uint16_t qid) { if (qid != 0) return; iflib_admin_intr_deferred(ctx); } static void igc_if_update_admin_status(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); struct igc_hw *hw = &adapter->hw; device_t dev = iflib_get_dev(ctx); u32 link_check, thstat, ctrl; link_check = thstat = ctrl = 0; /* Get the cached link value or read phy for real */ switch (hw->phy.media_type) { case igc_media_type_copper: if (hw->mac.get_link_status == true) { /* Do the work to read phy */ igc_check_for_link(hw); link_check = !hw->mac.get_link_status; } else link_check = true; break; case igc_media_type_unknown: igc_check_for_link(hw); link_check = !hw->mac.get_link_status; /* FALLTHROUGH */ default: break; } /* Now check for a transition */ if (link_check && (adapter->link_active == 0)) { igc_get_speed_and_duplex(hw, &adapter->link_speed, &adapter->link_duplex); if (bootverbose) device_printf(dev, "Link is up %d Mbps %s\n", adapter->link_speed, ((adapter->link_duplex == FULL_DUPLEX) ? "Full Duplex" : "Half Duplex")); adapter->link_active = 1; iflib_link_state_change(ctx, LINK_STATE_UP, IF_Mbps(adapter->link_speed)); } else if (!link_check && (adapter->link_active == 1)) { adapter->link_speed = 0; adapter->link_duplex = 0; adapter->link_active = 0; iflib_link_state_change(ctx, LINK_STATE_DOWN, 0); } igc_update_stats_counters(adapter); } static void igc_if_watchdog_reset(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); /* * Just count the event; iflib(4) will already trigger a * sufficient reset of the controller. */ adapter->watchdog_events++; } /********************************************************************* * * This routine disables all traffic on the adapter by issuing a * global reset on the MAC. * **********************************************************************/ static void igc_if_stop(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); INIT_DEBUGOUT("igc_if_stop: begin"); igc_reset_hw(&adapter->hw); IGC_WRITE_REG(&adapter->hw, IGC_WUC, 0); } /********************************************************************* * * Determine hardware revision. 
* **********************************************************************/ static void igc_identify_hardware(if_ctx_t ctx) { device_t dev = iflib_get_dev(ctx); struct igc_adapter *adapter = iflib_get_softc(ctx); /* Make sure our PCI config space has the necessary stuff set */ adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); /* Save off the information about this board */ adapter->hw.vendor_id = pci_get_vendor(dev); adapter->hw.device_id = pci_get_device(dev); adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1); adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2); adapter->hw.subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); /* Do Shared Code Init and Setup */ if (igc_set_mac_type(&adapter->hw)) { device_printf(dev, "Setup init failure\n"); return; } } static int igc_allocate_pci_resources(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); device_t dev = iflib_get_dev(ctx); int rid; rid = PCIR_BAR(0); adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (adapter->memory == NULL) { device_printf(dev, "Unable to allocate bus resource: memory\n"); return (ENXIO); } adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->memory); adapter->osdep.mem_bus_space_handle = rman_get_bushandle(adapter->memory); adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle; adapter->hw.back = &adapter->osdep; return (0); } /********************************************************************* * * Set up the MSI-X Interrupt handlers * **********************************************************************/ static int igc_if_msix_intr_assign(if_ctx_t ctx, int msix) { struct igc_adapter *adapter = iflib_get_softc(ctx); struct igc_rx_queue *rx_que = adapter->rx_queues; struct igc_tx_queue *tx_que = adapter->tx_queues; int error, rid, i, vector = 0, rx_vectors; char buf[16]; /* First set up ring resources */ for (i = 0; i < adapter->rx_num_queues; i++, rx_que++, vector++) { rid = vector + 1; snprintf(buf, sizeof(buf), "rxq%d", i); error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, IFLIB_INTR_RXTX, igc_msix_que, rx_que, rx_que->me, buf); if (error) { device_printf(iflib_get_dev(ctx), "Failed to allocate que int %d err: %d", i, error); adapter->rx_num_queues = i + 1; goto fail; } rx_que->msix = vector; /* * Set the bit to enable interrupt * in IGC_IMS -- bits 20 and 21 * are for RX0 and RX1, note this has * NOTHING to do with the MSI-X vector */ rx_que->eims = 1 << vector; } rx_vectors = vector; vector = 0; for (i = 0; i < adapter->tx_num_queues; i++, tx_que++, vector++) { snprintf(buf, sizeof(buf), "txq%d", i); tx_que = &adapter->tx_queues[i]; iflib_softirq_alloc_generic(ctx, &adapter->rx_queues[i % adapter->rx_num_queues].que_irq, IFLIB_INTR_TX, tx_que, tx_que->me, buf); tx_que->msix = (vector % adapter->rx_num_queues); /* * Set the bit to enable interrupt * in IGC_IMS -- bits 22 and 23 * are for TX0 and TX1, note this has * NOTHING to do with the MSI-X vector */ tx_que->eims = 1 << i; } /* Link interrupt */ rid = rx_vectors + 1; error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid, IFLIB_INTR_ADMIN, igc_msix_link, adapter, 0, "aq"); if (error) { device_printf(iflib_get_dev(ctx), "Failed to register admin handler"); goto fail; } adapter->linkvec = rx_vectors; return (0); fail: iflib_irq_free(ctx, &adapter->irq); rx_que = adapter->rx_queues; for (int i = 0; i < adapter->rx_num_queues; i++, rx_que++) iflib_irq_free(ctx, &rx_que->que_irq); return (error); } static void 
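/*
 * MSI-X routing note for the function below: each 32-bit entry of the
 * IVAR0 array carries the vector assignments for one pair of queues
 * (RX queue 2n in bits 7:0, TX queue 2n in bits 15:8, RX queue 2n+1 in
 * bits 23:16, TX queue 2n+1 in bits 31:24), with IGC_IVAR_VALID marking
 * a byte as in use.  The link (other-cause) vector is routed separately
 * through IVAR_MISC.
 */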
igc_configure_queues(struct igc_adapter *adapter) { struct igc_hw *hw = &adapter->hw; struct igc_rx_queue *rx_que; struct igc_tx_queue *tx_que; u32 ivar = 0, newitr = 0; /* First turn on RSS capability */ IGC_WRITE_REG(hw, IGC_GPIE, IGC_GPIE_MSIX_MODE | IGC_GPIE_EIAME | IGC_GPIE_PBA | IGC_GPIE_NSICR); /* Turn on MSI-X */ /* RX entries */ for (int i = 0; i < adapter->rx_num_queues; i++) { u32 index = i >> 1; ivar = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, index); rx_que = &adapter->rx_queues[i]; if (i & 1) { ivar &= 0xFF00FFFF; ivar |= (rx_que->msix | IGC_IVAR_VALID) << 16; } else { ivar &= 0xFFFFFF00; ivar |= rx_que->msix | IGC_IVAR_VALID; } IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, index, ivar); } /* TX entries */ for (int i = 0; i < adapter->tx_num_queues; i++) { u32 index = i >> 1; ivar = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, index); tx_que = &adapter->tx_queues[i]; if (i & 1) { ivar &= 0x00FFFFFF; ivar |= (tx_que->msix | IGC_IVAR_VALID) << 24; } else { ivar &= 0xFFFF00FF; ivar |= (tx_que->msix | IGC_IVAR_VALID) << 8; } IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, index, ivar); adapter->que_mask |= tx_que->eims; } /* And for the link interrupt */ ivar = (adapter->linkvec | IGC_IVAR_VALID) << 8; adapter->link_mask = 1 << adapter->linkvec; IGC_WRITE_REG(hw, IGC_IVAR_MISC, ivar); /* Set the starting interrupt rate */ if (igc_max_interrupt_rate > 0) newitr = (4000000 / igc_max_interrupt_rate) & 0x7FFC; newitr |= IGC_EITR_CNT_IGNR; for (int i = 0; i < adapter->rx_num_queues; i++) { rx_que = &adapter->rx_queues[i]; IGC_WRITE_REG(hw, IGC_EITR(rx_que->msix), newitr); } return; } static void igc_free_pci_resources(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); struct igc_rx_queue *que = adapter->rx_queues; device_t dev = iflib_get_dev(ctx); /* Release all MSI-X queue resources */ if (adapter->intr_type == IFLIB_INTR_MSIX) iflib_irq_free(ctx, &adapter->irq); for (int i = 0; i < adapter->rx_num_queues; i++, que++) { iflib_irq_free(ctx, &que->que_irq); } if (adapter->memory != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(adapter->memory), adapter->memory); adapter->memory = NULL; } if (adapter->flash != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(adapter->flash), adapter->flash); adapter->flash = NULL; } if (adapter->ioport != NULL) { bus_release_resource(dev, SYS_RES_IOPORT, rman_get_rid(adapter->ioport), adapter->ioport); adapter->ioport = NULL; } } /* Set up MSI or MSI-X */ static int igc_setup_msix(if_ctx_t ctx) { return (0); } /********************************************************************* * * Initialize the DMA Coalescing feature * **********************************************************************/ static void igc_init_dmac(struct igc_adapter *adapter, u32 pba) { device_t dev = adapter->dev; struct igc_hw *hw = &adapter->hw; u32 dmac, reg = ~IGC_DMACR_DMAC_EN; u16 hwm; u16 max_frame_size; int status; max_frame_size = adapter->shared->isc_max_frame_size; if (adapter->dmac == 0) { /* Disabling it */ IGC_WRITE_REG(hw, IGC_DMACR, reg); return; } else device_printf(dev, "DMA Coalescing enabled\n"); /* Set starting threshold */ IGC_WRITE_REG(hw, IGC_DMCTXTH, 0); hwm = 64 * pba - max_frame_size / 16; if (hwm < 64 * (pba - 6)) hwm = 64 * (pba - 6); reg = IGC_READ_REG(hw, IGC_FCRTC); reg &= ~IGC_FCRTC_RTH_COAL_MASK; reg |= ((hwm << IGC_FCRTC_RTH_COAL_SHIFT) & IGC_FCRTC_RTH_COAL_MASK); IGC_WRITE_REG(hw, IGC_FCRTC, reg); dmac = pba - max_frame_size / 512; if (dmac < pba - 10) dmac = pba - 10; reg = IGC_READ_REG(hw, IGC_DMACR); reg &= ~IGC_DMACR_DMACTHR_MASK; reg 
|= ((dmac << IGC_DMACR_DMACTHR_SHIFT) & IGC_DMACR_DMACTHR_MASK); /* transition to L0x or L1 if available..*/ reg |= (IGC_DMACR_DMAC_EN | IGC_DMACR_DMAC_LX_MASK); /* Check if status is 2.5Gb backplane connection * before configuration of watchdog timer, which is * in msec values in 12.8usec intervals * watchdog timer= msec values in 32usec intervals * for non 2.5Gb connection */ status = IGC_READ_REG(hw, IGC_STATUS); if ((status & IGC_STATUS_2P5_SKU) && (!(status & IGC_STATUS_2P5_SKU_OVER))) reg |= ((adapter->dmac * 5) >> 6); else reg |= (adapter->dmac >> 5); IGC_WRITE_REG(hw, IGC_DMACR, reg); IGC_WRITE_REG(hw, IGC_DMCRTRH, 0); /* Set the interval before transition */ reg = IGC_READ_REG(hw, IGC_DMCTLX); reg |= IGC_DMCTLX_DCFLUSH_DIS; /* ** in 2.5Gb connection, TTLX unit is 0.4 usec ** which is 0x4*2 = 0xA. But delay is still 4 usec */ status = IGC_READ_REG(hw, IGC_STATUS); if ((status & IGC_STATUS_2P5_SKU) && (!(status & IGC_STATUS_2P5_SKU_OVER))) reg |= 0xA; else reg |= 0x4; IGC_WRITE_REG(hw, IGC_DMCTLX, reg); /* free space in tx packet buffer to wake from DMA coal */ IGC_WRITE_REG(hw, IGC_DMCTXTH, (IGC_TXPBSIZE - (2 * max_frame_size)) >> 6); /* make low power state decision controlled by DMA coal */ reg = IGC_READ_REG(hw, IGC_PCIEMISC); reg &= ~IGC_PCIEMISC_LX_DECISION; IGC_WRITE_REG(hw, IGC_PCIEMISC, reg); } /********************************************************************* * * Initialize the hardware to a configuration as specified by the * adapter structure. * **********************************************************************/ static void igc_reset(if_ctx_t ctx) { device_t dev = iflib_get_dev(ctx); struct igc_adapter *adapter = iflib_get_softc(ctx); struct igc_hw *hw = &adapter->hw; u32 rx_buffer_size; u32 pba; INIT_DEBUGOUT("igc_reset: begin"); /* Let the firmware know the OS is in control */ igc_get_hw_control(adapter); /* * Packet Buffer Allocation (PBA) * Writing PBA sets the receive portion of the buffer * the remainder is used for the transmit buffer. */ pba = IGC_PBA_34K; INIT_DEBUGOUT1("igc_reset: pba=%dK",pba); /* * These parameters control the automatic generation (Tx) and * response (Rx) to Ethernet PAUSE frames. * - High water mark should allow for at least two frames to be * received after sending an XOFF. * - Low water mark works best when it is very near the high water mark. * This allows the receiver to restart by sending XON when it has * drained a bit. Here we use an arbitrary value of 1500 which will * restart after one full frame is pulled from the buffer. There * could be several smaller frames in the buffer and if so they will * not trigger the XON until their total number reduces the buffer * by 1500. * - The pause time is fairly large at 1000 x 512ns = 512 usec. */ rx_buffer_size = (pba & 0xffff) << 10; hw->fc.high_water = rx_buffer_size - roundup2(adapter->hw.mac.max_frame_size, 1024); /* 16-byte granularity */ hw->fc.low_water = hw->fc.high_water - 16; if (adapter->fc) /* locally set flow control value? 
*/ hw->fc.requested_mode = adapter->fc; else hw->fc.requested_mode = igc_fc_full; hw->fc.pause_time = IGC_FC_PAUSE_TIME; hw->fc.send_xon = true; /* Issue a global reset */ igc_reset_hw(hw); IGC_WRITE_REG(hw, IGC_WUC, 0); /* and a re-init */ if (igc_init_hw(hw) < 0) { device_printf(dev, "Hardware Initialization Failed\n"); return; } /* Setup DMA Coalescing */ igc_init_dmac(adapter, pba); IGC_WRITE_REG(hw, IGC_VET, ETHERTYPE_VLAN); igc_get_phy_info(hw); igc_check_for_link(hw); } /* * Initialise the RSS mapping for NICs that support multiple transmit/ * receive rings. */ #define RSSKEYLEN 10 static void igc_initialize_rss_mapping(struct igc_adapter *adapter) { struct igc_hw *hw = &adapter->hw; int i; int queue_id; u32 reta; u32 rss_key[RSSKEYLEN], mrqc, shift = 0; /* * The redirection table controls which destination * queue each bucket redirects traffic to. * Each DWORD represents four queues, with the LSB * being the first queue in the DWORD. * * This just allocates buckets to queues using round-robin * allocation. * * NOTE: It Just Happens to line up with the default * RSS allocation method. */ /* Warning FM follows */ reta = 0; for (i = 0; i < 128; i++) { #ifdef RSS queue_id = rss_get_indirection_to_bucket(i); /* * If we have more queues than buckets, we'll * end up mapping buckets to a subset of the * queues. * * If we have more buckets than queues, we'll * end up instead assigning multiple buckets * to queues. * * Both are suboptimal, but we need to handle * the case so we don't go out of bounds * indexing arrays and such. */ queue_id = queue_id % adapter->rx_num_queues; #else queue_id = (i % adapter->rx_num_queues); #endif /* Adjust if required */ queue_id = queue_id << shift; /* * The low 8 bits are for hash value (n+0); * The next 8 bits are for hash value (n+1), etc. */ reta = reta >> 8; reta = reta | ( ((uint32_t) queue_id) << 24); if ((i & 3) == 3) { IGC_WRITE_REG(hw, IGC_RETA(i >> 2), reta); reta = 0; } } /* Now fill in hash table */ /* * MRQC: Multiple Receive Queues Command * Set queuing to RSS control, number depends on the device. */ mrqc = IGC_MRQC_ENABLE_RSS_4Q; #ifdef RSS /* XXX ew typecasting */ rss_getkey((uint8_t *) &rss_key); #else arc4rand(&rss_key, sizeof(rss_key), 0); #endif for (i = 0; i < RSSKEYLEN; i++) IGC_WRITE_REG_ARRAY(hw, IGC_RSSRK(0), i, rss_key[i]); /* * Configure the RSS fields to hash upon. */ mrqc |= (IGC_MRQC_RSS_FIELD_IPV4 | IGC_MRQC_RSS_FIELD_IPV4_TCP); mrqc |= (IGC_MRQC_RSS_FIELD_IPV6 | IGC_MRQC_RSS_FIELD_IPV6_TCP); mrqc |=( IGC_MRQC_RSS_FIELD_IPV4_UDP | IGC_MRQC_RSS_FIELD_IPV6_UDP); mrqc |=( IGC_MRQC_RSS_FIELD_IPV6_UDP_EX | IGC_MRQC_RSS_FIELD_IPV6_TCP_EX); IGC_WRITE_REG(hw, IGC_MRQC, mrqc); } /********************************************************************* * * Setup networking device structure and register interface media. 
* **********************************************************************/ static int igc_setup_interface(if_ctx_t ctx) { if_t ifp = iflib_get_ifp(ctx); struct igc_adapter *adapter = iflib_get_softc(ctx); if_softc_ctx_t scctx = adapter->shared; INIT_DEBUGOUT("igc_setup_interface: begin"); /* Single Queue */ if (adapter->tx_num_queues == 1) { if_setsendqlen(ifp, scctx->isc_ntxd[0] - 1); if_setsendqready(ifp); } /* * Specify the media types supported by this adapter and register * callbacks to update media and link information */ ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL); ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL); ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL); ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL); ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL); ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_T, 0, NULL); ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO); return (0); } static int igc_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets) { struct igc_adapter *adapter = iflib_get_softc(ctx); if_softc_ctx_t scctx = adapter->shared; int error = IGC_SUCCESS; struct igc_tx_queue *que; int i, j; MPASS(adapter->tx_num_queues > 0); MPASS(adapter->tx_num_queues == ntxqsets); /* First allocate the top level queue structs */ if (!(adapter->tx_queues = (struct igc_tx_queue *) malloc(sizeof(struct igc_tx_queue) * adapter->tx_num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(iflib_get_dev(ctx), "Unable to allocate queue memory\n"); return(ENOMEM); } for (i = 0, que = adapter->tx_queues; i < adapter->tx_num_queues; i++, que++) { /* Set up some basics */ struct tx_ring *txr = &que->txr; txr->adapter = que->adapter = adapter; que->me = txr->me = i; /* Allocate report status array */ if (!(txr->tx_rsq = (qidx_t *) malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(iflib_get_dev(ctx), "failed to allocate rs_idxs memory\n"); error = ENOMEM; goto fail; } for (j = 0; j < scctx->isc_ntxd[0]; j++) txr->tx_rsq[j] = QIDX_INVALID; /* get the virtual and physical address of the hardware queues */ txr->tx_base = (struct igc_tx_desc *)vaddrs[i*ntxqs]; txr->tx_paddr = paddrs[i*ntxqs]; } if (bootverbose) device_printf(iflib_get_dev(ctx), "allocated for %d tx_queues\n", adapter->tx_num_queues); return (0); fail: igc_if_queues_free(ctx); return (error); } static int igc_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets) { struct igc_adapter *adapter = iflib_get_softc(ctx); int error = IGC_SUCCESS; struct igc_rx_queue *que; int i; MPASS(adapter->rx_num_queues > 0); MPASS(adapter->rx_num_queues == nrxqsets); /* First allocate the top level queue structs */ if (!(adapter->rx_queues = (struct igc_rx_queue *) malloc(sizeof(struct igc_rx_queue) * adapter->rx_num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(iflib_get_dev(ctx), "Unable to allocate queue memory\n"); error = ENOMEM; goto fail; } for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) { /* Set up some basics */ struct rx_ring *rxr = &que->rxr; rxr->adapter = que->adapter = adapter; rxr->que = que; que->me = rxr->me = i; /* get the virtual and physical address of the hardware queues */ rxr->rx_base = (union igc_rx_desc_extended *)vaddrs[i*nrxqs]; rxr->rx_paddr = paddrs[i*nrxqs]; } if 
(bootverbose) device_printf(iflib_get_dev(ctx), "allocated for %d rx_queues\n", adapter->rx_num_queues); return (0); fail: igc_if_queues_free(ctx); return (error); } static void igc_if_queues_free(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); struct igc_tx_queue *tx_que = adapter->tx_queues; struct igc_rx_queue *rx_que = adapter->rx_queues; if (tx_que != NULL) { for (int i = 0; i < adapter->tx_num_queues; i++, tx_que++) { struct tx_ring *txr = &tx_que->txr; if (txr->tx_rsq == NULL) break; free(txr->tx_rsq, M_DEVBUF); txr->tx_rsq = NULL; } free(adapter->tx_queues, M_DEVBUF); adapter->tx_queues = NULL; } if (rx_que != NULL) { free(adapter->rx_queues, M_DEVBUF); adapter->rx_queues = NULL; } igc_release_hw_control(adapter); if (adapter->mta != NULL) { free(adapter->mta, M_DEVBUF); } } /********************************************************************* * * Enable transmit unit. * **********************************************************************/ static void igc_initialize_transmit_unit(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); if_softc_ctx_t scctx = adapter->shared; struct igc_tx_queue *que; struct tx_ring *txr; struct igc_hw *hw = &adapter->hw; u32 tctl, txdctl = 0; INIT_DEBUGOUT("igc_initialize_transmit_unit: begin"); for (int i = 0; i < adapter->tx_num_queues; i++, txr++) { u64 bus_addr; caddr_t offp, endp; que = &adapter->tx_queues[i]; txr = &que->txr; bus_addr = txr->tx_paddr; /* Clear checksum offload context. */ offp = (caddr_t)&txr->csum_flags; endp = (caddr_t)(txr + 1); bzero(offp, endp - offp); /* Base and Len of TX Ring */ IGC_WRITE_REG(hw, IGC_TDLEN(i), scctx->isc_ntxd[0] * sizeof(struct igc_tx_desc)); IGC_WRITE_REG(hw, IGC_TDBAH(i), (u32)(bus_addr >> 32)); IGC_WRITE_REG(hw, IGC_TDBAL(i), (u32)bus_addr); /* Init the HEAD/TAIL indices */ IGC_WRITE_REG(hw, IGC_TDT(i), 0); IGC_WRITE_REG(hw, IGC_TDH(i), 0); HW_DEBUGOUT2("Base = %x, Length = %x\n", IGC_READ_REG(&adapter->hw, IGC_TDBAL(i)), IGC_READ_REG(&adapter->hw, IGC_TDLEN(i))); txdctl = 0; /* clear txdctl */ txdctl |= 0x1f; /* PTHRESH */ txdctl |= 1 << 8; /* HTHRESH */ txdctl |= 1 << 16;/* WTHRESH */ txdctl |= 1 << 22; /* Reserved bit 22 must always be 1 */ txdctl |= IGC_TXDCTL_GRAN; txdctl |= 1 << 25; /* LWTHRESH */ IGC_WRITE_REG(hw, IGC_TXDCTL(i), txdctl); } /* Program the Transmit Control Register */ tctl = IGC_READ_REG(&adapter->hw, IGC_TCTL); tctl &= ~IGC_TCTL_CT; tctl |= (IGC_TCTL_PSP | IGC_TCTL_RTLC | IGC_TCTL_EN | (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT)); /* This write will effectively turn on the transmit unit. */ IGC_WRITE_REG(&adapter->hw, IGC_TCTL, tctl); } /********************************************************************* * * Enable receive unit. 
* **********************************************************************/ #define BSIZEPKT_ROUNDUP ((1<shared; if_t ifp = iflib_get_ifp(ctx); struct igc_hw *hw = &adapter->hw; struct igc_rx_queue *que; int i; u32 psize, rctl, rxcsum, srrctl = 0; INIT_DEBUGOUT("igc_initialize_receive_units: begin"); /* * Make sure receives are disabled while setting * up the descriptor ring */ rctl = IGC_READ_REG(hw, IGC_RCTL); IGC_WRITE_REG(hw, IGC_RCTL, rctl & ~IGC_RCTL_EN); /* Setup the Receive Control Register */ rctl &= ~(3 << IGC_RCTL_MO_SHIFT); rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_LBM_NO | IGC_RCTL_RDMTS_HALF | (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT); /* Do not store bad packets */ rctl &= ~IGC_RCTL_SBP; /* Enable Long Packet receive */ if (if_getmtu(ifp) > ETHERMTU) rctl |= IGC_RCTL_LPE; else rctl &= ~IGC_RCTL_LPE; /* Strip the CRC */ if (!igc_disable_crc_stripping) rctl |= IGC_RCTL_SECRC; rxcsum = IGC_READ_REG(hw, IGC_RXCSUM); if (if_getcapenable(ifp) & IFCAP_RXCSUM) { rxcsum |= IGC_RXCSUM_CRCOFL; if (adapter->tx_num_queues > 1) rxcsum |= IGC_RXCSUM_PCSD; else rxcsum |= IGC_RXCSUM_IPPCSE; } else { if (adapter->tx_num_queues > 1) rxcsum |= IGC_RXCSUM_PCSD; else rxcsum &= ~IGC_RXCSUM_TUOFL; } IGC_WRITE_REG(hw, IGC_RXCSUM, rxcsum); if (adapter->rx_num_queues > 1) igc_initialize_rss_mapping(adapter); if (if_getmtu(ifp) > ETHERMTU) { psize = scctx->isc_max_frame_size; /* are we on a vlan? */ if (if_vlantrunkinuse(ifp)) psize += VLAN_TAG_SIZE; IGC_WRITE_REG(&adapter->hw, IGC_RLPML, psize); } /* Set maximum packet buffer len */ srrctl |= (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >> IGC_SRRCTL_BSIZEPKT_SHIFT; /* srrctl above overrides this but set the register to a sane value */ rctl |= IGC_RCTL_SZ_2048; /* * If TX flow control is disabled and there's >1 queue defined, * enable DROP. * * This drops frames rather than hanging the RX MAC for all queues. */ if ((adapter->rx_num_queues > 1) && (adapter->fc == igc_fc_none || adapter->fc == igc_fc_rx_pause)) { srrctl |= IGC_SRRCTL_DROP_EN; } /* Setup the Base and Length of the Rx Descriptor Rings */ for (i = 0, que = adapter->rx_queues; i < adapter->rx_num_queues; i++, que++) { struct rx_ring *rxr = &que->rxr; u64 bus_addr = rxr->rx_paddr; u32 rxdctl; #ifdef notyet /* Configure for header split? 
-- ignore for now */ rxr->hdr_split = igc_header_split; #else srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF; #endif IGC_WRITE_REG(hw, IGC_RDLEN(i), scctx->isc_nrxd[0] * sizeof(struct igc_rx_desc)); IGC_WRITE_REG(hw, IGC_RDBAH(i), (uint32_t)(bus_addr >> 32)); IGC_WRITE_REG(hw, IGC_RDBAL(i), (uint32_t)bus_addr); IGC_WRITE_REG(hw, IGC_SRRCTL(i), srrctl); /* Setup the Head and Tail Descriptor Pointers */ IGC_WRITE_REG(hw, IGC_RDH(i), 0); IGC_WRITE_REG(hw, IGC_RDT(i), 0); /* Enable this Queue */ rxdctl = IGC_READ_REG(hw, IGC_RXDCTL(i)); rxdctl |= IGC_RXDCTL_QUEUE_ENABLE; rxdctl &= 0xFFF00000; rxdctl |= IGC_RX_PTHRESH; rxdctl |= IGC_RX_HTHRESH << 8; rxdctl |= IGC_RX_WTHRESH << 16; IGC_WRITE_REG(hw, IGC_RXDCTL(i), rxdctl); } /* Make sure VLAN Filters are off */ rctl &= ~IGC_RCTL_VFE; /* Write out the settings */ IGC_WRITE_REG(hw, IGC_RCTL, rctl); return; } static void igc_setup_vlan_hw_support(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); struct igc_hw *hw = &adapter->hw; struct ifnet *ifp = iflib_get_ifp(ctx); u32 reg; /* igc hardware doesn't seem to implement VFTA for HWFILTER */ if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING && !igc_disable_crc_stripping) { reg = IGC_READ_REG(hw, IGC_CTRL); reg |= IGC_CTRL_VME; IGC_WRITE_REG(hw, IGC_CTRL, reg); } else { reg = IGC_READ_REG(hw, IGC_CTRL); reg &= ~IGC_CTRL_VME; IGC_WRITE_REG(hw, IGC_CTRL, reg); } } static void igc_if_intr_enable(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); struct igc_hw *hw = &adapter->hw; u32 mask; if (__predict_true(adapter->intr_type == IFLIB_INTR_MSIX)) { mask = (adapter->que_mask | adapter->link_mask); IGC_WRITE_REG(hw, IGC_EIAC, mask); IGC_WRITE_REG(hw, IGC_EIAM, mask); IGC_WRITE_REG(hw, IGC_EIMS, mask); IGC_WRITE_REG(hw, IGC_IMS, IGC_IMS_LSC); } else IGC_WRITE_REG(hw, IGC_IMS, IMS_ENABLE_MASK); IGC_WRITE_FLUSH(hw); } static void igc_if_intr_disable(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); struct igc_hw *hw = &adapter->hw; if (__predict_true(adapter->intr_type == IFLIB_INTR_MSIX)) { IGC_WRITE_REG(hw, IGC_EIMC, 0xffffffff); IGC_WRITE_REG(hw, IGC_EIAC, 0); } IGC_WRITE_REG(hw, IGC_IMC, 0xffffffff); IGC_WRITE_FLUSH(hw); } /* * igc_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit. * For ASF and Pass Through versions of f/w this means * that the driver is loaded. For AMT version type f/w * this means that the network i/f is open. */ static void igc_get_hw_control(struct igc_adapter *adapter) { u32 ctrl_ext; if (adapter->vf_ifp) return; ctrl_ext = IGC_READ_REG(&adapter->hw, IGC_CTRL_EXT); IGC_WRITE_REG(&adapter->hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_DRV_LOAD); } /* * igc_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit. * For ASF and Pass Through versions of f/w this means that * the driver is no longer loaded. For AMT versions of the * f/w this means that the network i/f is closed. */ static void igc_release_hw_control(struct igc_adapter *adapter) { u32 ctrl_ext; ctrl_ext = IGC_READ_REG(&adapter->hw, IGC_CTRL_EXT); IGC_WRITE_REG(&adapter->hw, IGC_CTRL_EXT, ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD); return; } static int igc_is_valid_ether_addr(u8 *addr) { char zero_addr[6] = { 0, 0, 0, 0, 0, 0 }; if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) { return (false); } return (true); } /* ** Parse the interface capabilities with regard ** to both system management and wake-on-lan for ** later use. 
*/ static void igc_get_wakeup(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); u16 eeprom_data = 0, apme_mask; apme_mask = IGC_WUC_APME; eeprom_data = IGC_READ_REG(&adapter->hw, IGC_WUC); if (eeprom_data & apme_mask) adapter->wol = IGC_WUFC_LNKC; } /* * Enable PCI Wake On Lan capability */ static void igc_enable_wakeup(if_ctx_t ctx) { struct igc_adapter *adapter = iflib_get_softc(ctx); device_t dev = iflib_get_dev(ctx); if_t ifp = iflib_get_ifp(ctx); int error = 0; u32 pmc, ctrl, rctl; u16 status; if (pci_find_cap(dev, PCIY_PMG, &pmc) != 0) return; /* * Determine type of Wakeup: note that wol * is set with all bits on by default. */ if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) == 0) adapter->wol &= ~IGC_WUFC_MAG; if ((if_getcapenable(ifp) & IFCAP_WOL_UCAST) == 0) adapter->wol &= ~IGC_WUFC_EX; if ((if_getcapenable(ifp) & IFCAP_WOL_MCAST) == 0) adapter->wol &= ~IGC_WUFC_MC; else { rctl = IGC_READ_REG(&adapter->hw, IGC_RCTL); rctl |= IGC_RCTL_MPE; IGC_WRITE_REG(&adapter->hw, IGC_RCTL, rctl); } if (!(adapter->wol & (IGC_WUFC_EX | IGC_WUFC_MAG | IGC_WUFC_MC))) goto pme; /* Advertise the wakeup capability */ ctrl = IGC_READ_REG(&adapter->hw, IGC_CTRL); ctrl |= IGC_CTRL_ADVD3WUC; IGC_WRITE_REG(&adapter->hw, IGC_CTRL, ctrl); /* Enable wakeup by the MAC */ IGC_WRITE_REG(&adapter->hw, IGC_WUC, IGC_WUC_PME_EN); IGC_WRITE_REG(&adapter->hw, IGC_WUFC, adapter->wol); pme: status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2); status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); if (!error && (if_getcapenable(ifp) & IFCAP_WOL)) status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2); return; } /********************************************************************** * * Update the board statistics counters. * **********************************************************************/ static void igc_update_stats_counters(struct igc_adapter *adapter) { u64 prev_xoffrxc = adapter->stats.xoffrxc; adapter->stats.crcerrs += IGC_READ_REG(&adapter->hw, IGC_CRCERRS); adapter->stats.mpc += IGC_READ_REG(&adapter->hw, IGC_MPC); adapter->stats.scc += IGC_READ_REG(&adapter->hw, IGC_SCC); adapter->stats.ecol += IGC_READ_REG(&adapter->hw, IGC_ECOL); adapter->stats.mcc += IGC_READ_REG(&adapter->hw, IGC_MCC); adapter->stats.latecol += IGC_READ_REG(&adapter->hw, IGC_LATECOL); adapter->stats.colc += IGC_READ_REG(&adapter->hw, IGC_COLC); adapter->stats.colc += IGC_READ_REG(&adapter->hw, IGC_RERC); adapter->stats.dc += IGC_READ_REG(&adapter->hw, IGC_DC); adapter->stats.rlec += IGC_READ_REG(&adapter->hw, IGC_RLEC); adapter->stats.xonrxc += IGC_READ_REG(&adapter->hw, IGC_XONRXC); adapter->stats.xontxc += IGC_READ_REG(&adapter->hw, IGC_XONTXC); adapter->stats.xoffrxc += IGC_READ_REG(&adapter->hw, IGC_XOFFRXC); /* * For watchdog management we need to know if we have been * paused during the last interval, so capture that here. 
*/ if (adapter->stats.xoffrxc != prev_xoffrxc) adapter->shared->isc_pause_frames = 1; adapter->stats.xofftxc += IGC_READ_REG(&adapter->hw, IGC_XOFFTXC); adapter->stats.fcruc += IGC_READ_REG(&adapter->hw, IGC_FCRUC); adapter->stats.prc64 += IGC_READ_REG(&adapter->hw, IGC_PRC64); adapter->stats.prc127 += IGC_READ_REG(&adapter->hw, IGC_PRC127); adapter->stats.prc255 += IGC_READ_REG(&adapter->hw, IGC_PRC255); adapter->stats.prc511 += IGC_READ_REG(&adapter->hw, IGC_PRC511); adapter->stats.prc1023 += IGC_READ_REG(&adapter->hw, IGC_PRC1023); adapter->stats.prc1522 += IGC_READ_REG(&adapter->hw, IGC_PRC1522); adapter->stats.tlpic += IGC_READ_REG(&adapter->hw, IGC_TLPIC); adapter->stats.rlpic += IGC_READ_REG(&adapter->hw, IGC_RLPIC); adapter->stats.gprc += IGC_READ_REG(&adapter->hw, IGC_GPRC); adapter->stats.bprc += IGC_READ_REG(&adapter->hw, IGC_BPRC); adapter->stats.mprc += IGC_READ_REG(&adapter->hw, IGC_MPRC); adapter->stats.gptc += IGC_READ_REG(&adapter->hw, IGC_GPTC); /* For the 64-bit byte counters the low dword must be read first. */ /* Both registers clear on the read of the high dword */ adapter->stats.gorc += IGC_READ_REG(&adapter->hw, IGC_GORCL) + ((u64)IGC_READ_REG(&adapter->hw, IGC_GORCH) << 32); adapter->stats.gotc += IGC_READ_REG(&adapter->hw, IGC_GOTCL) + ((u64)IGC_READ_REG(&adapter->hw, IGC_GOTCH) << 32); adapter->stats.rnbc += IGC_READ_REG(&adapter->hw, IGC_RNBC); adapter->stats.ruc += IGC_READ_REG(&adapter->hw, IGC_RUC); adapter->stats.rfc += IGC_READ_REG(&adapter->hw, IGC_RFC); adapter->stats.roc += IGC_READ_REG(&adapter->hw, IGC_ROC); adapter->stats.rjc += IGC_READ_REG(&adapter->hw, IGC_RJC); adapter->stats.tor += IGC_READ_REG(&adapter->hw, IGC_TORH); adapter->stats.tot += IGC_READ_REG(&adapter->hw, IGC_TOTH); adapter->stats.tpr += IGC_READ_REG(&adapter->hw, IGC_TPR); adapter->stats.tpt += IGC_READ_REG(&adapter->hw, IGC_TPT); adapter->stats.ptc64 += IGC_READ_REG(&adapter->hw, IGC_PTC64); adapter->stats.ptc127 += IGC_READ_REG(&adapter->hw, IGC_PTC127); adapter->stats.ptc255 += IGC_READ_REG(&adapter->hw, IGC_PTC255); adapter->stats.ptc511 += IGC_READ_REG(&adapter->hw, IGC_PTC511); adapter->stats.ptc1023 += IGC_READ_REG(&adapter->hw, IGC_PTC1023); adapter->stats.ptc1522 += IGC_READ_REG(&adapter->hw, IGC_PTC1522); adapter->stats.mptc += IGC_READ_REG(&adapter->hw, IGC_MPTC); adapter->stats.bptc += IGC_READ_REG(&adapter->hw, IGC_BPTC); /* Interrupt Counts */ adapter->stats.iac += IGC_READ_REG(&adapter->hw, IGC_IAC); adapter->stats.rxdmtc += IGC_READ_REG(&adapter->hw, IGC_RXDMTC); adapter->stats.algnerrc += IGC_READ_REG(&adapter->hw, IGC_ALGNERRC); adapter->stats.tncrs += IGC_READ_REG(&adapter->hw, IGC_TNCRS); adapter->stats.htdpmc += IGC_READ_REG(&adapter->hw, IGC_HTDPMC); adapter->stats.tsctc += IGC_READ_REG(&adapter->hw, IGC_TSCTC); } static uint64_t igc_if_get_counter(if_ctx_t ctx, ift_counter cnt) { struct igc_adapter *adapter = iflib_get_softc(ctx); if_t ifp = iflib_get_ifp(ctx); switch (cnt) { case IFCOUNTER_COLLISIONS: return (adapter->stats.colc); case IFCOUNTER_IERRORS: return (adapter->dropped_pkts + adapter->stats.rxerrc + adapter->stats.crcerrs + adapter->stats.algnerrc + adapter->stats.ruc + adapter->stats.roc + adapter->stats.mpc + adapter->stats.htdpmc); case IFCOUNTER_OERRORS: return (adapter->stats.ecol + adapter->stats.latecol + adapter->watchdog_events); default: return (if_get_counter_default(ifp, cnt)); } } /* igc_if_needs_restart - Tell iflib when the driver needs to be reinitialized * @ctx: iflib context * @event: event code to check * * Defaults to returning 
false for unknown events. * * @returns true if iflib needs to reinit the interface */ static bool igc_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event) { switch (event) { case IFLIB_RESTART_VLAN_CONFIG: default: return (false); } } /* Export a single 32-bit register via a read-only sysctl. */ static int igc_sysctl_reg_handler(SYSCTL_HANDLER_ARGS) { struct igc_adapter *adapter; u_int val; adapter = oidp->oid_arg1; val = IGC_READ_REG(&adapter->hw, oidp->oid_arg2); return (sysctl_handle_int(oidp, &val, 0, req)); } /* * Add sysctl variables, one per statistic, to the system. */ static void igc_add_hw_stats(struct igc_adapter *adapter) { device_t dev = iflib_get_dev(adapter->ctx); struct igc_tx_queue *tx_que = adapter->tx_queues; struct igc_rx_queue *rx_que = adapter->rx_queues; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct sysctl_oid *tree = device_get_sysctl_tree(dev); struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); struct igc_hw_stats *stats = &adapter->stats; struct sysctl_oid *stat_node, *queue_node, *int_node; struct sysctl_oid_list *stat_list, *queue_list, *int_list; #define QUEUE_NAME_LEN 32 char namebuf[QUEUE_NAME_LEN]; /* Driver Statistics */ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq", CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns", CTLFLAG_RD, &adapter->rx_overruns, "RX overruns"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts", CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, IGC_CTRL, igc_sysctl_reg_handler, "IU", "Device Control Register"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, IGC_RCTL, igc_sysctl_reg_handler, "IU", "Receiver Control Register"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water", CTLFLAG_RD, &adapter->hw.fc.high_water, 0, "Flow Control High Watermark"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water", CTLFLAG_RD, &adapter->hw.fc.low_water, 0, "Flow Control Low Watermark"); for (int i = 0; i < adapter->tx_num_queues; i++, tx_que++) { struct tx_ring *txr = &tx_que->txr; snprintf(namebuf, QUEUE_NAME_LEN, "queue_tx_%d", i); queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, IGC_TDH(txr->me), igc_sysctl_reg_handler, "IU", "Transmit Descriptor Head"); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, IGC_TDT(txr->me), igc_sysctl_reg_handler, "IU", "Transmit Descriptor Tail"); SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tx_irq", CTLFLAG_RD, &txr->tx_irq, "Queue MSI-X Transmit Interrupts"); } for (int j = 0; j < adapter->rx_num_queues; j++, rx_que++) { struct rx_ring *rxr = &rx_que->rxr; snprintf(namebuf, QUEUE_NAME_LEN, "queue_rx_%d", j); queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, IGC_RDH(rxr->me), igc_sysctl_reg_handler, "IU", "Receive 
Descriptor Head"); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, IGC_RDT(rxr->me), igc_sysctl_reg_handler, "IU", "Receive Descriptor Tail"); SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "rx_irq", CTLFLAG_RD, &rxr->rx_irq, "Queue MSI-X Receive Interrupts"); } /* MAC stats get their own sub node */ stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Statistics"); stat_list = SYSCTL_CHILDREN(stat_node); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll", CTLFLAG_RD, &stats->ecol, "Excessive collisions"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll", CTLFLAG_RD, &stats->scc, "Single collisions"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll", CTLFLAG_RD, &stats->mcc, "Multiple collisions"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll", CTLFLAG_RD, &stats->latecol, "Late collisions"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count", CTLFLAG_RD, &stats->colc, "Collision Count"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors", CTLFLAG_RD, &adapter->stats.symerrs, "Symbol Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors", CTLFLAG_RD, &adapter->stats.sec, "Sequence Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count", CTLFLAG_RD, &adapter->stats.dc, "Defer Count"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets", CTLFLAG_RD, &adapter->stats.mpc, "Missed Packets"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff", CTLFLAG_RD, &adapter->stats.rnbc, "Receive No Buffers"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize", CTLFLAG_RD, &adapter->stats.ruc, "Receive Undersize"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented", CTLFLAG_RD, &adapter->stats.rfc, "Fragmented Packets Received "); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize", CTLFLAG_RD, &adapter->stats.roc, "Oversized Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber", CTLFLAG_RD, &adapter->stats.rjc, "Recevied Jabber"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs", CTLFLAG_RD, &adapter->stats.rxerrc, "Receive Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs", CTLFLAG_RD, &adapter->stats.crcerrs, "CRC errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs", CTLFLAG_RD, &adapter->stats.algnerrc, "Alignment Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd", CTLFLAG_RD, &adapter->stats.xonrxc, "XON Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd", CTLFLAG_RD, &adapter->stats.xontxc, "XON Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd", CTLFLAG_RD, &adapter->stats.xoffrxc, "XOFF Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd", CTLFLAG_RD, &adapter->stats.xofftxc, "XOFF Transmitted"); /* Packet Reception Stats */ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd", CTLFLAG_RD, &adapter->stats.tpr, "Total Packets Received "); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd", CTLFLAG_RD, &adapter->stats.gprc, "Good Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd", CTLFLAG_RD, &adapter->stats.bprc, "Broadcast Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd", CTLFLAG_RD, &adapter->stats.mprc, "Multicast Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64", CTLFLAG_RD, &adapter->stats.prc64, "64 byte frames received "); SYSCTL_ADD_UQUAD(ctx, 
stat_list, OID_AUTO, "rx_frames_65_127", CTLFLAG_RD, &adapter->stats.prc127, "65-127 byte frames received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255", CTLFLAG_RD, &adapter->stats.prc255, "128-255 byte frames received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511", CTLFLAG_RD, &adapter->stats.prc511, "256-511 byte frames received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023", CTLFLAG_RD, &adapter->stats.prc1023, "512-1023 byte frames received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522", CTLFLAG_RD, &adapter->stats.prc1522, "1023-1522 byte frames received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd", CTLFLAG_RD, &adapter->stats.gorc, "Good Octets Received"); /* Packet Transmission Stats */ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", CTLFLAG_RD, &adapter->stats.gotc, "Good Octets Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd", CTLFLAG_RD, &adapter->stats.tpt, "Total Packets Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", CTLFLAG_RD, &adapter->stats.gptc, "Good Packets Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd", CTLFLAG_RD, &adapter->stats.bptc, "Broadcast Packets Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd", CTLFLAG_RD, &adapter->stats.mptc, "Multicast Packets Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64", CTLFLAG_RD, &adapter->stats.ptc64, "64 byte frames transmitted "); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127", CTLFLAG_RD, &adapter->stats.ptc127, "65-127 byte frames transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255", CTLFLAG_RD, &adapter->stats.ptc255, "128-255 byte frames transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511", CTLFLAG_RD, &adapter->stats.ptc511, "256-511 byte frames transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023", CTLFLAG_RD, &adapter->stats.ptc1023, "512-1023 byte frames transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522", CTLFLAG_RD, &adapter->stats.ptc1522, "1024-1522 byte frames transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd", CTLFLAG_RD, &adapter->stats.tsctc, "TSO Contexts Transmitted"); /* Interrupt Stats */ int_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "interrupts", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Interrupt Statistics"); int_list = SYSCTL_CHILDREN(int_node); SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "asserts", CTLFLAG_RD, &adapter->stats.iac, "Interrupt Assertion Count"); SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_desc_min_thresh", CTLFLAG_RD, &adapter->stats.rxdmtc, "Rx Desc Min Thresh Count"); } +static void +igc_fw_version(struct igc_adapter *sc) +{ + struct igc_hw *hw = &sc->hw; + struct igc_fw_version *fw_ver = &sc->fw_ver; + + *fw_ver = (struct igc_fw_version){0}; + + igc_get_fw_version(hw, fw_ver); +} + +static void +igc_sbuf_fw_version(struct igc_fw_version *fw_ver, struct sbuf *buf) +{ + const char *space = ""; + + if (fw_ver->eep_major || fw_ver->eep_minor || fw_ver->eep_build) { + sbuf_printf(buf, "EEPROM V%d.%d-%d", fw_ver->eep_major, + fw_ver->eep_minor, fw_ver->eep_build); + space = " "; + } + + if (fw_ver->invm_major || fw_ver->invm_minor || fw_ver->invm_img_type) { + sbuf_printf(buf, "%sNVM V%d.%d imgtype%d", + space, fw_ver->invm_major, fw_ver->invm_minor, + fw_ver->invm_img_type); + space = " "; + } + + if (fw_ver->or_valid) { 
+ sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d", + space, fw_ver->or_major, fw_ver->or_build, + fw_ver->or_patch); + space = " "; + } + + if (fw_ver->etrack_id) + sbuf_printf(buf, "%seTrack 0x%08x", space, fw_ver->etrack_id); +} + +static void +igc_print_fw_version(struct igc_adapter *sc ) +{ + device_t dev = sc->dev; + struct sbuf *buf; + int error = 0; + + buf = sbuf_new_auto(); + if (!buf) { + device_printf(dev, "Could not allocate sbuf for output.\n"); + return; + } + + igc_sbuf_fw_version(&sc->fw_ver, buf); + + error = sbuf_finish(buf); + if (error) + device_printf(dev, "Error finishing sbuf: %d\n", error); + else if (sbuf_len(buf)) + device_printf(dev, "%s\n", sbuf_data(buf)); + + sbuf_delete(buf); +} + +static int +igc_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS) +{ + struct igc_adapter *sc = (struct igc_adapter *)arg1; + device_t dev = sc->dev; + struct sbuf *buf; + int error = 0; + + buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); + if (!buf) { + device_printf(dev, "Could not allocate sbuf for output.\n"); + return (ENOMEM); + } + + igc_sbuf_fw_version(&sc->fw_ver, buf); + + error = sbuf_finish(buf); + if (error) + device_printf(dev, "Error finishing sbuf: %d\n", error); + + sbuf_delete(buf); + + return (0); +} + /********************************************************************** * * This routine provides a way to dump out the adapter eeprom, * often a useful debug/service tool. This only dumps the first * 32 words, stuff that matters is in that extent. * **********************************************************************/ static int igc_sysctl_nvm_info(SYSCTL_HANDLER_ARGS) { struct igc_adapter *adapter = (struct igc_adapter *)arg1; int error; int result; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); /* * This value will cause a hex dump of the * first 32 16-bit words of the EEPROM to * the screen. */ if (result == 1) igc_print_nvm_info(adapter); return (error); } static void igc_print_nvm_info(struct igc_adapter *adapter) { u16 eeprom_data; int i, j, row = 0; /* Its a bit crude, but it gets the job done */ printf("\nInterface EEPROM Dump:\n"); printf("Offset\n0x0000 "); for (i = 0, j = 0; i < 32; i++, j++) { if (j == 8) { /* Make the offset block */ j = 0; ++row; printf("\n0x00%x0 ",row); } igc_read_nvm(&adapter->hw, i, 1, &eeprom_data); printf("%04x ", eeprom_data); } printf("\n"); } /* * Set flow control using sysctl: * Flow control values: * 0 - off * 1 - rx pause * 2 - tx pause * 3 - full */ static int igc_set_flowcntl(SYSCTL_HANDLER_ARGS) { int error; static int input = 3; /* default is full */ struct igc_adapter *adapter = (struct igc_adapter *) arg1; error = sysctl_handle_int(oidp, &input, 0, req); if ((error) || (req->newptr == NULL)) return (error); if (input == adapter->fc) /* no change? 
*/ return (error); switch (input) { case igc_fc_rx_pause: case igc_fc_tx_pause: case igc_fc_full: case igc_fc_none: adapter->hw.fc.requested_mode = input; adapter->fc = input; break; default: /* Do nothing */ return (error); } adapter->hw.fc.current_mode = adapter->hw.fc.requested_mode; igc_force_mac_fc(&adapter->hw); return (error); } /* * Manage Energy Efficient Ethernet: * Control values: * 0/1 - enabled/disabled */ static int igc_sysctl_eee(SYSCTL_HANDLER_ARGS) { struct igc_adapter *adapter = (struct igc_adapter *) arg1; int error, value; value = adapter->hw.dev_spec._i225.eee_disable; error = sysctl_handle_int(oidp, &value, 0, req); if (error || req->newptr == NULL) return (error); adapter->hw.dev_spec._i225.eee_disable = (value != 0); igc_if_init(adapter->ctx); return (0); } static int igc_sysctl_debug_info(SYSCTL_HANDLER_ARGS) { struct igc_adapter *adapter; int error; int result; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { adapter = (struct igc_adapter *) arg1; igc_print_debug_info(adapter); } return (error); } static int igc_get_rs(SYSCTL_HANDLER_ARGS) { struct igc_adapter *adapter = (struct igc_adapter *) arg1; int error; int result; result = 0; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr || result != 1) return (error); igc_dump_rs(adapter); return (error); } static void igc_if_debug(if_ctx_t ctx) { igc_dump_rs(iflib_get_softc(ctx)); } /* * This routine is meant to be fluid, add whatever is * needed for debugging a problem. -jfv */ static void igc_print_debug_info(struct igc_adapter *adapter) { device_t dev = iflib_get_dev(adapter->ctx); if_t ifp = iflib_get_ifp(adapter->ctx); struct tx_ring *txr = &adapter->tx_queues->txr; struct rx_ring *rxr = &adapter->rx_queues->rxr; if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) printf("Interface is RUNNING "); else printf("Interface is NOT RUNNING\n"); if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) printf("and INACTIVE\n"); else printf("and ACTIVE\n"); for (int i = 0; i < adapter->tx_num_queues; i++, txr++) { device_printf(dev, "TX Queue %d ------\n", i); device_printf(dev, "hw tdh = %d, hw tdt = %d\n", IGC_READ_REG(&adapter->hw, IGC_TDH(i)), IGC_READ_REG(&adapter->hw, IGC_TDT(i))); } for (int j=0; j < adapter->rx_num_queues; j++, rxr++) { device_printf(dev, "RX Queue %d ------\n", j); device_printf(dev, "hw rdh = %d, hw rdt = %d\n", IGC_READ_REG(&adapter->hw, IGC_RDH(j)), IGC_READ_REG(&adapter->hw, IGC_RDT(j))); } } diff --git a/sys/dev/igc/if_igc.h b/sys/dev/igc/if_igc.h index 92865b5b9eb1..727699baee5f 100644 --- a/sys/dev/igc/if_igc.h +++ b/sys/dev/igc/if_igc.h @@ -1,358 +1,360 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2016 Nicole Graziano * All rights reserved. * Copyright (c) 2021 Rubicon Communications, LLC (Netgate) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opt_ddb.h" #include "opt_inet.h" #include "opt_inet6.h" #include "opt_rss.h" #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #ifdef DDB #include #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "igc_api.h" #include "igc_i225.h" #include "ifdi_if.h" #ifndef _IGC_H_DEFINED_ #define _IGC_H_DEFINED_ /* Tunables */ /* * IGC_MAX_TXD: Maximum number of Transmit Descriptors * Valid Range: 128-4096 * Default Value: 1024 * This value is the number of transmit descriptors allocated by the driver. * Increasing this value allows the driver to queue more transmits. Each * descriptor is 16 bytes. * Since TDLEN should be multiple of 128bytes, the number of transmit * desscriptors should meet the following condition. * (num_tx_desc * sizeof(struct igc_tx_desc)) % 128 == 0 */ #define IGC_MIN_TXD 128 #define IGC_MAX_TXD 4096 #define IGC_DEFAULT_TXD 1024 #define IGC_DEFAULT_MULTI_TXD 4096 #define IGC_MAX_TXD 4096 /* * IGC_MAX_RXD - Maximum number of receive Descriptors * Valid Range: 128-4096 * Default Value: 1024 * This value is the number of receive descriptors allocated by the driver. * Increasing this value allows the driver to buffer more incoming packets. * Each descriptor is 16 bytes. A receive buffer is also allocated for each * descriptor. The maximum MTU size is 16110. * Since TDLEN should be multiple of 128bytes, the number of transmit * desscriptors should meet the following condition. * (num_tx_desc * sizeof(struct igc_tx_desc)) % 128 == 0 */ #define IGC_MIN_RXD 128 #define IGC_MAX_RXD 4096 #define IGC_DEFAULT_RXD 1024 #define IGC_DEFAULT_MULTI_RXD 4096 #define IGC_MAX_RXD 4096 /* * This parameter controls whether or not autonegotation is enabled. * 0 - Disable autonegotiation * 1 - Enable autonegotiation */ #define DO_AUTO_NEG true /* Tunables -- End */ #define AUTONEG_ADV_DEFAULT (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ ADVERTISE_100_HALF | ADVERTISE_100_FULL | \ ADVERTISE_1000_FULL | ADVERTISE_2500_FULL) #define AUTO_ALL_MODES 0 /* * Micellaneous constants */ #define MAX_NUM_MULTICAST_ADDRESSES 128 #define IGC_FC_PAUSE_TIME 0x0680 #define IGC_TXPBSIZE 20408 #define IGC_PKTTYPE_MASK 0x0000FFF0 #define IGC_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coalesce Flush */ #define IGC_RX_PTHRESH 8 #define IGC_RX_HTHRESH 8 #define IGC_RX_WTHRESH 4 #define IGC_TX_PTHRESH 8 #define IGC_TX_HTHRESH 1 /* * TDBA/RDBA should be aligned on 16 byte boundary. 
But TDLEN/RDLEN should be * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will * also optimize cache line size effect. H/W supports up to cache line size 128. */ #define IGC_DBA_ALIGN 128 #define IGC_MSIX_BAR 3 /* Defines for printing debug information */ #define DEBUG_INIT 0 #define DEBUG_IOCTL 0 #define DEBUG_HW 0 #define INIT_DEBUGOUT(S) if (DEBUG_INIT) printf(S "\n") #define INIT_DEBUGOUT1(S, A) if (DEBUG_INIT) printf(S "\n", A) #define INIT_DEBUGOUT2(S, A, B) if (DEBUG_INIT) printf(S "\n", A, B) #define IOCTL_DEBUGOUT(S) if (DEBUG_IOCTL) printf(S "\n") #define IOCTL_DEBUGOUT1(S, A) if (DEBUG_IOCTL) printf(S "\n", A) #define IOCTL_DEBUGOUT2(S, A, B) if (DEBUG_IOCTL) printf(S "\n", A, B) #define HW_DEBUGOUT(S) if (DEBUG_HW) printf(S "\n") #define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A) #define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B) #define IGC_MAX_SCATTER 40 #define IGC_VFTA_SIZE 128 #define IGC_TSO_SIZE 65535 #define IGC_TSO_SEG_SIZE 4096 /* Max dma segment size */ #define IGC_CSUM_OFFLOAD (CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP | \ CSUM_IP_SCTP | CSUM_IP6_UDP | CSUM_IP6_TCP | \ CSUM_IP6_SCTP) /* Offload bits in mbuf flag */ struct igc_adapter; struct igc_int_delay_info { struct igc_adapter *adapter; /* Back-pointer to the adapter struct */ int offset; /* Register offset to read/write */ int value; /* Current value in usecs */ }; /* * The transmit ring, one per tx queue */ struct tx_ring { struct igc_adapter *adapter; struct igc_tx_desc *tx_base; uint64_t tx_paddr; qidx_t *tx_rsq; uint8_t me; qidx_t tx_rs_cidx; qidx_t tx_rs_pidx; qidx_t tx_cidx_processed; /* Interrupt resources */ void *tag; struct resource *res; unsigned long tx_irq; /* Saved csum offloading context information */ int csum_flags; int csum_lhlen; int csum_iphlen; int csum_thlen; int csum_mss; int csum_pktlen; uint32_t csum_txd_upper; uint32_t csum_txd_lower; /* last field */ }; /* * The Receive ring, one per rx queue */ struct rx_ring { struct igc_adapter *adapter; struct igc_rx_queue *que; u32 me; u32 payload; union igc_rx_desc_extended *rx_base; uint64_t rx_paddr; /* Interrupt resources */ void *tag; struct resource *res; /* Soft stats */ unsigned long rx_irq; unsigned long rx_discarded; unsigned long rx_packets; unsigned long rx_bytes; }; struct igc_tx_queue { struct igc_adapter *adapter; u32 msix; u32 eims; /* This queue's EIMS bit */ u32 me; struct tx_ring txr; }; struct igc_rx_queue { struct igc_adapter *adapter; u32 me; u32 msix; u32 eims; struct rx_ring rxr; u64 irqs; struct if_irq que_irq; }; /* Our adapter structure */ struct igc_adapter { if_t ifp; struct igc_hw hw; if_softc_ctx_t shared; if_ctx_t ctx; #define tx_num_queues shared->isc_ntxqsets #define rx_num_queues shared->isc_nrxqsets #define intr_type shared->isc_intr /* FreeBSD operating-system-specific structures. 
*/ struct igc_osdep osdep; device_t dev; struct cdev *led_dev; struct igc_tx_queue *tx_queues; struct igc_rx_queue *rx_queues; struct if_irq irq; struct resource *memory; struct resource *flash; struct resource *ioport; struct resource *res; void *tag; u32 linkvec; u32 ivars; struct ifmedia *media; int msix; int if_flags; int igc_insert_vlan_header; u32 ims; u32 flags; /* Task for FAST handling */ struct grouptask link_task; u32 txd_cmd; u32 rx_mbuf_sz; /* Management and WOL features */ u32 wol; /* Multicast array memory */ u8 *mta; /* Info about the interface */ u16 link_active; u16 fc; u16 link_speed; u16 link_duplex; u32 smartspeed; u32 dmac; int link_mask; u64 que_mask; + struct igc_fw_version fw_ver; + struct igc_int_delay_info tx_int_delay; struct igc_int_delay_info tx_abs_int_delay; struct igc_int_delay_info rx_int_delay; struct igc_int_delay_info rx_abs_int_delay; struct igc_int_delay_info tx_itr; /* Misc stats maintained by the driver */ unsigned long dropped_pkts; unsigned long link_irq; unsigned long rx_overruns; unsigned long watchdog_events; struct igc_hw_stats stats; u16 vf_ifp; }; void igc_dump_rs(struct igc_adapter *); #define IGC_RSSRK_SIZE 4 #define IGC_RSSRK_VAL(key, i) (key[(i) * IGC_RSSRK_SIZE] | \ key[(i) * IGC_RSSRK_SIZE + 1] << 8 | \ key[(i) * IGC_RSSRK_SIZE + 2] << 16 | \ key[(i) * IGC_RSSRK_SIZE + 3] << 24) #endif /* _IGC_H_DEFINED_ */ diff --git a/sys/dev/igc/igc_defines.h b/sys/dev/igc/igc_defines.h index 1701918c3a9c..e9ebd23c41c3 100644 --- a/sys/dev/igc/igc_defines.h +++ b/sys/dev/igc/igc_defines.h @@ -1,1345 +1,1365 @@ /*- * Copyright 2021 Intel Corp * Copyright 2021 Rubicon Communications, LLC (Netgate) * SPDX-License-Identifier: BSD-3-Clause */ #ifndef _IGC_DEFINES_H_ #define _IGC_DEFINES_H_ /* Number of Transmit and Receive Descriptors must be a multiple of 8 */ #define REQ_TX_DESCRIPTOR_MULTIPLE 8 #define REQ_RX_DESCRIPTOR_MULTIPLE 8 /* Definitions for power management and wakeup registers */ /* Wake Up Control */ #define IGC_WUC_APME 0x00000001 /* APM Enable */ #define IGC_WUC_PME_EN 0x00000002 /* PME Enable */ #define IGC_WUC_PME_STATUS 0x00000004 /* PME Status */ #define IGC_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */ #define IGC_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */ /* Wake Up Filter Control */ #define IGC_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ #define IGC_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ #define IGC_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ #define IGC_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ #define IGC_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ #define IGC_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ #define IGC_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ /* Wake Up Status */ #define IGC_WUS_LNKC IGC_WUFC_LNKC #define IGC_WUS_MAG IGC_WUFC_MAG #define IGC_WUS_EX IGC_WUFC_EX #define IGC_WUS_MC IGC_WUFC_MC #define IGC_WUS_BC IGC_WUFC_BC /* Packet types that are enabled for wake packet delivery */ #define WAKE_PKT_WUS ( \ IGC_WUS_EX | \ IGC_WUS_ARPD | \ IGC_WUS_IPV4 | \ IGC_WUS_IPV6 | \ IGC_WUS_NSD) /* Wake Up Packet Length */ #define IGC_WUPL_MASK 0x00000FFF /* Wake Up Packet Memory stores the first 128 bytes of the wake up packet */ #define IGC_WUPM_BYTES 128 #define IGC_WUS_ARPD 0x00000020 /* Directed ARP Request */ #define IGC_WUS_IPV4 0x00000040 /* Directed IPv4 */ #define IGC_WUS_IPV6 0x00000080 /* Directed IPv6 */ #define IGC_WUS_NSD 0x00000400 /* Directed IPv6 Neighbor Solicitation */ /* Extended Device Control */ 
#define IGC_CTRL_EXT_LPCD 0x00000004 /* LCD Power Cycle Done */ #define IGC_CTRL_EXT_SDP4_DATA 0x00000010 /* SW Definable Pin 4 data */ #define IGC_CTRL_EXT_SDP6_DATA 0x00000040 /* SW Definable Pin 6 data */ #define IGC_CTRL_EXT_SDP3_DATA 0x00000080 /* SW Definable Pin 3 data */ #define IGC_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */ #define IGC_CTRL_EXT_SDP3_DIR 0x00000800 /* Direction of SDP3 0=in 1=out */ #define IGC_CTRL_EXT_FORCE_SMBUS 0x00000800 /* Force SMBus mode */ #define IGC_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ #define IGC_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ #define IGC_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ #define IGC_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clk Gating */ #define IGC_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 #define IGC_CTRL_EXT_EIAME 0x01000000 #define IGC_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */ #define IGC_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */ #define IGC_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ #define IGC_CTRL_EXT_PHYPDEN 0x00100000 #define IGC_IVAR_VALID 0x80 #define IGC_GPIE_NSICR 0x00000001 #define IGC_GPIE_MSIX_MODE 0x00000010 #define IGC_GPIE_EIAME 0x40000000 #define IGC_GPIE_PBA 0x80000000 /* Receive Descriptor bit definitions */ #define IGC_RXD_STAT_DD 0x01 /* Descriptor Done */ #define IGC_RXD_STAT_EOP 0x02 /* End of Packet */ #define IGC_RXD_STAT_IXSM 0x04 /* Ignore checksum */ #define IGC_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ #define IGC_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ #define IGC_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ #define IGC_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ #define IGC_RXD_STAT_PIF 0x80 /* passed in-exact filter */ #define IGC_RXD_STAT_IPIDV 0x200 /* IP identification valid */ #define IGC_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ #define IGC_RXD_ERR_CE 0x01 /* CRC Error */ #define IGC_RXD_ERR_SE 0x02 /* Symbol Error */ #define IGC_RXD_ERR_SEQ 0x04 /* Sequence Error */ #define IGC_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ #define IGC_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ #define IGC_RXD_ERR_IPE 0x40 /* IP Checksum Error */ #define IGC_RXD_ERR_RXE 0x80 /* Rx Data Error */ #define IGC_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ #define IGC_RXDEXT_STATERR_TST 0x00000100 /* Time Stamp taken */ #define IGC_RXDEXT_STATERR_LB 0x00040000 #define IGC_RXDEXT_STATERR_L4E 0x20000000 #define IGC_RXDEXT_STATERR_IPE 0x40000000 #define IGC_RXDEXT_STATERR_RXE 0x80000000 /* Same mask, but for extended and packet split descriptors */ #define IGC_RXDEXT_ERR_FRAME_ERR_MASK ( \ IGC_RXDEXT_STATERR_CE | \ IGC_RXDEXT_STATERR_SE | \ IGC_RXDEXT_STATERR_SEQ | \ IGC_RXDEXT_STATERR_CXE | \ IGC_RXDEXT_STATERR_RXE) #if !defined(EXTERNAL_RELEASE) || defined(IGCE_MQ) #define IGC_MRQC_ENABLE_RSS_2Q 0x00000001 #endif /* !EXTERNAL_RELEASE || IGCE_MQ */ #define IGC_MRQC_RSS_FIELD_MASK 0xFFFF0000 #define IGC_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 #define IGC_MRQC_RSS_FIELD_IPV4 0x00020000 #define IGC_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 #define IGC_MRQC_RSS_FIELD_IPV6 0x00100000 #define IGC_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 #define IGC_RXDPS_HDRSTAT_HDRSP 0x00008000 /* Management Control */ #define IGC_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ #define IGC_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ #define IGC_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ #define IGC_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ #define IGC_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* 
Block phy resets */ /* Enable MAC address filtering */ #define IGC_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MNG packets to host memory */ #define IGC_MANC_EN_MNG2HOST 0x00200000 #define IGC_MANC2H_PORT_623 0x00000020 /* Port 0x26f */ #define IGC_MANC2H_PORT_664 0x00000040 /* Port 0x298 */ #define IGC_MDEF_PORT_623 0x00000800 /* Port 0x26f */ #define IGC_MDEF_PORT_664 0x00000400 /* Port 0x298 */ /* Receive Control */ #define IGC_RCTL_RST 0x00000001 /* Software reset */ #define IGC_RCTL_EN 0x00000002 /* enable */ #define IGC_RCTL_SBP 0x00000004 /* store bad packet */ #define IGC_RCTL_UPE 0x00000008 /* unicast promisc enable */ #define IGC_RCTL_MPE 0x00000010 /* multicast promisc enable */ #define IGC_RCTL_LPE 0x00000020 /* long packet enable */ #define IGC_RCTL_LBM_NO 0x00000000 /* no loopback mode */ #define IGC_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ #define IGC_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ #define IGC_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ #define IGC_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */ #define IGC_RCTL_RDMTS_HEX 0x00010000 #define IGC_RCTL_RDMTS1_HEX IGC_RCTL_RDMTS_HEX #define IGC_RCTL_MO_SHIFT 12 /* multicast offset shift */ #define IGC_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ #define IGC_RCTL_BAM 0x00008000 /* broadcast enable */ /* these buffer sizes are valid if IGC_RCTL_BSEX is 0 */ #define IGC_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */ #define IGC_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */ #define IGC_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */ #define IGC_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */ /* these buffer sizes are valid if IGC_RCTL_BSEX is 1 */ #define IGC_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */ #define IGC_RCTL_SZ_8192 0x00020000 /* Rx buffer size 8192 */ #define IGC_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */ #define IGC_RCTL_VFE 0x00040000 /* vlan filter enable */ #define IGC_RCTL_CFIEN 0x00080000 /* canonical form enable */ #define IGC_RCTL_CFI 0x00100000 /* canonical form indicator */ #define IGC_RCTL_DPF 0x00400000 /* discard pause frames */ #define IGC_RCTL_PMCF 0x00800000 /* pass MAC control frames */ #define IGC_RCTL_BSEX 0x02000000 /* Buffer size extension */ #define IGC_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ /* Use byte values for the following shift parameters * Usage: * psrctl |= (((ROUNDUP(value0, 128) >> IGC_PSRCTL_BSIZE0_SHIFT) & * IGC_PSRCTL_BSIZE0_MASK) | * ((ROUNDUP(value1, 1024) >> IGC_PSRCTL_BSIZE1_SHIFT) & * IGC_PSRCTL_BSIZE1_MASK) | * ((ROUNDUP(value2, 1024) << IGC_PSRCTL_BSIZE2_SHIFT) & * IGC_PSRCTL_BSIZE2_MASK) | * ((ROUNDUP(value3, 1024) << IGC_PSRCTL_BSIZE3_SHIFT) |; * IGC_PSRCTL_BSIZE3_MASK)) * where value0 = [128..16256], default=256 * value1 = [1024..64512], default=4096 * value2 = [0..64512], default=4096 * value3 = [0..64512], default=0 */ #define IGC_PSRCTL_BSIZE0_MASK 0x0000007F #define IGC_PSRCTL_BSIZE1_MASK 0x00003F00 #define IGC_PSRCTL_BSIZE2_MASK 0x003F0000 #define IGC_PSRCTL_BSIZE3_MASK 0x3F000000 #define IGC_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ #define IGC_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ #define IGC_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ #define IGC_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ /* SWFW_SYNC Definitions */ #define IGC_SWFW_EEP_SM 0x01 #define IGC_SWFW_PHY0_SM 0x02 #define IGC_SWFW_PHY1_SM 0x04 #define IGC_SWFW_CSR_SM 0x08 #define IGC_SWFW_SW_MNG_SM 0x400 /* Device Control */ #define IGC_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ #define IGC_CTRL_PRIOR 0x00000004 
/* Priority on PCI. 0=rx,1=fair */ #define IGC_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master reqs */ #define IGC_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ #define IGC_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ #define IGC_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ #define IGC_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ #define IGC_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ #define IGC_CTRL_SPD_10 0x00000000 /* Force 10Mb */ #define IGC_CTRL_SPD_100 0x00000100 /* Force 100Mb */ #define IGC_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ #define IGC_CTRL_FRCSPD 0x00000800 /* Force Speed */ #define IGC_CTRL_FRCDPX 0x00001000 /* Force Duplex */ #define IGC_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ #define IGC_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ #define IGC_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ #define IGC_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ #define IGC_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */ #define IGC_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ #define IGC_CTRL_DEV_RST 0x20000000 /* Device reset */ #define IGC_CTRL_RST 0x04000000 /* Global reset */ #define IGC_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ #define IGC_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ #define IGC_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ #define IGC_CTRL_PHY_RST 0x80000000 /* PHY Reset */ #define IGC_CONNSW_AUTOSENSE_EN 0x1 #define IGC_PCS_LCTL_FORCE_FCTRL 0x80 #define IGC_PCS_LSTS_AN_COMPLETE 0x10000 /* Device Status */ #define IGC_STATUS_FD 0x00000001 /* Duplex 0=half 1=full */ #define IGC_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ #define IGC_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ #define IGC_STATUS_FUNC_SHIFT 2 #define IGC_STATUS_FUNC_1 0x00000004 /* Function 1 */ #define IGC_STATUS_TXOFF 0x00000010 /* transmission paused */ #define IGC_STATUS_SPEED_MASK 0x000000C0 #define IGC_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ #define IGC_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ #define IGC_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ #define IGC_STATUS_SPEED_2500 0x00400000 /* Speed 2.5Gb/s indication for I225 */ #define IGC_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Compltn by NVM */ #define IGC_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */ #define IGC_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master request status */ #define IGC_STATUS_2P5_SKU 0x00001000 /* Val of 2.5GBE SKU strap */ #define IGC_STATUS_2P5_SKU_OVER 0x00002000 /* Val of 2.5GBE SKU Over */ #define IGC_STATUS_PCIM_STATE 0x40000000 /* PCIm function state */ #define SPEED_10 10 #define SPEED_100 100 #define SPEED_1000 1000 #define SPEED_2500 2500 #define HALF_DUPLEX 1 #define FULL_DUPLEX 2 #define ADVERTISE_10_HALF 0x0001 #define ADVERTISE_10_FULL 0x0002 #define ADVERTISE_100_HALF 0x0004 #define ADVERTISE_100_FULL 0x0008 #define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ #define ADVERTISE_1000_FULL 0x0020 #define ADVERTISE_2500_HALF 0x0040 /* NOT used, just FYI */ #define ADVERTISE_2500_FULL 0x0080 /* 1000/H is not supported, nor spec-compliant. 
*/ #define IGC_ALL_SPEED_DUPLEX ( \ ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ ADVERTISE_100_FULL | ADVERTISE_1000_FULL) #define IGC_ALL_SPEED_DUPLEX_2500 ( \ ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ ADVERTISE_100_FULL | ADVERTISE_1000_FULL | ADVERTISE_2500_FULL) #define IGC_ALL_NOT_GIG ( \ ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ ADVERTISE_100_FULL) #define IGC_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) #define IGC_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) #define IGC_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) #define AUTONEG_ADVERTISE_SPEED_DEFAULT IGC_ALL_SPEED_DUPLEX #define AUTONEG_ADVERTISE_SPEED_DEFAULT_2500 IGC_ALL_SPEED_DUPLEX_2500 /* LED Control */ #define IGC_LEDCTL_LED0_MODE_MASK 0x0000000F #define IGC_LEDCTL_LED0_MODE_SHIFT 0 #define IGC_LEDCTL_LED0_IVRT 0x00000040 #define IGC_LEDCTL_LED0_BLINK 0x00000080 #define IGC_LEDCTL_MODE_LED_ON 0xE #define IGC_LEDCTL_MODE_LED_OFF 0xF /* Transmit Descriptor bit definitions */ #define IGC_TXD_DTYP_D 0x00100000 /* Data Descriptor */ #define IGC_TXD_DTYP_C 0x00000000 /* Context Descriptor */ #define IGC_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ #define IGC_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ #define IGC_TXD_CMD_EOP 0x01000000 /* End of Packet */ #define IGC_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ #define IGC_TXD_CMD_IC 0x04000000 /* Insert Checksum */ #define IGC_TXD_CMD_RS 0x08000000 /* Report Status */ #define IGC_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ #define IGC_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */ #define IGC_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ #define IGC_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ #define IGC_TXD_STAT_DD 0x00000001 /* Descriptor Done */ #define IGC_TXD_CMD_TCP 0x01000000 /* TCP packet */ #define IGC_TXD_CMD_IP 0x02000000 /* IP packet */ #define IGC_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ #define IGC_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */ /* Transmit Control */ #define IGC_TCTL_EN 0x00000002 /* enable Tx */ #define IGC_TCTL_PSP 0x00000008 /* pad short packets */ #define IGC_TCTL_CT 0x00000ff0 /* collision threshold */ #define IGC_TCTL_COLD 0x003ff000 /* collision distance */ #define IGC_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ #define IGC_TCTL_MULR 0x10000000 /* Multiple request support */ /* Transmit Arbitration Count */ #define IGC_TARC0_ENABLE 0x00000400 /* Enable Tx Queue 0 */ /* SerDes Control */ #define IGC_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 #define IGC_SCTL_ENABLE_SERDES_LOOPBACK 0x0410 /* Receive Checksum Control */ #define IGC_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ #define IGC_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ #define IGC_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ #define IGC_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ #define IGC_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ /* GPY211 - I225 defines */ #define GPY_MMD_MASK 0xFFFF0000 #define GPY_MMD_SHIFT 16 #define GPY_REG_MASK 0x0000FFFF /* Header split receive */ #define IGC_RFCTL_NFSW_DIS 0x00000040 #define IGC_RFCTL_NFSR_DIS 0x00000080 #define IGC_RFCTL_ACK_DIS 0x00001000 #define IGC_RFCTL_EXTEN 0x00008000 #define IGC_RFCTL_IPV6_EX_DIS 0x00010000 #define IGC_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 #define IGC_RFCTL_LEF 0x00040000 /* Collision related configuration parameters */ #define IGC_CT_SHIFT 4 #define IGC_COLLISION_THRESHOLD 15 #define IGC_COLLISION_DISTANCE 63 #define 
IGC_COLD_SHIFT 12 /* Default values for the transmit IPG register */ #define DEFAULT_82543_TIPG_IPGT_FIBER 9 #define DEFAULT_82543_TIPG_IPGT_COPPER 8 #define IGC_TIPG_IPGT_MASK 0x000003FF #define DEFAULT_82543_TIPG_IPGR1 8 #define IGC_TIPG_IPGR1_SHIFT 10 #define DEFAULT_82543_TIPG_IPGR2 6 #define DEFAULT_80003ES2LAN_TIPG_IPGR2 7 #define IGC_TIPG_IPGR2_SHIFT 20 /* Ethertype field values */ #define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ #define ETHERNET_FCS_SIZE 4 #define MAX_JUMBO_FRAME_SIZE MJUM9BYTES #define IGC_TX_PTR_GAP 0x1F /* Extended Configuration Control and Size */ #define IGC_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 #define IGC_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 #define IGC_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008 #define IGC_EXTCNF_CTRL_SWFLAG 0x00000020 #define IGC_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080 #define IGC_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000 #define IGC_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16 #define IGC_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000 #define IGC_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16 #define IGC_PHY_CTRL_D0A_LPLU 0x00000002 #define IGC_PHY_CTRL_NOND0A_LPLU 0x00000004 #define IGC_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 #define IGC_PHY_CTRL_GBE_DISABLE 0x00000040 #define IGC_KABGTXD_BGSQLBIAS 0x00050000 /* PBA constants */ #define IGC_PBA_8K 0x0008 /* 8KB */ #define IGC_PBA_10K 0x000A /* 10KB */ #define IGC_PBA_12K 0x000C /* 12KB */ #define IGC_PBA_14K 0x000E /* 14KB */ #define IGC_PBA_16K 0x0010 /* 16KB */ #define IGC_PBA_18K 0x0012 #define IGC_PBA_20K 0x0014 #define IGC_PBA_22K 0x0016 #define IGC_PBA_24K 0x0018 #define IGC_PBA_26K 0x001A #define IGC_PBA_30K 0x001E #define IGC_PBA_32K 0x0020 #define IGC_PBA_34K 0x0022 #define IGC_PBA_35K 0x0023 #define IGC_PBA_38K 0x0026 #define IGC_PBA_40K 0x0028 #define IGC_PBA_48K 0x0030 /* 48KB */ #define IGC_PBA_64K 0x0040 /* 64KB */ #define IGC_PBA_RXA_MASK 0xFFFF #define IGC_PBS_16K IGC_PBA_16K /* Uncorrectable/correctable ECC Error counts and enable bits */ #define IGC_PBECCSTS_CORR_ERR_CNT_MASK 0x000000FF #define IGC_PBECCSTS_UNCORR_ERR_CNT_MASK 0x0000FF00 #define IGC_PBECCSTS_UNCORR_ERR_CNT_SHIFT 8 #define IGC_PBECCSTS_ECC_ENABLE 0x00010000 #define IFS_MAX 80 #define IFS_MIN 40 #define IFS_RATIO 4 #define IFS_STEP 10 #define MIN_NUM_XMITS 1000 /* SW Semaphore Register */ #define IGC_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ #define IGC_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ #define IGC_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ #define IGC_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */ /* Interrupt Cause Read */ #define IGC_ICR_TXDW 0x00000001 /* Transmit desc written back */ #define IGC_ICR_TXQE 0x00000002 /* Transmit Queue empty */ #define IGC_ICR_LSC 0x00000004 /* Link Status Change */ #define IGC_ICR_RXSEQ 0x00000008 /* Rx sequence error */ #define IGC_ICR_RXDMT0 0x00000010 /* Rx desc min. 
threshold (0) */ #define IGC_ICR_RXO 0x00000040 /* Rx overrun */ #define IGC_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ #define IGC_ICR_RXCFG 0x00000400 /* Rx /c/ ordered set */ #define IGC_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ #define IGC_ICR_GPI_EN1 0x00001000 /* GP Int 1 */ #define IGC_ICR_GPI_EN2 0x00002000 /* GP Int 2 */ #define IGC_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ #define IGC_ICR_TXD_LOW 0x00008000 #define IGC_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */ #define IGC_ICR_TS 0x00080000 /* Time Sync Interrupt */ #define IGC_ICR_DRSTA 0x40000000 /* Device Reset Asserted */ /* If this bit asserted, the driver should claim the interrupt */ #define IGC_ICR_INT_ASSERTED 0x80000000 #define IGC_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */ #define IGC_ICR_FER 0x00400000 /* Fatal Error */ /* Extended Interrupt Cause Read */ #define IGC_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */ #define IGC_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */ #define IGC_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */ #define IGC_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */ #define IGC_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */ #define IGC_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */ #define IGC_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */ #define IGC_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */ #define IGC_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ #define IGC_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ /* TCP Timer */ #define IGC_TCPTIMER_KS 0x00000100 /* KickStart */ #define IGC_TCPTIMER_COUNT_ENABLE 0x00000200 /* Count Enable */ #define IGC_TCPTIMER_COUNT_FINISH 0x00000400 /* Count finish */ #define IGC_TCPTIMER_LOOP 0x00000800 /* Loop */ /* This defines the bits that are set in the Interrupt Mask * Set/Read Register. Each bit is documented below: * o RXT0 = Receiver Timer Interrupt (ring 0) * o TXDW = Transmit Descriptor Written Back * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) * o RXSEQ = Receive Sequence Error * o LSC = Link Status Change */ #define IMS_ENABLE_MASK ( \ IGC_IMS_RXT0 | \ IGC_IMS_TXDW | \ IGC_IMS_RXDMT0 | \ IGC_IMS_RXSEQ | \ IGC_IMS_LSC) /* Interrupt Mask Set */ #define IGC_IMS_TXDW IGC_ICR_TXDW /* Tx desc written back */ #define IGC_IMS_LSC IGC_ICR_LSC /* Link Status Change */ #define IGC_IMS_RXSEQ IGC_ICR_RXSEQ /* Rx sequence error */ #define IGC_IMS_RXDMT0 IGC_ICR_RXDMT0 /* Rx desc min. 
threshold */ #define IGC_QVECTOR_MASK 0x7FFC /* Q-vector mask */ #define IGC_ITR_VAL_MASK 0x04 /* ITR value mask */ #define IGC_IMS_RXO IGC_ICR_RXO /* Rx overrun */ #define IGC_IMS_RXT0 IGC_ICR_RXT0 /* Rx timer intr */ #define IGC_IMS_TXD_LOW IGC_ICR_TXD_LOW #define IGC_IMS_ECCER IGC_ICR_ECCER /* Uncorrectable ECC Error */ #define IGC_IMS_TS IGC_ICR_TS /* Time Sync Interrupt */ #define IGC_IMS_DRSTA IGC_ICR_DRSTA /* Device Reset Asserted */ #define IGC_IMS_DOUTSYNC IGC_ICR_DOUTSYNC /* NIC DMA out of sync */ #define IGC_IMS_FER IGC_ICR_FER /* Fatal Error */ #define IGC_IMS_THS IGC_ICR_THS /* ICR.TS: Thermal Sensor Event*/ #define IGC_IMS_MDDET IGC_ICR_MDDET /* Malicious Driver Detect */ /* Extended Interrupt Mask Set */ #define IGC_EIMS_RX_QUEUE0 IGC_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */ #define IGC_EIMS_RX_QUEUE1 IGC_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */ #define IGC_EIMS_RX_QUEUE2 IGC_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */ #define IGC_EIMS_RX_QUEUE3 IGC_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */ #define IGC_EIMS_TX_QUEUE0 IGC_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */ #define IGC_EIMS_TX_QUEUE1 IGC_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */ #define IGC_EIMS_TX_QUEUE2 IGC_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */ #define IGC_EIMS_TX_QUEUE3 IGC_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */ #define IGC_EIMS_TCP_TIMER IGC_EICR_TCP_TIMER /* TCP Timer */ #define IGC_EIMS_OTHER IGC_EICR_OTHER /* Interrupt Cause Active */ /* Interrupt Cause Set */ #define IGC_ICS_LSC IGC_ICR_LSC /* Link Status Change */ #define IGC_ICS_RXSEQ IGC_ICR_RXSEQ /* Rx sequence error */ #define IGC_ICS_RXDMT0 IGC_ICR_RXDMT0 /* Rx desc min. threshold */ /* Extended Interrupt Cause Set */ #define IGC_EICS_RX_QUEUE0 IGC_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */ #define IGC_EICS_RX_QUEUE1 IGC_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */ #define IGC_EICS_RX_QUEUE2 IGC_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */ #define IGC_EICS_RX_QUEUE3 IGC_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */ #define IGC_EICS_TX_QUEUE0 IGC_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */ #define IGC_EICS_TX_QUEUE1 IGC_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */ #define IGC_EICS_TX_QUEUE2 IGC_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */ #define IGC_EICS_TX_QUEUE3 IGC_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */ #define IGC_EICS_TCP_TIMER IGC_EICR_TCP_TIMER /* TCP Timer */ #define IGC_EICS_OTHER IGC_EICR_OTHER /* Interrupt Cause Active */ #define IGC_EITR_ITR_INT_MASK 0x0000FFFF #define IGC_EITR_INTERVAL 0x00007FFC /* IGC_EITR_CNT_IGNR is only for 82576 and newer */ #define IGC_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */ /* Transmit Descriptor Control */ #define IGC_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ #define IGC_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */ #define IGC_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */ #define IGC_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ #define IGC_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ #define IGC_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */ /* Enable the counting of descriptors still to be processed. */ #define IGC_TXDCTL_COUNT_DESC 0x00400000 /* Flow Control Constants */ #define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 #define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 #define FLOW_CONTROL_TYPE 0x8808 /* 802.1q VLAN Packet Size */ #define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ #define IGC_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ /* Receive Address * Number of high/low register pairs in the RAR. 
The RAR (Receive Address * Registers) holds the directed and multicast addresses that we monitor. * Technically, we have 16 spots. However, we reserve one of these spots * (RAR[15]) for our directed address used by controllers with * manageability enabled, allowing us room for 15 multicast addresses. */ #define IGC_RAR_ENTRIES 15 #define IGC_RAH_AV 0x80000000 /* Receive descriptor valid */ #define IGC_RAL_MAC_ADDR_LEN 4 #define IGC_RAH_MAC_ADDR_LEN 2 /* Error Codes */ #define IGC_SUCCESS 0 #define IGC_ERR_NVM 1 #define IGC_ERR_PHY 2 #define IGC_ERR_CONFIG 3 #define IGC_ERR_PARAM 4 #define IGC_ERR_MAC_INIT 5 #define IGC_ERR_PHY_TYPE 6 #define IGC_ERR_RESET 9 #define IGC_ERR_MASTER_REQUESTS_PENDING 10 #define IGC_ERR_HOST_INTERFACE_COMMAND 11 #define IGC_BLK_PHY_RESET 12 #define IGC_ERR_SWFW_SYNC 13 #define IGC_NOT_IMPLEMENTED 14 #define IGC_ERR_MBX 15 #define IGC_ERR_INVALID_ARGUMENT 16 #define IGC_ERR_NO_SPACE 17 #define IGC_ERR_NVM_PBA_SECTION 18 #define IGC_ERR_INVM_VALUE_NOT_FOUND 20 /* Loop limit on how long we wait for auto-negotiation to complete */ #define COPPER_LINK_UP_LIMIT 10 #define PHY_AUTO_NEG_LIMIT 45 /* Number of 100 microseconds we wait for PCI Express master disable */ #define MASTER_DISABLE_TIMEOUT 800 /* Number of milliseconds we wait for PHY configuration done after MAC reset */ #define PHY_CFG_TIMEOUT 100 /* Number of 2 milliseconds we wait for acquiring MDIO ownership. */ #define MDIO_OWNERSHIP_TIMEOUT 10 /* Number of milliseconds for NVM auto read done after MAC reset. */ #define AUTO_READ_DONE_TIMEOUT 10 /* Flow Control */ #define IGC_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ #define IGC_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ #define IGC_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ /* Transmit Configuration Word */ #define IGC_TXCW_FD 0x00000020 /* TXCW full duplex */ #define IGC_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ #define IGC_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ #define IGC_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ #define IGC_TXCW_ANE 0x80000000 /* Auto-neg enable */ /* Receive Configuration Word */ #define IGC_RXCW_CW 0x0000ffff /* RxConfigWord mask */ #define IGC_RXCW_IV 0x08000000 /* Receive config invalid */ #define IGC_RXCW_C 0x20000000 /* Receive config */ #define IGC_RXCW_SYNCH 0x40000000 /* Receive config synch */ #define IGC_TSYNCTXCTL_TXTT_0 0x00000001 /* Tx timestamp reg 0 valid */ #define IGC_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */ #define IGC_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */ #define IGC_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */ #define IGC_TSYNCRXCTL_TYPE_L2_V2 0x00 #define IGC_TSYNCRXCTL_TYPE_L4_V1 0x02 #define IGC_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 #define IGC_TSYNCRXCTL_TYPE_ALL 0x08 #define IGC_TSYNCRXCTL_TYPE_EVENT_V2 0x0A #define IGC_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */ #define IGC_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */ #define IGC_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF #define IGC_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00 #define IGC_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01 #define IGC_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02 #define IGC_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03 #define IGC_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04 #define IGC_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00 #define IGC_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000 #define IGC_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100 #define IGC_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200 #define 
IGC_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300 #define IGC_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800 #define IGC_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900 #define IGC_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00 #define IGC_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00 #define IGC_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00 #define IGC_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00 #define IGC_TIMINCA_16NS_SHIFT 24 #define IGC_TIMINCA_INCPERIOD_SHIFT 24 #define IGC_TIMINCA_INCVALUE_MASK 0x00FFFFFF /* Time Sync Interrupt Cause/Mask Register Bits */ #define TSINTR_SYS_WRAP (1 << 0) /* SYSTIM Wrap around. */ #define TSINTR_TXTS (1 << 1) /* Transmit Timestamp. */ #define TSINTR_TT0 (1 << 3) /* Target Time 0 Trigger. */ #define TSINTR_TT1 (1 << 4) /* Target Time 1 Trigger. */ #define TSINTR_AUTT0 (1 << 5) /* Auxiliary Timestamp 0 Taken. */ #define TSINTR_AUTT1 (1 << 6) /* Auxiliary Timestamp 1 Taken. */ #define TSYNC_INTERRUPTS TSINTR_TXTS /* TSAUXC Configuration Bits */ #define TSAUXC_EN_TT0 (1 << 0) /* Enable target time 0. */ #define TSAUXC_EN_TT1 (1 << 1) /* Enable target time 1. */ #define TSAUXC_EN_CLK0 (1 << 2) /* Enable Configurable Frequency Clock 0. */ #define TSAUXC_ST0 (1 << 4) /* Start Clock 0 Toggle on Target Time 0. */ #define TSAUXC_EN_CLK1 (1 << 5) /* Enable Configurable Frequency Clock 1. */ #define TSAUXC_ST1 (1 << 7) /* Start Clock 1 Toggle on Target Time 1. */ #define TSAUXC_EN_TS0 (1 << 8) /* Enable hardware timestamp 0. */ #define TSAUXC_EN_TS1 (1 << 10) /* Enable hardware timestamp 0. */ /* SDP Configuration Bits */ #define AUX0_SEL_SDP0 (0u << 0) /* Assign SDP0 to auxiliary time stamp 0. */ #define AUX0_SEL_SDP1 (1u << 0) /* Assign SDP1 to auxiliary time stamp 0. */ #define AUX0_SEL_SDP2 (2u << 0) /* Assign SDP2 to auxiliary time stamp 0. */ #define AUX0_SEL_SDP3 (3u << 0) /* Assign SDP3 to auxiliary time stamp 0. */ #define AUX0_TS_SDP_EN (1u << 2) /* Enable auxiliary time stamp trigger 0. */ #define AUX1_SEL_SDP0 (0u << 3) /* Assign SDP0 to auxiliary time stamp 1. */ #define AUX1_SEL_SDP1 (1u << 3) /* Assign SDP1 to auxiliary time stamp 1. */ #define AUX1_SEL_SDP2 (2u << 3) /* Assign SDP2 to auxiliary time stamp 1. */ #define AUX1_SEL_SDP3 (3u << 3) /* Assign SDP3 to auxiliary time stamp 1. */ #define AUX1_TS_SDP_EN (1u << 5) /* Enable auxiliary time stamp trigger 1. */ #define TS_SDP0_EN (1u << 8) /* SDP0 is assigned to Tsync. */ #define TS_SDP1_EN (1u << 11) /* SDP1 is assigned to Tsync. */ #define TS_SDP2_EN (1u << 14) /* SDP2 is assigned to Tsync. */ #define TS_SDP3_EN (1u << 17) /* SDP3 is assigned to Tsync. */ #define TS_SDP0_SEL_TT0 (0u << 6) /* Target time 0 is output on SDP0. */ #define TS_SDP0_SEL_TT1 (1u << 6) /* Target time 1 is output on SDP0. */ #define TS_SDP1_SEL_TT0 (0u << 9) /* Target time 0 is output on SDP1. */ #define TS_SDP1_SEL_TT1 (1u << 9) /* Target time 1 is output on SDP1. */ #define TS_SDP0_SEL_FC0 (2u << 6) /* Freq clock 0 is output on SDP0. */ #define TS_SDP0_SEL_FC1 (3u << 6) /* Freq clock 1 is output on SDP0. */ #define TS_SDP1_SEL_FC0 (2u << 9) /* Freq clock 0 is output on SDP1. */ #define TS_SDP1_SEL_FC1 (3u << 9) /* Freq clock 1 is output on SDP1. */ #define TS_SDP2_SEL_TT0 (0u << 12) /* Target time 0 is output on SDP2. */ #define TS_SDP2_SEL_TT1 (1u << 12) /* Target time 1 is output on SDP2. */ #define TS_SDP2_SEL_FC0 (2u << 12) /* Freq clock 0 is output on SDP2. */ #define TS_SDP2_SEL_FC1 (3u << 12) /* Freq clock 1 is output on SDP2. */ #define TS_SDP3_SEL_TT0 (0u << 15) /* Target time 0 is output on SDP3. 
*/ #define TS_SDP3_SEL_TT1 (1u << 15) /* Target time 1 is output on SDP3. */ #define TS_SDP3_SEL_FC0 (2u << 15) /* Freq clock 0 is output on SDP3. */ #define TS_SDP3_SEL_FC1 (3u << 15) /* Freq clock 1 is output on SDP3. */ #define IGC_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ #define IGC_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ /* Extended Device Control */ #define IGC_CTRL_EXT_SDP2_DIR 0x00000400 /* SDP2 Data direction */ /* ETQF register bit definitions */ #define IGC_ETQF_1588 (1 << 30) #define IGC_FTQF_VF_BP 0x00008000 #define IGC_FTQF_1588_TIME_STAMP 0x08000000 #define IGC_FTQF_MASK 0xF0000000 #define IGC_FTQF_MASK_PROTO_BP 0x10000000 /* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ #define IGC_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ #define IGC_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ #define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ #define IGC_TSICR_TXTS 0x00000002 #define IGC_TSIM_TXTS 0x00000002 /* TUPLE Filtering Configuration */ #define IGC_TTQF_DISABLE_MASK 0xF0008000 /* TTQF Disable Mask */ #define IGC_TTQF_QUEUE_ENABLE 0x100 /* TTQF Queue Enable Bit */ #define IGC_TTQF_PROTOCOL_MASK 0xFF /* TTQF Protocol Mask */ /* TTQF TCP Bit, shift with IGC_TTQF_PROTOCOL SHIFT */ #define IGC_TTQF_PROTOCOL_TCP 0x0 /* TTQF UDP Bit, shift with IGC_TTQF_PROTOCOL_SHIFT */ #define IGC_TTQF_PROTOCOL_UDP 0x1 /* TTQF SCTP Bit, shift with IGC_TTQF_PROTOCOL_SHIFT */ #define IGC_TTQF_PROTOCOL_SCTP 0x2 #define IGC_TTQF_PROTOCOL_SHIFT 5 /* TTQF Protocol Shift */ #define IGC_TTQF_QUEUE_SHIFT 16 /* TTQF Queue Shfit */ #define IGC_TTQF_RX_QUEUE_MASK 0x70000 /* TTQF Queue Mask */ #define IGC_TTQF_MASK_ENABLE 0x10000000 /* TTQF Mask Enable Bit */ #define IGC_IMIR_CLEAR_MASK 0xF001FFFF /* IMIR Reg Clear Mask */ #define IGC_IMIR_PORT_BYPASS 0x20000 /* IMIR Port Bypass Bit */ #define IGC_IMIR_PRIORITY_SHIFT 29 /* IMIR Priority Shift */ #define IGC_IMIREXT_CLEAR_MASK 0x7FFFF /* IMIREXT Reg Clear Mask */ #define IGC_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */ #define IGC_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */ #define IGC_MDICNFG_PHY_MASK 0x03E00000 #define IGC_MDICNFG_PHY_SHIFT 21 #define IGC_MEDIA_PORT_COPPER 1 #define IGC_MEDIA_PORT_OTHER 2 #define IGC_M88E1112_AUTO_COPPER_SGMII 0x2 #define IGC_M88E1112_AUTO_COPPER_BASEX 0x3 #define IGC_M88E1112_STATUS_LINK 0x0004 /* Interface Link Bit */ #define IGC_M88E1112_MAC_CTRL_1 0x10 #define IGC_M88E1112_MAC_CTRL_1_MODE_MASK 0x0380 /* Mode Select */ #define IGC_M88E1112_MAC_CTRL_1_MODE_SHIFT 7 #define IGC_M88E1112_PAGE_ADDR 0x16 #define IGC_M88E1112_STATUS 0x01 #define IGC_THSTAT_LOW_EVENT 0x20000000 /* Low thermal threshold */ #define IGC_THSTAT_MID_EVENT 0x00200000 /* Mid thermal threshold */ #define IGC_THSTAT_HIGH_EVENT 0x00002000 /* High thermal threshold */ #define IGC_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */ #define IGC_THSTAT_LINK_THROTTLE 0x00000002 /* Link Spd Throttle Event */ /* EEE defines */ #define IGC_IPCNFG_EEE_2_5G_AN 0x00000010 /* IPCNFG EEE Ena 2.5G AN */ #define IGC_IPCNFG_EEE_1G_AN 0x00000008 /* IPCNFG EEE Ena 1G AN */ #define IGC_IPCNFG_EEE_100M_AN 0x00000004 /* IPCNFG EEE Ena 100M AN */ #define IGC_EEER_TX_LPI_EN 0x00010000 /* EEER Tx LPI Enable */ #define IGC_EEER_RX_LPI_EN 0x00020000 /* EEER Rx LPI Enable */ #define IGC_EEER_LPI_FC 0x00040000 /* EEER Ena on Flow Cntrl */ /* EEE status */ #define IGC_EEER_EEE_NEG 0x20000000 /* EEE capability nego */ #define IGC_EEER_RX_LPI_STATUS 0x40000000 /* Rx in LPI state */ #define 
IGC_EEER_TX_LPI_STATUS 0x80000000 /* Tx in LPI state */ #define IGC_EEE_LP_ADV_ADDR_I350 0x040F /* EEE LP Advertisement */ #define IGC_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */ #define IGC_M88E1543_EEE_CTRL_1 0x0 #define IGC_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */ #define IGC_M88E1543_FIBER_CTRL 0x0 /* Fiber Control Register */ #define IGC_EEE_ADV_DEV_I354 7 #define IGC_EEE_ADV_ADDR_I354 60 #define IGC_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */ #define IGC_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */ #define IGC_PCS_STATUS_DEV_I354 3 #define IGC_PCS_STATUS_ADDR_I354 1 #define IGC_PCS_STATUS_RX_LPI_RCVD 0x0400 #define IGC_PCS_STATUS_TX_LPI_RCVD 0x0800 #define IGC_M88E1512_CFG_REG_1 0x0010 #define IGC_M88E1512_CFG_REG_2 0x0011 #define IGC_M88E1512_CFG_REG_3 0x0007 #define IGC_M88E1512_MODE 0x0014 #define IGC_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */ #define IGC_EEE_LP_ADV_DEV_I225 7 /* EEE LP Adv Device */ #define IGC_EEE_LP_ADV_ADDR_I225 61 /* EEE LP Adv Register */ #define IGC_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */ /* PHY Control Register */ #define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ #define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ #define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ #define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ #define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ #define MII_CR_POWER_DOWN 0x0800 /* Power down */ #define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ #define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ #define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ #define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ #define MII_CR_SPEED_1000 0x0040 #define MII_CR_SPEED_100 0x2000 #define MII_CR_SPEED_10 0x0000 /* PHY Status Register */ #define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ #define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ #define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ #define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ #define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ #define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ #define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ #define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. 
status info in Reg 0x0F */ #define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ #define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ #define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ #define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ #define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ #define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ #define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ /* Autoneg Advertisement Register */ #define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ #define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ #define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ #define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ #define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ #define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ #define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ #define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ #define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ #define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ /* Link Partner Ability Register (Base Page) */ #define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */ #define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP 10T Half Dplx Capable */ #define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP 10T Full Dplx Capable */ #define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP 100TX Half Dplx Capable */ #define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP 100TX Full Dplx Capable */ #define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */ #define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ #define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asym Pause Direction bit */ #define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP detected Remote Fault */ #define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP rx'd link code word */ #define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */ /* Autoneg Expansion Register */ #define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */ #define NWAY_ER_PAGE_RXD 0x0002 /* LP 10T Half Dplx Capable */ #define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* LP 10T Full Dplx Capable */ #define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP 100TX Half Dplx Capable */ #define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* LP 100TX Full Dplx Capable */ /* 1000BASE-T Control Register */ #define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */ #define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ #define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ /* 1=Repeater/switch device port 0=DTE device */ #define CR_1000T_REPEATER_DTE 0x0400 /* 1=Configure PHY as Master 0=Configure PHY as Slave */ #define CR_1000T_MS_VALUE 0x0800 /* 1=Master/Slave manual config value 0=Automatic Master/Slave config */ #define CR_1000T_MS_ENABLE 0x1000 #define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ #define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ #define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ #define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ #define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ /* 1000BASE-T Status Register */ #define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle err since last rd */ #define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asym pause direction bit */ #define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ #define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ #define SR_1000T_REMOTE_RX_STATUS 0x1000 
/* Remote receiver OK */ #define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ #define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local Tx Master, 0=Slave */ #define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ #define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5 /* PHY 1000 MII Register/Bit Definitions */ /* PHY Registers defined by IEEE */ #define PHY_CONTROL 0x00 /* Control Register */ #define PHY_STATUS 0x01 /* Status Register */ #define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ #define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ #define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ #define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ #define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ #define PHY_NEXT_PAGE_TX 0x07 /* Next Page Tx */ #define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */ #define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ #define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ #define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ /* PHY GPY 211 registers */ #define STANDARD_AN_REG_MASK 0x0007 /* MMD */ #define ANEG_MULTIGBT_AN_CTRL 0x0020 /* MULTI GBT AN Control Register */ #define MMD_DEVADDR_SHIFT 16 /* Shift MMD to higher bits */ #define CR_2500T_FD_CAPS 0x0080 /* Advertise 2500T FD capability */ #define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */ /* NVM Control */ #define IGC_EECD_SK 0x00000001 /* NVM Clock */ #define IGC_EECD_CS 0x00000002 /* NVM Chip Select */ #define IGC_EECD_DI 0x00000004 /* NVM Data In */ #define IGC_EECD_DO 0x00000008 /* NVM Data Out */ #define IGC_EECD_REQ 0x00000040 /* NVM Access Request */ #define IGC_EECD_GNT 0x00000080 /* NVM Access Grant */ #define IGC_EECD_PRES 0x00000100 /* NVM Present */ #define IGC_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */ /* NVM Addressing bits based on type 0=small, 1=large */ #define IGC_EECD_ADDR_BITS 0x00000400 #define IGC_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ #define IGC_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ #define IGC_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ #define IGC_EECD_SIZE_EX_SHIFT 11 #define IGC_EECD_FLUPD 0x00080000 /* Update FLASH */ #define IGC_EECD_AUPDEN 0x00100000 /* Ena Auto FLASH update */ #define IGC_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ #define IGC_EECD_SEC1VAL_VALID_MASK (IGC_EECD_AUTO_RD | IGC_EECD_PRES) #define IGC_EECD_FLUPD_I225 0x00800000 /* Update FLASH */ #define IGC_EECD_FLUDONE_I225 0x04000000 /* Update FLASH done */ #define IGC_EECD_FLASH_DETECTED_I225 0x00080000 /* FLASH detected */ #define IGC_FLUDONE_ATTEMPTS 20000 #define IGC_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ #define IGC_EECD_SEC1VAL_I225 0x02000000 /* Sector One Valid */ #define IGC_FLSECU_BLK_SW_ACCESS_I225 0x00000004 /* Block SW access */ #define IGC_FWSM_FW_VALID_I225 0x8000 /* FW valid bit */ #define IGC_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write regs */ #define IGC_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ #define IGC_NVM_RW_REG_START 1 /* Start operation */ #define IGC_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ #define IGC_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ #define IGC_NVM_POLL_READ 0 /* Flag for polling for read complete */ #define IGC_FLASH_UPDATES 2000 /* NVM Word Offsets */ -#define NVM_COMPAT 0x0003 -#define NVM_ID_LED_SETTINGS 0x0004 -#define NVM_FUTURE_INIT_WORD1 0x0019 -#define NVM_COMPAT_VALID_CSUM 0x0001 +#define NVM_COMPAT 0x0003 +#define NVM_ID_LED_SETTINGS 0x0004 +#define NVM_VERSION 0x0005 +#define NVM_FUTURE_INIT_WORD1 0x0019 
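A minimal sketch of how the EERD field offsets above (IGC_NVM_RW_ADDR_SHIFT, IGC_NVM_RW_REG_START, IGC_NVM_RW_REG_DATA) combine for a single-word NVM read; it mirrors the buffered loop in igc_read_nvm_eerd() further down, and the helper name here is illustrative only, not part of the driver.

/*
 * Illustrative single-word EERD read built from the field offsets above.
 * The driver's real path is igc_read_nvm_eerd() in igc_nvm.c.
 */
static s32
igc_eerd_read_word_sketch(struct igc_hw *hw, u16 offset, u16 *data)
{
	u32 eerd;
	s32 ret_val;

	/* Place the word address above the start bit and kick off the read. */
	eerd = ((u32)offset << IGC_NVM_RW_ADDR_SHIFT) + IGC_NVM_RW_REG_START;
	IGC_WRITE_REG(hw, IGC_EERD, eerd);

	/* Poll the done bit; the data then sits above IGC_NVM_RW_REG_DATA. */
	ret_val = igc_poll_eerd_eewr_done(hw, IGC_NVM_POLL_READ);
	if (ret_val == IGC_SUCCESS)
		*data = (u16)(IGC_READ_REG(hw, IGC_EERD) >> IGC_NVM_RW_REG_DATA);
	return ret_val;
}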
+#define NVM_COMPAT_VALID_CSUM 0x0001 #define NVM_FUTURE_INIT_WORD1_VALID_CSUM 0x0040 +#define NVM_ETRACK_WORD 0x0042 +#define NVM_ETRACK_HIWORD 0x0043 +#define NVM_COMB_VER_OFF 0x0083 +#define NVM_COMB_VER_PTR 0x003d + +/* NVM version defines */ +#define NVM_MAJOR_MASK 0xF000 +#define NVM_MINOR_MASK 0x0FF0 +#define NVM_IMAGE_ID_MASK 0x000F +#define NVM_COMB_VER_MASK 0x00FF +#define NVM_MAJOR_SHIFT 12 +#define NVM_MINOR_SHIFT 4 +#define NVM_COMB_VER_SHFT 8 +#define NVM_VER_INVALID 0xFFFF +#define NVM_ETRACK_SHIFT 16 +#define NVM_ETRACK_VALID 0x8000 +#define NVM_NEW_DEC_MASK 0x0F00 +#define NVM_HEX_CONV 16 +#define NVM_HEX_TENS 10 #define NVM_INIT_CONTROL2_REG 0x000F #define NVM_INIT_CONTROL3_PORT_B 0x0014 #define NVM_INIT_3GIO_3 0x001A #define NVM_SWDEF_PINS_CTRL_PORT_0 0x0020 #define NVM_INIT_CONTROL3_PORT_A 0x0024 #define NVM_CFG 0x0012 #define NVM_ALT_MAC_ADDR_PTR 0x0037 #define NVM_CHECKSUM_REG 0x003F #define IGC_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */ #define IGC_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */ /* Mask bits for fields in Word 0x0f of the NVM */ #define NVM_WORD0F_PAUSE_MASK 0x3000 #define NVM_WORD0F_PAUSE 0x1000 #define NVM_WORD0F_ASM_DIR 0x2000 /* Mask bits for fields in Word 0x1a of the NVM */ #define NVM_WORD1A_ASPM_MASK 0x000C /* Mask bits for fields in Word 0x03 of the EEPROM */ #define NVM_COMPAT_LOM 0x0800 /* length of string needed to store PBA number */ #define IGC_PBANUM_LENGTH 11 /* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ #define NVM_SUM 0xBABA /* PBA (printed board assembly) number words */ #define NVM_PBA_OFFSET_0 8 #define NVM_PBA_OFFSET_1 9 #define NVM_PBA_PTR_GUARD 0xFAFA #define NVM_WORD_SIZE_BASE_SHIFT 6 /* NVM Commands - Microwire */ #define NVM_READ_OPCODE_MICROWIRE 0x6 /* NVM read opcode */ #define NVM_WRITE_OPCODE_MICROWIRE 0x5 /* NVM write opcode */ #define NVM_ERASE_OPCODE_MICROWIRE 0x7 /* NVM erase opcode */ #define NVM_EWEN_OPCODE_MICROWIRE 0x13 /* NVM erase/write enable */ #define NVM_EWDS_OPCODE_MICROWIRE 0x10 /* NVM erase/write disable */ /* NVM Commands - SPI */ #define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ #define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ #define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ #define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ #define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ #define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ /* SPI NVM Status Register */ #define NVM_STATUS_RDY_SPI 0x01 /* Word definitions for ID LED Settings */ #define ID_LED_RESERVED_0000 0x0000 #define ID_LED_RESERVED_FFFF 0xFFFF #define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ (ID_LED_OFF1_OFF2 << 8) | \ (ID_LED_DEF1_DEF2 << 4) | \ (ID_LED_DEF1_DEF2)) #define ID_LED_DEF1_DEF2 0x1 #define ID_LED_DEF1_ON2 0x2 #define ID_LED_DEF1_OFF2 0x3 #define ID_LED_ON1_DEF2 0x4 #define ID_LED_ON1_ON2 0x5 #define ID_LED_ON1_OFF2 0x6 #define ID_LED_OFF1_DEF2 0x7 #define ID_LED_OFF1_ON2 0x8 #define ID_LED_OFF1_OFF2 0x9 #define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF #define IGP_ACTIVITY_LED_ENABLE 0x0300 #define IGP_LED3_MODE 0x07000000 /* PCI/PCI-X/PCI-EX Config space */ #define PCIX_COMMAND_REGISTER 0xE6 #define PCIX_STATUS_REGISTER_LO 0xE8 #define PCIX_STATUS_REGISTER_HI 0xEA #define PCI_HEADER_TYPE_REGISTER 0x0E #define PCIE_LINK_STATUS 0x12 #define PCIX_COMMAND_MMRBC_MASK 0x000C #define PCIX_COMMAND_MMRBC_SHIFT 0x2 #define PCIX_STATUS_HI_MMRBC_MASK 0x0060 #define PCIX_STATUS_HI_MMRBC_SHIFT 0x5 #define 
PCIX_STATUS_HI_MMRBC_4K 0x3 #define PCIX_STATUS_HI_MMRBC_2K 0x2 #define PCIX_STATUS_LO_FUNC_MASK 0x7 #define PCI_HEADER_TYPE_MULTIFUNC 0x80 #define PCIE_LINK_WIDTH_MASK 0x3F0 #define PCIE_LINK_WIDTH_SHIFT 4 #define PCIE_LINK_SPEED_MASK 0x0F #define PCIE_LINK_SPEED_2500 0x01 #define PCIE_LINK_SPEED_5000 0x02 #ifndef ETH_ADDR_LEN #define ETH_ADDR_LEN 6 #endif #define PHY_REVISION_MASK 0xFFFFFFF0 #define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ #define MAX_PHY_MULTI_PAGE_REG 0xF /* Bit definitions for valid PHY IDs. * I = Integrated * E = External */ #define M88IGC_E_PHY_ID 0x01410C50 #define M88IGC_I_PHY_ID 0x01410C30 #define M88E1011_I_PHY_ID 0x01410C20 #define IGP01IGC_I_PHY_ID 0x02A80380 #define M88E1111_I_PHY_ID 0x01410CC0 #define GG82563_E_PHY_ID 0x01410CA0 #define IGP03IGC_E_PHY_ID 0x02A80390 #define IFE_E_PHY_ID 0x02A80330 #define IFE_PLUS_E_PHY_ID 0x02A80320 #define IFE_C_E_PHY_ID 0x02A80310 #define I225_I_PHY_ID 0x67C9DC00 /* M88IGC Specific Registers */ #define M88IGC_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Reg */ #define M88IGC_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Reg */ #define M88IGC_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Cntrl */ #define M88IGC_RX_ERR_CNTR 0x15 /* Receive Error Counter */ #define M88IGC_PHY_PAGE_SELECT 0x1D /* Reg 29 for pg number setting */ #define M88IGC_PHY_GEN_CONTROL 0x1E /* meaning depends on reg 29 */ /* M88IGC PHY Specific Control Register */ #define M88IGC_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reverse enabled */ /* MDI Crossover Mode bits 6:5 Manual MDI configuration */ #define M88IGC_PSCR_MDI_MANUAL_MODE 0x0000 #define M88IGC_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ /* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */ #define M88IGC_PSCR_AUTO_X_1000T 0x0040 /* Auto crossover enabled all speeds */ #define M88IGC_PSCR_AUTO_X_MODE 0x0060 #define M88IGC_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Tx */ /* M88IGC PHY Specific Status Register */ #define M88IGC_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ #define M88IGC_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ #define M88IGC_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ /* 0 = <50M * 1 = 50-80M * 2 = 80-110M * 3 = 110-140M * 4 = >140M */ #define M88IGC_PSSR_CABLE_LENGTH 0x0380 #define M88IGC_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */ #define M88IGC_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ #define M88IGC_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ #define M88IGC_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ #define M88IGC_PSSR_CABLE_LENGTH_SHIFT 7 /* Number of times we will attempt to autonegotiate before downshifting if we * are the master */ #define M88IGC_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 #define M88IGC_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 /* Number of times we will attempt to autonegotiate before downshifting if we * are the slave */ #define M88IGC_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 #define M88IGC_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 #define M88IGC_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ /* M88EC018 Rev 2 specific DownShift settings */ #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 /* Bits... 
* 15-5: page * 4-0: register offset */ #define GG82563_PAGE_SHIFT 5 #define GG82563_REG(page, reg) \ (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) #define GG82563_MIN_ALT_REG 30 /* GG82563 Specific Registers */ #define GG82563_PHY_SPEC_CTRL GG82563_REG(0, 16) /* PHY Spec Cntrl */ #define GG82563_PHY_PAGE_SELECT GG82563_REG(0, 22) /* Page Select */ #define GG82563_PHY_SPEC_CTRL_2 GG82563_REG(0, 26) /* PHY Spec Cntrl2 */ #define GG82563_PHY_PAGE_SELECT_ALT GG82563_REG(0, 29) /* Alt Page Select */ /* MAC Specific Control Register */ #define GG82563_PHY_MAC_SPEC_CTRL GG82563_REG(2, 21) #define GG82563_PHY_DSP_DISTANCE GG82563_REG(5, 26) /* DSP Distance */ /* Page 193 - Port Control Registers */ /* Kumeran Mode Control */ #define GG82563_PHY_KMRN_MODE_CTRL GG82563_REG(193, 16) #define GG82563_PHY_PWR_MGMT_CTRL GG82563_REG(193, 20) /* Pwr Mgt Ctrl */ /* Page 194 - KMRN Registers */ #define GG82563_PHY_INBAND_CTRL GG82563_REG(194, 18) /* Inband Ctrl */ /* MDI Control */ #define IGC_MDIC_DATA_MASK 0x0000FFFF #define IGC_MDIC_INT_EN 0x20000000 #define IGC_MDIC_REG_MASK 0x001F0000 #define IGC_MDIC_REG_SHIFT 16 #define IGC_MDIC_PHY_SHIFT 21 #define IGC_MDIC_OP_WRITE 0x04000000 #define IGC_MDIC_OP_READ 0x08000000 #define IGC_MDIC_READY 0x10000000 #define IGC_MDIC_ERROR 0x40000000 #define IGC_N0_QUEUE -1 #define IGC_MAX_MAC_HDR_LEN 127 #define IGC_MAX_NETWORK_HDR_LEN 511 #define IGC_VLANPQF_QUEUE_SEL(_n, q_idx) ((q_idx) << ((_n) * 4)) #define IGC_VLANPQF_P_VALID(_n) (0x1 << (3 + (_n) * 4)) #define IGC_VLANPQF_QUEUE_MASK 0x03 #define IGC_VFTA_BLOCK_SIZE 8 /* SerDes Control */ #define IGC_GEN_POLL_TIMEOUT 640 /* DMA Coalescing register fields */ /* DMA Coalescing Watchdog Timer */ #define IGC_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coalescing Rx Threshold */ #define IGC_DMACR_DMACTHR_MASK 0x00FF0000 #define IGC_DMACR_DMACTHR_SHIFT 16 /* Lx when no PCIe transactions */ #define IGC_DMACR_DMAC_LX_MASK 0x30000000 #define IGC_DMACR_DMAC_LX_SHIFT 28 #define IGC_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */ /* DMA Coalescing BMC-to-OS Watchdog Enable */ #define IGC_DMACR_DC_BMC2OSW_EN 0x00008000 /* DMA Coalescing Transmit Threshold */ #define IGC_DMCTXTH_DMCTTHR_MASK 0x00000FFF #define IGC_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */ /* Rx Traffic Rate Threshold */ #define IGC_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Rx packet rate in current window */ #define IGC_DMCRTRH_LRPRCW 0x80000000 /* DMA Coal Rx Traffic Current Count */ #define IGC_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* Flow ctrl Rx Threshold High val */ #define IGC_FCRTC_RTH_COAL_MASK 0x0003FFF0 #define IGC_FCRTC_RTH_COAL_SHIFT 4 /* Lx power decision based on DMA coal */ #define IGC_PCIEMISC_LX_DECISION 0x00000080 #define IGC_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */ #define IGC_RXPBS_SIZE_I210_MASK 0x0000003F /* Rx packet buffer size */ #define IGC_TXPB0S_SIZE_I210_MASK 0x0000003F /* Tx packet buffer 0 size */ #define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ #define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ #define IGC_LTRC_EEEMS_EN 0x00000020 /* Enable EEE LTR max send */ /* Minimum time for 1000BASE-T where no data will be transmit following move out * of EEE LPI Tx state */ #define IGC_TW_SYSTEM_1000_MASK 0x000000FF /* Minimum time for 100BASE-T where no data will be transmit following move out * of EEE LPI Tx state */ #define IGC_TW_SYSTEM_100_MASK 0x0000FF00 #define IGC_TW_SYSTEM_100_SHIFT 8 #define IGC_LTRMINV_LTRV_MASK 0x000003FF /* LTR minimum value */ #define IGC_LTRMAXV_LTRV_MASK 
0x000003FF /* LTR maximum value */ #define IGC_LTRMINV_SCALE_MASK 0x00001C00 /* LTR minimum scale */ #define IGC_LTRMINV_SCALE_SHIFT 10 /* Reg val to set scale to 1024 nsec */ #define IGC_LTRMINV_SCALE_1024 2 /* Reg val to set scale to 32768 nsec */ #define IGC_LTRMINV_SCALE_32768 3 #define IGC_LTRMINV_LSNP_REQ 0x00008000 /* LTR Snoop Requirement */ #define IGC_LTRMAXV_SCALE_MASK 0x00001C00 /* LTR maximum scale */ #define IGC_LTRMAXV_SCALE_SHIFT 10 /* Reg val to set scale to 1024 nsec */ #define IGC_LTRMAXV_SCALE_1024 2 /* Reg val to set scale to 32768 nsec */ #define IGC_LTRMAXV_SCALE_32768 3 #define IGC_LTRMAXV_LSNP_REQ 0x00008000 /* LTR Snoop Requirement */ #define I225_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ #define I225_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ #define IGC_RXPBS_SIZE_I225_MASK 0x0000003F /* Rx packet buffer size */ #define IGC_TXPB0S_SIZE_I225_MASK 0x0000003F /* Tx packet buffer 0 size */ #define IGC_STM_OPCODE 0xDB00 #define IGC_EEPROM_FLASH_SIZE_WORD 0x11 #define INVM_DWORD_TO_RECORD_TYPE(invm_dword) \ (u8)((invm_dword) & 0x7) #define INVM_DWORD_TO_WORD_ADDRESS(invm_dword) \ (u8)(((invm_dword) & 0x0000FE00) >> 9) #define INVM_DWORD_TO_WORD_DATA(invm_dword) \ (u16)(((invm_dword) & 0xFFFF0000) >> 16) #define IGC_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8 #define IGC_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1 #define IGC_INVM_ULT_BYTES_SIZE 8 #define IGC_INVM_RECORD_SIZE_IN_BYTES 4 #define IGC_INVM_VER_FIELD_ONE 0x1FF8 #define IGC_INVM_VER_FIELD_TWO 0x7FE000 #define IGC_INVM_IMGTYPE_FIELD 0x1F800000 #define IGC_INVM_MAJOR_MASK 0x3F0 #define IGC_INVM_MINOR_MASK 0xF #define IGC_INVM_MAJOR_SHIFT 4 /* PLL Defines */ #define IGC_PCI_PMCSR 0x44 #define IGC_PCI_PMCSR_D3 0x03 #define IGC_MAX_PLL_TRIES 5 #define IGC_PHY_PLL_UNCONF 0xFF #define IGC_PHY_PLL_FREQ_PAGE 0xFC0000 #define IGC_PHY_PLL_FREQ_REG 0x000E #define IGC_INVM_DEFAULT_AL 0x202F #define IGC_INVM_AUTOLOAD 0x0A #define IGC_INVM_PLL_WO_VAL 0x0010 /* Proxy Filter Control Extended */ #define IGC_PROXYFCEX_MDNS 0x00000001 /* mDNS */ #define IGC_PROXYFCEX_MDNS_M 0x00000002 /* mDNS Multicast */ #define IGC_PROXYFCEX_MDNS_U 0x00000004 /* mDNS Unicast */ #define IGC_PROXYFCEX_IPV4_M 0x00000008 /* IPv4 Multicast */ #define IGC_PROXYFCEX_IPV6_M 0x00000010 /* IPv6 Multicast */ #define IGC_PROXYFCEX_IGMP 0x00000020 /* IGMP */ #define IGC_PROXYFCEX_IGMP_M 0x00000040 /* IGMP Multicast */ #define IGC_PROXYFCEX_ARPRES 0x00000080 /* ARP Response */ #define IGC_PROXYFCEX_ARPRES_D 0x00000100 /* ARP Response Directed */ #define IGC_PROXYFCEX_ICMPV4 0x00000200 /* ICMPv4 */ #define IGC_PROXYFCEX_ICMPV4_D 0x00000400 /* ICMPv4 Directed */ #define IGC_PROXYFCEX_ICMPV6 0x00000800 /* ICMPv6 */ #define IGC_PROXYFCEX_ICMPV6_D 0x00001000 /* ICMPv6 Directed */ #define IGC_PROXYFCEX_DNS 0x00002000 /* DNS */ /* Proxy Filter Control */ #define IGC_PROXYFC_D0 0x00000001 /* Enable offload in D0 */ #define IGC_PROXYFC_EX 0x00000004 /* Directed exact proxy */ #define IGC_PROXYFC_MC 0x00000008 /* Directed MC Proxy */ #define IGC_PROXYFC_BC 0x00000010 /* Broadcast Proxy Enable */ #define IGC_PROXYFC_ARP_DIRECTED 0x00000020 /* Directed ARP Proxy Ena */ #define IGC_PROXYFC_IPV4 0x00000040 /* Directed IPv4 Enable */ #define IGC_PROXYFC_IPV6 0x00000080 /* Directed IPv6 Enable */ #define IGC_PROXYFC_NS 0x00000200 /* IPv6 Neighbor Solicitation */ #define IGC_PROXYFC_NS_DIRECTED 0x00000400 /* Directed NS Proxy Ena */ #define IGC_PROXYFC_ARP 0x00000800 /* ARP Request Proxy Ena */ /* Proxy Status */ #define IGC_PROXYS_CLEAR 0xFFFFFFFF /* Clear */ 
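The INVM_DWORD_TO_* macros above split each 32-bit iNVM record into a record type (bits 2:0), a word address (bits 15:9) and the word data (bits 31:16). A short sketch of pulling one record apart with them; the helper and its printf output are illustrative only.

/*
 * Illustrative decode of one 32-bit iNVM record using the macros above.
 */
static void
igc_invm_record_sketch(u32 invm_dword)
{
	u8 record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
	u8 word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
	u16 word_data = INVM_DWORD_TO_WORD_DATA(invm_dword);

	printf("iNVM record: type=%u addr=0x%02x data=0x%04x\n",
	    record_type, word_address, word_data);
}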
/* Firmware Status */ #define IGC_FWSTS_FWRI 0x80000000 /* FW Reset Indication */ /* VF Control */ #define IGC_VTCTRL_RST 0x04000000 /* Reset VF */ #define IGC_STATUS_LAN_ID_MASK 0x00000000C /* Mask for Lan ID field */ /* Lan ID bit field offset in status register */ #define IGC_STATUS_LAN_ID_OFFSET 2 #define IGC_VFTA_ENTRIES 128 #define IGC_UNUSEDARG #ifndef ERROR_REPORT #define ERROR_REPORT(fmt) do { } while (0) #endif /* ERROR_REPORT */ #endif /* _IGC_DEFINES_H_ */ diff --git a/sys/dev/igc/igc_nvm.c b/sys/dev/igc/igc_nvm.c index d86e04ffa0dc..b476a5fdbeac 100644 --- a/sys/dev/igc/igc_nvm.c +++ b/sys/dev/igc/igc_nvm.c @@ -1,719 +1,800 @@ /*- * Copyright 2021 Intel Corp * Copyright 2021 Rubicon Communications, LLC (Netgate) * SPDX-License-Identifier: BSD-3-Clause */ #include #include "igc_api.h" static void igc_reload_nvm_generic(struct igc_hw *hw); /** * igc_init_nvm_ops_generic - Initialize NVM function pointers * @hw: pointer to the HW structure * * Setups up the function pointers to no-op functions **/ void igc_init_nvm_ops_generic(struct igc_hw *hw) { struct igc_nvm_info *nvm = &hw->nvm; DEBUGFUNC("igc_init_nvm_ops_generic"); /* Initialize function pointers */ nvm->ops.init_params = igc_null_ops_generic; nvm->ops.acquire = igc_null_ops_generic; nvm->ops.read = igc_null_read_nvm; nvm->ops.release = igc_null_nvm_generic; nvm->ops.reload = igc_reload_nvm_generic; nvm->ops.update = igc_null_ops_generic; nvm->ops.validate = igc_null_ops_generic; nvm->ops.write = igc_null_write_nvm; } /** * igc_null_nvm_read - No-op function, return 0 * @hw: pointer to the HW structure * @a: dummy variable * @b: dummy variable * @c: dummy variable **/ s32 igc_null_read_nvm(struct igc_hw IGC_UNUSEDARG *hw, u16 IGC_UNUSEDARG a, u16 IGC_UNUSEDARG b, u16 IGC_UNUSEDARG *c) { DEBUGFUNC("igc_null_read_nvm"); return IGC_SUCCESS; } /** * igc_null_nvm_generic - No-op function, return void * @hw: pointer to the HW structure **/ void igc_null_nvm_generic(struct igc_hw IGC_UNUSEDARG *hw) { DEBUGFUNC("igc_null_nvm_generic"); return; } /** * igc_null_write_nvm - No-op function, return 0 * @hw: pointer to the HW structure * @a: dummy variable * @b: dummy variable * @c: dummy variable **/ s32 igc_null_write_nvm(struct igc_hw IGC_UNUSEDARG *hw, u16 IGC_UNUSEDARG a, u16 IGC_UNUSEDARG b, u16 IGC_UNUSEDARG *c) { DEBUGFUNC("igc_null_write_nvm"); return IGC_SUCCESS; } /** * igc_raise_eec_clk - Raise EEPROM clock * @hw: pointer to the HW structure * @eecd: pointer to the EEPROM * * Enable/Raise the EEPROM clock bit. **/ static void igc_raise_eec_clk(struct igc_hw *hw, u32 *eecd) { *eecd = *eecd | IGC_EECD_SK; IGC_WRITE_REG(hw, IGC_EECD, *eecd); IGC_WRITE_FLUSH(hw); usec_delay(hw->nvm.delay_usec); } /** * igc_lower_eec_clk - Lower EEPROM clock * @hw: pointer to the HW structure * @eecd: pointer to the EEPROM * * Clear/Lower the EEPROM clock bit. **/ static void igc_lower_eec_clk(struct igc_hw *hw, u32 *eecd) { *eecd = *eecd & ~IGC_EECD_SK; IGC_WRITE_REG(hw, IGC_EECD, *eecd); IGC_WRITE_FLUSH(hw); usec_delay(hw->nvm.delay_usec); } /** * igc_shift_out_eec_bits - Shift data bits our to the EEPROM * @hw: pointer to the HW structure * @data: data to send to the EEPROM * @count: number of bits to shift out * * We need to shift 'count' bits out to the EEPROM. So, the value in the * "data" parameter will be shifted out to the EEPROM one bit at a time. * In order to do this, "data" must be broken down into bits. 
**/ static void igc_shift_out_eec_bits(struct igc_hw *hw, u16 data, u16 count) { struct igc_nvm_info *nvm = &hw->nvm; u32 eecd = IGC_READ_REG(hw, IGC_EECD); u32 mask; DEBUGFUNC("igc_shift_out_eec_bits"); mask = 0x01 << (count - 1); if (nvm->type == igc_nvm_eeprom_spi) eecd |= IGC_EECD_DO; do { eecd &= ~IGC_EECD_DI; if (data & mask) eecd |= IGC_EECD_DI; IGC_WRITE_REG(hw, IGC_EECD, eecd); IGC_WRITE_FLUSH(hw); usec_delay(nvm->delay_usec); igc_raise_eec_clk(hw, &eecd); igc_lower_eec_clk(hw, &eecd); mask >>= 1; } while (mask); eecd &= ~IGC_EECD_DI; IGC_WRITE_REG(hw, IGC_EECD, eecd); } /** * igc_shift_in_eec_bits - Shift data bits in from the EEPROM * @hw: pointer to the HW structure * @count: number of bits to shift in * * In order to read a register from the EEPROM, we need to shift 'count' bits * in from the EEPROM. Bits are "shifted in" by raising the clock input to * the EEPROM (setting the SK bit), and then reading the value of the data out * "DO" bit. During this "shifting in" process the data in "DI" bit should * always be clear. **/ static u16 igc_shift_in_eec_bits(struct igc_hw *hw, u16 count) { u32 eecd; u32 i; u16 data; DEBUGFUNC("igc_shift_in_eec_bits"); eecd = IGC_READ_REG(hw, IGC_EECD); eecd &= ~(IGC_EECD_DO | IGC_EECD_DI); data = 0; for (i = 0; i < count; i++) { data <<= 1; igc_raise_eec_clk(hw, &eecd); eecd = IGC_READ_REG(hw, IGC_EECD); eecd &= ~IGC_EECD_DI; if (eecd & IGC_EECD_DO) data |= 1; igc_lower_eec_clk(hw, &eecd); } return data; } /** * igc_poll_eerd_eewr_done - Poll for EEPROM read/write completion * @hw: pointer to the HW structure * @ee_reg: EEPROM flag for polling * * Polls the EEPROM status bit for either read or write completion based * upon the value of 'ee_reg'. **/ s32 igc_poll_eerd_eewr_done(struct igc_hw *hw, int ee_reg) { u32 attempts = 100000; u32 i, reg = 0; DEBUGFUNC("igc_poll_eerd_eewr_done"); for (i = 0; i < attempts; i++) { if (ee_reg == IGC_NVM_POLL_READ) reg = IGC_READ_REG(hw, IGC_EERD); else reg = IGC_READ_REG(hw, IGC_EEWR); if (reg & IGC_NVM_RW_REG_DONE) return IGC_SUCCESS; usec_delay(5); } return -IGC_ERR_NVM; } /** * igc_acquire_nvm_generic - Generic request for access to EEPROM * @hw: pointer to the HW structure * * Set the EEPROM access request bit and wait for EEPROM access grant bit. * Return successful if access grant bit set, else clear the request for * EEPROM access and return -IGC_ERR_NVM (-1). **/ s32 igc_acquire_nvm_generic(struct igc_hw *hw) { u32 eecd = IGC_READ_REG(hw, IGC_EECD); s32 timeout = IGC_NVM_GRANT_ATTEMPTS; DEBUGFUNC("igc_acquire_nvm_generic"); IGC_WRITE_REG(hw, IGC_EECD, eecd | IGC_EECD_REQ); eecd = IGC_READ_REG(hw, IGC_EECD); while (timeout) { if (eecd & IGC_EECD_GNT) break; usec_delay(5); eecd = IGC_READ_REG(hw, IGC_EECD); timeout--; } if (!timeout) { eecd &= ~IGC_EECD_REQ; IGC_WRITE_REG(hw, IGC_EECD, eecd); DEBUGOUT("Could not acquire NVM grant\n"); return -IGC_ERR_NVM; } return IGC_SUCCESS; } /** * igc_standby_nvm - Return EEPROM to standby state * @hw: pointer to the HW structure * * Return the EEPROM to a standby state. 
**/ static void igc_standby_nvm(struct igc_hw *hw) { struct igc_nvm_info *nvm = &hw->nvm; u32 eecd = IGC_READ_REG(hw, IGC_EECD); DEBUGFUNC("igc_standby_nvm"); if (nvm->type == igc_nvm_eeprom_spi) { /* Toggle CS to flush commands */ eecd |= IGC_EECD_CS; IGC_WRITE_REG(hw, IGC_EECD, eecd); IGC_WRITE_FLUSH(hw); usec_delay(nvm->delay_usec); eecd &= ~IGC_EECD_CS; IGC_WRITE_REG(hw, IGC_EECD, eecd); IGC_WRITE_FLUSH(hw); usec_delay(nvm->delay_usec); } } /** * igc_stop_nvm - Terminate EEPROM command * @hw: pointer to the HW structure * * Terminates the current command by inverting the EEPROM's chip select pin. **/ static void igc_stop_nvm(struct igc_hw *hw) { u32 eecd; DEBUGFUNC("igc_stop_nvm"); eecd = IGC_READ_REG(hw, IGC_EECD); if (hw->nvm.type == igc_nvm_eeprom_spi) { /* Pull CS high */ eecd |= IGC_EECD_CS; igc_lower_eec_clk(hw, &eecd); } } /** * igc_release_nvm_generic - Release exclusive access to EEPROM * @hw: pointer to the HW structure * * Stop any current commands to the EEPROM and clear the EEPROM request bit. **/ void igc_release_nvm_generic(struct igc_hw *hw) { u32 eecd; DEBUGFUNC("igc_release_nvm_generic"); igc_stop_nvm(hw); eecd = IGC_READ_REG(hw, IGC_EECD); eecd &= ~IGC_EECD_REQ; IGC_WRITE_REG(hw, IGC_EECD, eecd); } /** * igc_ready_nvm_eeprom - Prepares EEPROM for read/write * @hw: pointer to the HW structure * * Setups the EEPROM for reading and writing. **/ static s32 igc_ready_nvm_eeprom(struct igc_hw *hw) { struct igc_nvm_info *nvm = &hw->nvm; u32 eecd = IGC_READ_REG(hw, IGC_EECD); u8 spi_stat_reg; DEBUGFUNC("igc_ready_nvm_eeprom"); if (nvm->type == igc_nvm_eeprom_spi) { u16 timeout = NVM_MAX_RETRY_SPI; /* Clear SK and CS */ eecd &= ~(IGC_EECD_CS | IGC_EECD_SK); IGC_WRITE_REG(hw, IGC_EECD, eecd); IGC_WRITE_FLUSH(hw); usec_delay(1); /* Read "Status Register" repeatedly until the LSB is cleared. * The EEPROM will signal that the command has been completed * by clearing bit 0 of the internal status register. If it's * not cleared within 'timeout', then error out. */ while (timeout) { igc_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, hw->nvm.opcode_bits); spi_stat_reg = (u8)igc_shift_in_eec_bits(hw, 8); if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) break; usec_delay(5); igc_standby_nvm(hw); timeout--; } if (!timeout) { DEBUGOUT("SPI NVM Status error\n"); return -IGC_ERR_NVM; } } return IGC_SUCCESS; } /** * igc_read_nvm_eerd - Reads EEPROM using EERD register * @hw: pointer to the HW structure * @offset: offset of word in the EEPROM to read * @words: number of words to read * @data: word read from the EEPROM * * Reads a 16 bit word from the EEPROM using the EERD register. **/ s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data) { struct igc_nvm_info *nvm = &hw->nvm; u32 i, eerd = 0; s32 ret_val = IGC_SUCCESS; DEBUGFUNC("igc_read_nvm_eerd"); /* A check for invalid values: offset too large, too many words, * too many words for the offset, and not enough words. 
*/ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || (words == 0)) { DEBUGOUT("nvm parameter(s) out of bounds\n"); return -IGC_ERR_NVM; } for (i = 0; i < words; i++) { eerd = ((offset + i) << IGC_NVM_RW_ADDR_SHIFT) + IGC_NVM_RW_REG_START; IGC_WRITE_REG(hw, IGC_EERD, eerd); ret_val = igc_poll_eerd_eewr_done(hw, IGC_NVM_POLL_READ); if (ret_val) break; data[i] = (IGC_READ_REG(hw, IGC_EERD) >> IGC_NVM_RW_REG_DATA); } if (ret_val) DEBUGOUT1("NVM read error: %d\n", ret_val); return ret_val; } /** * igc_write_nvm_spi - Write to EEPROM using SPI * @hw: pointer to the HW structure * @offset: offset within the EEPROM to be written to * @words: number of words to write * @data: 16 bit word(s) to be written to the EEPROM * * Writes data to EEPROM at offset using SPI interface. * * If igc_update_nvm_checksum is not called after this function , the * EEPROM will most likely contain an invalid checksum. **/ s32 igc_write_nvm_spi(struct igc_hw *hw, u16 offset, u16 words, u16 *data) { struct igc_nvm_info *nvm = &hw->nvm; s32 ret_val = -IGC_ERR_NVM; u16 widx = 0; DEBUGFUNC("igc_write_nvm_spi"); /* A check for invalid values: offset too large, too many words, * and not enough words. */ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || (words == 0)) { DEBUGOUT("nvm parameter(s) out of bounds\n"); return -IGC_ERR_NVM; } while (widx < words) { u8 write_opcode = NVM_WRITE_OPCODE_SPI; ret_val = nvm->ops.acquire(hw); if (ret_val) return ret_val; ret_val = igc_ready_nvm_eeprom(hw); if (ret_val) { nvm->ops.release(hw); return ret_val; } igc_standby_nvm(hw); /* Send the WRITE ENABLE command (8 bit opcode) */ igc_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, nvm->opcode_bits); igc_standby_nvm(hw); /* Some SPI eeproms use the 8th address bit embedded in the * opcode */ if ((nvm->address_bits == 8) && (offset >= 128)) write_opcode |= NVM_A8_OPCODE_SPI; /* Send the Write command (8-bit opcode + addr) */ igc_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); igc_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), nvm->address_bits); /* Loop to allow for up to whole page write of eeprom */ while (widx < words) { u16 word_out = data[widx]; word_out = (word_out >> 8) | (word_out << 8); igc_shift_out_eec_bits(hw, word_out, 16); widx++; if ((((offset + widx) * 2) % nvm->page_size) == 0) { igc_standby_nvm(hw); break; } } msec_delay(10); nvm->ops.release(hw); } return ret_val; } /** * igc_read_pba_string_generic - Read device part number * @hw: pointer to the HW structure * @pba_num: pointer to device part number * @pba_num_size: size of part number buffer * * Reads the product board assembly (PBA) number from the EEPROM and stores * the value in pba_num. 
**/ s32 igc_read_pba_string_generic(struct igc_hw *hw, u8 *pba_num, u32 pba_num_size) { s32 ret_val; u16 nvm_data; u16 pba_ptr; u16 offset; u16 length; DEBUGFUNC("igc_read_pba_string_generic"); if (pba_num == NULL) { DEBUGOUT("PBA string buffer was null\n"); return -IGC_ERR_INVALID_ARGUMENT; } ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); if (ret_val) { DEBUGOUT("NVM Read Error\n"); return ret_val; } ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr); if (ret_val) { DEBUGOUT("NVM Read Error\n"); return ret_val; } /* if nvm_data is not ptr guard the PBA must be in legacy format which * means pba_ptr is actually our second data word for the PBA number * and we can decode it into an ascii string */ if (nvm_data != NVM_PBA_PTR_GUARD) { DEBUGOUT("NVM PBA number is not stored as string\n"); /* make sure callers buffer is big enough to store the PBA */ if (pba_num_size < IGC_PBANUM_LENGTH) { DEBUGOUT("PBA string buffer too small\n"); return IGC_ERR_NO_SPACE; } /* extract hex string from data and pba_ptr */ pba_num[0] = (nvm_data >> 12) & 0xF; pba_num[1] = (nvm_data >> 8) & 0xF; pba_num[2] = (nvm_data >> 4) & 0xF; pba_num[3] = nvm_data & 0xF; pba_num[4] = (pba_ptr >> 12) & 0xF; pba_num[5] = (pba_ptr >> 8) & 0xF; pba_num[6] = '-'; pba_num[7] = 0; pba_num[8] = (pba_ptr >> 4) & 0xF; pba_num[9] = pba_ptr & 0xF; /* put a null character on the end of our string */ pba_num[10] = '\0'; /* switch all the data but the '-' to hex char */ for (offset = 0; offset < 10; offset++) { if (pba_num[offset] < 0xA) pba_num[offset] += '0'; else if (pba_num[offset] < 0x10) pba_num[offset] += 'A' - 0xA; } return IGC_SUCCESS; } ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length); if (ret_val) { DEBUGOUT("NVM Read Error\n"); return ret_val; } if (length == 0xFFFF || length == 0) { DEBUGOUT("NVM PBA number section invalid length\n"); return -IGC_ERR_NVM_PBA_SECTION; } /* check if pba_num buffer is big enough */ if (pba_num_size < (((u32)length * 2) - 1)) { DEBUGOUT("PBA string buffer too small\n"); return -IGC_ERR_NO_SPACE; } /* trim pba length from start of string */ pba_ptr++; length--; for (offset = 0; offset < length; offset++) { ret_val = hw->nvm.ops.read(hw, pba_ptr + offset, 1, &nvm_data); if (ret_val) { DEBUGOUT("NVM Read Error\n"); return ret_val; } pba_num[offset * 2] = (u8)(nvm_data >> 8); pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); } pba_num[offset * 2] = '\0'; return IGC_SUCCESS; } /** * igc_read_mac_addr_generic - Read device MAC address * @hw: pointer to the HW structure * * Reads the device MAC address from the EEPROM and stores the value. * Since devices with two ports use the same EEPROM, we increment the * last bit in the MAC address for the second port. **/ s32 igc_read_mac_addr_generic(struct igc_hw *hw) { u32 rar_high; u32 rar_low; u16 i; rar_high = IGC_READ_REG(hw, IGC_RAH(0)); rar_low = IGC_READ_REG(hw, IGC_RAL(0)); for (i = 0; i < IGC_RAL_MAC_ADDR_LEN; i++) hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8)); for (i = 0; i < IGC_RAH_MAC_ADDR_LEN; i++) hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8)); for (i = 0; i < ETH_ADDR_LEN; i++) hw->mac.addr[i] = hw->mac.perm_addr[i]; return IGC_SUCCESS; } /** * igc_validate_nvm_checksum_generic - Validate EEPROM checksum * @hw: pointer to the HW structure * * Calculates the EEPROM checksum by reading/adding each word of the EEPROM * and then verifies that the sum of the EEPROM is equal to 0xBABA. 
**/ s32 igc_validate_nvm_checksum_generic(struct igc_hw *hw) { s32 ret_val; u16 checksum = 0; u16 i, nvm_data; DEBUGFUNC("igc_validate_nvm_checksum_generic"); for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); if (ret_val) { DEBUGOUT("NVM Read Error\n"); return ret_val; } checksum += nvm_data; } if (checksum != (u16) NVM_SUM) { DEBUGOUT("NVM Checksum Invalid\n"); return -IGC_ERR_NVM; } return IGC_SUCCESS; } /** * igc_update_nvm_checksum_generic - Update EEPROM checksum * @hw: pointer to the HW structure * * Updates the EEPROM checksum by reading/adding each word of the EEPROM * up to the checksum. Then calculates the EEPROM checksum and writes the * value to the EEPROM. **/ s32 igc_update_nvm_checksum_generic(struct igc_hw *hw) { s32 ret_val; u16 checksum = 0; u16 i, nvm_data; DEBUGFUNC("igc_update_nvm_checksum"); for (i = 0; i < NVM_CHECKSUM_REG; i++) { ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); if (ret_val) { DEBUGOUT("NVM Read Error while updating checksum.\n"); return ret_val; } checksum += nvm_data; } checksum = (u16) NVM_SUM - checksum; ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum); if (ret_val) DEBUGOUT("NVM Write Error while updating checksum.\n"); return ret_val; } /** * igc_reload_nvm_generic - Reloads EEPROM * @hw: pointer to the HW structure * * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the * extended control register. **/ static void igc_reload_nvm_generic(struct igc_hw *hw) { u32 ctrl_ext; DEBUGFUNC("igc_reload_nvm_generic"); usec_delay(10); ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT); ctrl_ext |= IGC_CTRL_EXT_EE_RST; IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext); IGC_WRITE_FLUSH(hw); } +/** + * igc_get_fw_version - Get firmware version information + * @hw: pointer to the HW structure + * @fw_vers: pointer to output version structure + * + * unsupported/not present features return 0 in version structure + **/ +void igc_get_fw_version(struct igc_hw *hw, struct igc_fw_version *fw_vers) +{ + u16 eeprom_verh, eeprom_verl, etrack_test, fw_version; + u8 q, hval, rem, result; + u16 comb_verh, comb_verl, comb_offset; + + memset(fw_vers, 0, sizeof(struct igc_fw_version)); + /* + * basic eeprom version numbers, bits used vary by part and by tool + * used to create the nvm images. Check which data format we have. 
+ */ + switch (hw->mac.type) { + case igc_i225: + hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test); + /* find combo image version */ + hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset); + if (comb_offset && comb_offset != NVM_VER_INVALID) { + hw->nvm.ops.read(hw, NVM_COMB_VER_OFF + comb_offset + 1, + 1, &comb_verh); + hw->nvm.ops.read(hw, NVM_COMB_VER_OFF + comb_offset, + 1, &comb_verl); + + /* get Option Rom version if it exists and is valid */ + if (comb_verh && comb_verl && + comb_verh != NVM_VER_INVALID && + comb_verl != NVM_VER_INVALID) { + fw_vers->or_valid = true; + fw_vers->or_major = comb_verl >> + NVM_COMB_VER_SHFT; + fw_vers->or_build = (comb_verl << + NVM_COMB_VER_SHFT) | + (comb_verh >> + NVM_COMB_VER_SHFT); + fw_vers->or_patch = comb_verh & + NVM_COMB_VER_MASK; + } + } + break; + default: + hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test); + return; + } + hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version); + fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) + >> NVM_MAJOR_SHIFT; + + /* check for old style version format in newer images*/ + if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) { + eeprom_verl = (fw_version & NVM_COMB_VER_MASK); + } else { + eeprom_verl = (fw_version & NVM_MINOR_MASK) + >> NVM_MINOR_SHIFT; + } + /* Convert minor value to hex before assigning to output struct + * Val to be converted will not be higher than 99, per tool output + */ + q = eeprom_verl / NVM_HEX_CONV; + hval = q * NVM_HEX_TENS; + rem = eeprom_verl % NVM_HEX_CONV; + result = hval + rem; + fw_vers->eep_minor = result; + + if ((etrack_test & NVM_MAJOR_MASK) == NVM_ETRACK_VALID) { + hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl); + hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh); + fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) + | eeprom_verl; + } else if ((etrack_test & NVM_ETRACK_VALID) == 0) { + hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh); + hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl); + fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) | + eeprom_verl; + } +} diff --git a/sys/dev/igc/igc_nvm.h b/sys/dev/igc/igc_nvm.h index eae0db4b484b..b4b602af6595 100644 --- a/sys/dev/igc/igc_nvm.h +++ b/sys/dev/igc/igc_nvm.h @@ -1,30 +1,48 @@ /*- * Copyright 2021 Intel Corp * Copyright 2021 Rubicon Communications, LLC (Netgate) * SPDX-License-Identifier: BSD-3-Clause */ #ifndef _IGC_NVM_H_ #define _IGC_NVM_H_ +struct igc_fw_version { + u32 etrack_id; + u16 eep_major; + u16 eep_minor; + u16 eep_build; + + u8 invm_major; + u8 invm_minor; + u8 invm_img_type; + + bool or_valid; + u16 or_major; + u16 or_build; + u16 or_patch; +}; + void igc_init_nvm_ops_generic(struct igc_hw *hw); s32 igc_null_read_nvm(struct igc_hw *hw, u16 a, u16 b, u16 *c); void igc_null_nvm_generic(struct igc_hw *hw); s32 igc_null_led_default(struct igc_hw *hw, u16 *data); s32 igc_null_write_nvm(struct igc_hw *hw, u16 a, u16 b, u16 *c); s32 igc_acquire_nvm_generic(struct igc_hw *hw); s32 igc_poll_eerd_eewr_done(struct igc_hw *hw, int ee_reg); s32 igc_read_mac_addr_generic(struct igc_hw *hw); s32 igc_read_pba_string_generic(struct igc_hw *hw, u8 *pba_num, u32 pba_num_size); s32 igc_read_nvm_eerd(struct igc_hw *hw, u16 offset, u16 words, u16 *data); s32 igc_valid_led_default_generic(struct igc_hw *hw, u16 *data); s32 igc_validate_nvm_checksum_generic(struct igc_hw *hw); s32 igc_write_nvm_spi(struct igc_hw *hw, u16 offset, u16 words, u16 *data); s32 igc_update_nvm_checksum_generic(struct igc_hw *hw); void igc_release_nvm_generic(struct igc_hw *hw); +void 
igc_get_fw_version(struct igc_hw *hw, + struct igc_fw_version *fw_vers); #endif
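
A hedged usage sketch for the new interface: fetching the fields that igc_get_fw_version() fills in and rendering them as a string. The buffer size, format string and helper name are assumptions made for the example, not the driver's own output path.

/*
 * Illustrative caller of igc_get_fw_version(); formatting is an example only.
 */
static void
igc_show_fw_version_sketch(struct igc_hw *hw)
{
	struct igc_fw_version fw;
	char buf[80];

	igc_get_fw_version(hw, &fw);

	if (fw.or_valid)
		snprintf(buf, sizeof(buf),
		    "EEPROM %u.%u, EtrackID %08x, Option ROM %u.%u.%u",
		    fw.eep_major, fw.eep_minor, fw.etrack_id,
		    fw.or_major, fw.or_build, fw.or_patch);
	else
		snprintf(buf, sizeof(buf), "EEPROM %u.%u, EtrackID %08x",
		    fw.eep_major, fw.eep_minor, fw.etrack_id);

	printf("igc: firmware version: %s\n", buf);
}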