diff --git a/sys/dev/ixgbe/if_ix.c b/sys/dev/ixgbe/if_ix.c index 91bf7e4dd218..42523f82cc01 100644 --- a/sys/dev/ixgbe/if_ix.c +++ b/sys/dev/ixgbe/if_ix.c @@ -1,4782 +1,4782 @@ /****************************************************************************** Copyright (c) 2001-2017, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #include "opt_inet.h" #include "opt_inet6.h" #include "opt_rss.h" #include "ixgbe.h" #include "ixgbe_sriov.h" #include "ifdi_if.h" #include <net/netmap.h> #include <dev/netmap/netmap_kern.h> /************************************************************************ * Driver version ************************************************************************/ char ixgbe_driver_version[] = "4.0.1-k"; /************************************************************************ * PCI Device ID Table * * Used by probe to select devices to load on * Last field stores an index into ixgbe_strings * Last entry must be all 0s * * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } ************************************************************************/ static pci_vendor_info_t ixgbe_vendor_info_array[] = { PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, "Intel(R) 82598EB AF (Dual Fiber)"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, "Intel(R) 82598EB AF (Fiber)"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, "Intel(R) 82598EB AT (CX4)"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, "Intel(R) 82598EB AT"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, "Intel(R) 82598EB AT2"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) 82598"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, "Intel(R) 82598EB AF DA (Dual Fiber)"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, "Intel(R) 82598EB AT (Dual CX4)"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, "Intel(R) 82598EB AF (Dual Fiber LR)"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, "Intel(R) 82598EB AF (Dual Fiber SR)"), PVID(IXGBE_INTEL_VENDOR_ID,
IXGBE_DEV_ID_82598EB_SFP_LOM, "Intel(R) 82598EB LOM"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, "Intel(R) X520 82599 (KX4)"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, "Intel(R) X520 82599 (KX4 Mezzanine)"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, "Intel(R) X520 82599ES (SFI/SFP+)"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, "Intel(R) X520 82599 (XAUI/BX4)"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, "Intel(R) X520 82599 (Dual CX4)"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, "Intel(R) X520-T 82599 LOM"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, "Intel(R) X520 82599 (Combined Backplane)"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, "Intel(R) X520 82599 (Backplane w/FCoE)"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, "Intel(R) X520 82599 (Dual SFP+)"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, "Intel(R) X520 82599 (Dual SFP+ w/FCoE)"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, "Intel(R) X520-1 82599EN (SFP+)"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, "Intel(R) X520-4 82599 (Quad SFP+)"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, "Intel(R) X520-Q1 82599 (QSFP+)"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, "Intel(R) X540-AT2"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) X540-T1"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) X550-T2"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) X550-T1"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, "Intel(R) X552 (KR Backplane)"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, "Intel(R) X552 (KX4 Backplane)"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, "Intel(R) X552/X557-AT (10GBASE-T)"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, "Intel(R) X552 (1000BASE-T)"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) X552 (SFP+)"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) X553 (KR Backplane)"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) X553 L (KR Backplane)"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) X553 (SFP+)"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) X553 N (SFP+)"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) X553 (1GbE SGMII)"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) X553 L (1GbE SGMII)"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) X553/X557-AT (10GBASE-T)"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) X553 (1GbE)"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) X553 L (1GbE)"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) X540-T2 (Bypass)"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) X520 82599 (Bypass)"), /* required last entry */ PVID_END }; static void *ixgbe_register(device_t dev); static int ixgbe_if_attach_pre(if_ctx_t ctx); static int ixgbe_if_attach_post(if_ctx_t ctx); static int ixgbe_if_detach(if_ctx_t ctx); static int ixgbe_if_shutdown(if_ctx_t ctx); static int ixgbe_if_suspend(if_ctx_t ctx); static int ixgbe_if_resume(if_ctx_t ctx); static void ixgbe_if_stop(if_ctx_t ctx); void ixgbe_if_enable_intr(if_ctx_t ctx); static void ixgbe_if_disable_intr(if_ctx_t ctx); static void ixgbe_link_intr_enable(if_ctx_t ctx); static int ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid); static void 
ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr); static int ixgbe_if_media_change(if_ctx_t ctx); static int ixgbe_if_msix_intr_assign(if_ctx_t, int); static int ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu); static void ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int strip); static void ixgbe_if_multi_set(if_ctx_t ctx); static int ixgbe_if_promisc_set(if_ctx_t ctx, int flags); static int ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets); static int ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets); static void ixgbe_if_queues_free(if_ctx_t ctx); static void ixgbe_if_timer(if_ctx_t ctx, uint16_t); static void ixgbe_if_update_admin_status(if_ctx_t ctx); static void ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag); static void ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag); static int ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req); static bool ixgbe_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event); int ixgbe_intr(void *arg); /************************************************************************ * Function prototypes ************************************************************************/ #if __FreeBSD_version >= 1100036 static uint64_t ixgbe_if_get_counter(if_ctx_t, ift_counter); #endif static void ixgbe_enable_queue(struct adapter *adapter, u32 vector); static void ixgbe_disable_queue(struct adapter *adapter, u32 vector); static void ixgbe_add_device_sysctls(if_ctx_t ctx); static int ixgbe_allocate_pci_resources(if_ctx_t ctx); static int ixgbe_setup_low_power_mode(if_ctx_t ctx); static void ixgbe_config_dmac(struct adapter *adapter); static void ixgbe_configure_ivars(struct adapter *adapter); static void ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type); static u8 *ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *); static bool ixgbe_sfp_probe(if_ctx_t ctx); static void ixgbe_free_pci_resources(if_ctx_t ctx); static int ixgbe_msix_link(void *arg); static int ixgbe_msix_que(void *arg); static void ixgbe_initialize_rss_mapping(struct adapter *adapter); static void ixgbe_initialize_receive_units(if_ctx_t ctx); static void ixgbe_initialize_transmit_units(if_ctx_t ctx); static int ixgbe_setup_interface(if_ctx_t ctx); static void ixgbe_init_device_features(struct adapter *adapter); static void ixgbe_check_fan_failure(struct adapter *, u32, bool); static void ixgbe_sbuf_fw_version(struct ixgbe_hw *, struct sbuf *); static void ixgbe_print_fw_version(if_ctx_t ctx); static void ixgbe_add_media_types(if_ctx_t ctx); static void ixgbe_update_stats_counters(struct adapter *adapter); static void ixgbe_config_link(if_ctx_t ctx); static void ixgbe_get_slot_info(struct adapter *); static void ixgbe_check_wol_support(struct adapter *adapter); static void ixgbe_enable_rx_drop(struct adapter *); static void ixgbe_disable_rx_drop(struct adapter *); static void ixgbe_add_hw_stats(struct adapter *adapter); static int ixgbe_set_flowcntl(struct adapter *, int); static int ixgbe_set_advertise(struct adapter *, int); static int ixgbe_get_advertise(struct adapter *); static void ixgbe_setup_vlan_hw_support(if_ctx_t ctx); static void ixgbe_config_gpie(struct adapter *adapter); static void ixgbe_config_delay_values(struct adapter *adapter); /* Sysctl handlers */ static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS); static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS); static int ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS); static int 
ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS); static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS); static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS); static int ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS); #ifdef IXGBE_DEBUG static int ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS); static int ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS); #endif static int ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS); static int ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS); static int ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS); static int ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS); static int ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS); static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS); static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS); /* Deferred interrupt tasklets */ static void ixgbe_handle_msf(void *); static void ixgbe_handle_mod(void *); static void ixgbe_handle_phy(void *); /************************************************************************ * FreeBSD Device Interface Entry Points ************************************************************************/ static device_method_t ix_methods[] = { /* Device interface */ DEVMETHOD(device_register, ixgbe_register), DEVMETHOD(device_probe, iflib_device_probe), DEVMETHOD(device_attach, iflib_device_attach), DEVMETHOD(device_detach, iflib_device_detach), DEVMETHOD(device_shutdown, iflib_device_shutdown), DEVMETHOD(device_suspend, iflib_device_suspend), DEVMETHOD(device_resume, iflib_device_resume), #ifdef PCI_IOV DEVMETHOD(pci_iov_init, iflib_device_iov_init), DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit), DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf), #endif /* PCI_IOV */ DEVMETHOD_END }; static driver_t ix_driver = { "ix", ix_methods, sizeof(struct adapter), }; devclass_t ix_devclass; DRIVER_MODULE(ix, pci, ix_driver, ix_devclass, 0, 0); IFLIB_PNP_INFO(pci, ix_driver, ixgbe_vendor_info_array); MODULE_DEPEND(ix, pci, 1, 1, 1); MODULE_DEPEND(ix, ether, 1, 1, 1); MODULE_DEPEND(ix, iflib, 1, 1, 1); static device_method_t ixgbe_if_methods[] = { DEVMETHOD(ifdi_attach_pre, ixgbe_if_attach_pre), DEVMETHOD(ifdi_attach_post, ixgbe_if_attach_post), DEVMETHOD(ifdi_detach, ixgbe_if_detach), DEVMETHOD(ifdi_shutdown, ixgbe_if_shutdown), DEVMETHOD(ifdi_suspend, ixgbe_if_suspend), DEVMETHOD(ifdi_resume, ixgbe_if_resume), DEVMETHOD(ifdi_init, ixgbe_if_init), DEVMETHOD(ifdi_stop, ixgbe_if_stop), DEVMETHOD(ifdi_msix_intr_assign, ixgbe_if_msix_intr_assign), DEVMETHOD(ifdi_intr_enable, ixgbe_if_enable_intr), DEVMETHOD(ifdi_intr_disable, ixgbe_if_disable_intr), DEVMETHOD(ifdi_link_intr_enable, ixgbe_link_intr_enable), DEVMETHOD(ifdi_tx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable), DEVMETHOD(ifdi_rx_queue_intr_enable, ixgbe_if_rx_queue_intr_enable), DEVMETHOD(ifdi_tx_queues_alloc, ixgbe_if_tx_queues_alloc), DEVMETHOD(ifdi_rx_queues_alloc, ixgbe_if_rx_queues_alloc), DEVMETHOD(ifdi_queues_free, ixgbe_if_queues_free), DEVMETHOD(ifdi_update_admin_status, ixgbe_if_update_admin_status), DEVMETHOD(ifdi_multi_set, ixgbe_if_multi_set), DEVMETHOD(ifdi_mtu_set, ixgbe_if_mtu_set), DEVMETHOD(ifdi_crcstrip_set, ixgbe_if_crcstrip_set), DEVMETHOD(ifdi_media_status, ixgbe_if_media_status), DEVMETHOD(ifdi_media_change, ixgbe_if_media_change), DEVMETHOD(ifdi_promisc_set, ixgbe_if_promisc_set), DEVMETHOD(ifdi_timer, ixgbe_if_timer), DEVMETHOD(ifdi_vlan_register, ixgbe_if_vlan_register), DEVMETHOD(ifdi_vlan_unregister, ixgbe_if_vlan_unregister), DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter), DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req), 
DEVMETHOD(ifdi_needs_restart, ixgbe_if_needs_restart), #ifdef PCI_IOV DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init), DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit), DEVMETHOD(ifdi_iov_vf_add, ixgbe_if_iov_vf_add), #endif /* PCI_IOV */ DEVMETHOD_END }; /* * TUNEABLE PARAMETERS: */ static SYSCTL_NODE(_hw, OID_AUTO, ix, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "IXGBE driver parameters"); static driver_t ixgbe_if_driver = { "ixgbe_if", ixgbe_if_methods, sizeof(struct adapter) }; static int ixgbe_max_interrupt_rate = (4000000 / IXGBE_LOW_LATENCY); SYSCTL_INT(_hw_ix, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN, &ixgbe_max_interrupt_rate, 0, "Maximum interrupts per second"); /* Flow control setting, default to full */ static int ixgbe_flow_control = ixgbe_fc_full; SYSCTL_INT(_hw_ix, OID_AUTO, flow_control, CTLFLAG_RDTUN, &ixgbe_flow_control, 0, "Default flow control used for all adapters"); /* Advertise Speed, default to 0 (auto) */ static int ixgbe_advertise_speed = 0; SYSCTL_INT(_hw_ix, OID_AUTO, advertise_speed, CTLFLAG_RDTUN, &ixgbe_advertise_speed, 0, "Default advertised speed for all adapters"); /* * Smart speed setting, default to on. * This only works as a compile option right now, * as it's set during attach; set this to * 'ixgbe_smart_speed_off' to disable. */ static int ixgbe_smart_speed = ixgbe_smart_speed_on; /* * MSI-X should be the default for best performance, * but this allows it to be forced off for testing. */ static int ixgbe_enable_msix = 1; SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0, "Enable MSI-X interrupts"); /* * Defining this on will allow the use * of unsupported SFP+ modules; note that * doing so means you are on your own :) */ -static int allow_unsupported_sfp = FALSE; +static int allow_unsupported_sfp = false; SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN, &allow_unsupported_sfp, 0, "Allow unsupported SFP modules...use at your own risk"); /* * Not sure if Flow Director is fully baked, * so we'll default to turning it off. */ static int ixgbe_enable_fdir = 0; SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0, "Enable Flow Director"); /* Receive-Side Scaling */ static int ixgbe_enable_rss = 1; SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0, "Enable Receive-Side Scaling (RSS)"); /* * AIM: Adaptive Interrupt Moderation, * which means that the interrupt rate * is varied over time based on the * traffic for that interrupt vector */ -static int ixgbe_enable_aim = FALSE; +static int ixgbe_enable_aim = false; SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0, "Enable adaptive interrupt moderation"); #if 0 /* Keep running tab on them for sanity check */ static int ixgbe_total_ports; #endif MALLOC_DEFINE(M_IXGBE, "ix", "ix driver allocations"); /* * For Flow Director: this is the number of TX packets we sample * for the filter pool; this means every 20th packet will be probed. * * This feature can be disabled by setting this to 0.
*/ static int atr_sample_rate = 20; extern struct if_txrx ixgbe_txrx; static struct if_shared_ctx ixgbe_sctx_init = { .isc_magic = IFLIB_MAGIC, .isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */ .isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header), .isc_tx_maxsegsize = PAGE_SIZE, .isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header), .isc_tso_maxsegsize = PAGE_SIZE, .isc_rx_maxsize = PAGE_SIZE*4, .isc_rx_nsegments = 1, .isc_rx_maxsegsize = PAGE_SIZE*4, .isc_nfl = 1, .isc_ntxqs = 1, .isc_nrxqs = 1, .isc_admin_intrcnt = 1, .isc_vendor_info = ixgbe_vendor_info_array, .isc_driver_version = ixgbe_driver_version, .isc_driver = &ixgbe_if_driver, .isc_flags = IFLIB_TSO_INIT_IP, .isc_nrxd_min = {MIN_RXD}, .isc_ntxd_min = {MIN_TXD}, .isc_nrxd_max = {MAX_RXD}, .isc_ntxd_max = {MAX_TXD}, .isc_nrxd_default = {DEFAULT_RXD}, .isc_ntxd_default = {DEFAULT_TXD}, }; /************************************************************************ * ixgbe_if_tx_queues_alloc ************************************************************************/ static int ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets) { struct adapter *adapter = iflib_get_softc(ctx); if_softc_ctx_t scctx = adapter->shared; struct ix_tx_queue *que; int i, j, error; MPASS(adapter->num_tx_queues > 0); MPASS(adapter->num_tx_queues == ntxqsets); MPASS(ntxqs == 1); /* Allocate queue structure memory */ adapter->tx_queues = (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets, M_IXGBE, M_NOWAIT | M_ZERO); if (!adapter->tx_queues) { device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n"); return (ENOMEM); } for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) { struct tx_ring *txr = &que->txr; /* In case SR-IOV is enabled, align the index properly */ txr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i); txr->adapter = que->adapter = adapter; /* Allocate report status array */ txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO); if (txr->tx_rsq == NULL) { error = ENOMEM; goto fail; } for (j = 0; j < scctx->isc_ntxd[0]; j++) txr->tx_rsq[j] = QIDX_INVALID; /* get the virtual and physical address of the hardware queues */ txr->tail = IXGBE_TDT(txr->me); txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i]; txr->tx_paddr = paddrs[i]; txr->bytes = 0; txr->total_packets = 0; /* Set the rate at which we sample packets */ if (adapter->feat_en & IXGBE_FEATURE_FDIR) txr->atr_sample = atr_sample_rate; } device_printf(iflib_get_dev(ctx), "allocated for %d queues\n", adapter->num_tx_queues); return (0); fail: ixgbe_if_queues_free(ctx); return (error); } /* ixgbe_if_tx_queues_alloc */ /************************************************************************ * ixgbe_if_rx_queues_alloc ************************************************************************/ static int ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets) { struct adapter *adapter = iflib_get_softc(ctx); struct ix_rx_queue *que; int i; MPASS(adapter->num_rx_queues > 0); MPASS(adapter->num_rx_queues == nrxqsets); MPASS(nrxqs == 1); /* Allocate queue structure memory */ adapter->rx_queues = (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets, M_IXGBE, M_NOWAIT | M_ZERO); if (!adapter->rx_queues) { device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n"); return (ENOMEM); } for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) { struct rx_ring
*rxr = &que->rxr; /* In case SR-IOV is enabled, align the index properly */ rxr->me = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i); rxr->adapter = que->adapter = adapter; /* get the virtual and physical address of the hw queues */ rxr->tail = IXGBE_RDT(rxr->me); rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i]; rxr->rx_paddr = paddrs[i]; rxr->bytes = 0; rxr->que = que; } device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n", adapter->num_rx_queues); return (0); } /* ixgbe_if_rx_queues_alloc */ /************************************************************************ * ixgbe_if_queues_free ************************************************************************/ static void ixgbe_if_queues_free(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct ix_tx_queue *tx_que = adapter->tx_queues; struct ix_rx_queue *rx_que = adapter->rx_queues; int i; if (tx_que != NULL) { for (i = 0; i < adapter->num_tx_queues; i++, tx_que++) { struct tx_ring *txr = &tx_que->txr; if (txr->tx_rsq == NULL) break; free(txr->tx_rsq, M_IXGBE); txr->tx_rsq = NULL; } free(adapter->tx_queues, M_IXGBE); adapter->tx_queues = NULL; } if (rx_que != NULL) { free(adapter->rx_queues, M_IXGBE); adapter->rx_queues = NULL; } } /* ixgbe_if_queues_free */ /************************************************************************ * ixgbe_initialize_rss_mapping ************************************************************************/ static void ixgbe_initialize_rss_mapping(struct adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 reta = 0, mrqc, rss_key[10]; int queue_id, table_size, index_mult; int i, j; u32 rss_hash_config; if (adapter->feat_en & IXGBE_FEATURE_RSS) { /* Fetch the configured RSS key */ rss_getkey((uint8_t *)&rss_key); } else { /* set up random bits */ arc4rand(&rss_key, sizeof(rss_key), 0); } /* Set multiplier for RETA setup and table size based on MAC */ index_mult = 0x1; table_size = 128; switch (adapter->hw.mac.type) { case ixgbe_mac_82598EB: index_mult = 0x11; break; case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_X550EM_a: table_size = 512; break; default: break; } /* Set up the redirection table */ for (i = 0, j = 0; i < table_size; i++, j++) { if (j == adapter->num_rx_queues) j = 0; if (adapter->feat_en & IXGBE_FEATURE_RSS) { /* * Fetch the RSS bucket id for the given indirection * entry. Cap it at the number of configured buckets * (which is num_rx_queues.) */ queue_id = rss_get_indirection_to_bucket(i); queue_id = queue_id % adapter->num_rx_queues; } else queue_id = (j * index_mult); /* * The low 8 bits are for hash value (n+0); * The next 8 bits are for hash value (n+1), etc. */ reta = reta >> 8; reta = reta | (((uint32_t)queue_id) << 24); if ((i & 3) == 3) { if (i < 128) IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); else IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), reta); reta = 0; } } /* Now fill our hash function seeds */ for (i = 0; i < 10; i++) IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]); /* Perform hash on these packet types */ if (adapter->feat_en & IXGBE_FEATURE_RSS) rss_hash_config = rss_gethashconfig(); else { /* * Disable UDP - IP fragments aren't currently being handled * and so we end up with a mix of 2-tuple and 4-tuple * traffic. 
*/ rss_hash_config = RSS_HASHTYPE_RSS_IPV4 | RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_IPV6_EX | RSS_HASHTYPE_RSS_TCP_IPV6_EX; } mrqc = IXGBE_MRQC_RSSEN; if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4; if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP; if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6; if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP; if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX; if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP; if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; mrqc |= ixgbe_get_mrqc(adapter->iov_mode); IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); } /* ixgbe_initialize_rss_mapping */ /************************************************************************ * ixgbe_initialize_receive_units - Setup receive registers and features. ************************************************************************/ #define BSIZEPKT_ROUNDUP ((1 << IXGBE_SRRCTL_BSIZEPKT_SHIFT) - 1) static void ixgbe_initialize_receive_units(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); if_softc_ctx_t scctx = adapter->shared; struct ixgbe_hw *hw = &adapter->hw; struct ifnet *ifp = iflib_get_ifp(ctx); struct ix_rx_queue *que; int i, j; u32 bufsz, fctrl, srrctl, rxcsum; u32 hlreg; /* * Make sure receives are disabled while * setting up the descriptor ring */ ixgbe_disable_rx(hw); /* Enable broadcasts */ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); fctrl |= IXGBE_FCTRL_BAM; if (adapter->hw.mac.type == ixgbe_mac_82598EB) { fctrl |= IXGBE_FCTRL_DPF; fctrl |= IXGBE_FCTRL_PMCF; } IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); /* Set for Jumbo Frames? */ hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0); if (ifp->if_mtu > ETHERMTU) hlreg |= IXGBE_HLREG0_JUMBOEN; else hlreg &= ~IXGBE_HLREG0_JUMBOEN; IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg); bufsz = (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; /* Setup the Base and Length of the Rx Descriptor Ring */ for (i = 0, que = adapter->rx_queues; i < adapter->num_rx_queues; i++, que++) { struct rx_ring *rxr = &que->rxr; u64 rdba = rxr->rx_paddr; j = rxr->me; /* Setup the Base and Length of the Rx Descriptor Ring */ IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & 0x00000000ffffffffULL)); IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32)); IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc)); /* Set up the SRRCTL register */ srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(j)); srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; srrctl |= bufsz; srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; /* * Set DROP_EN iff we have no flow control and >1 queue. * Note that srrctl was cleared shortly before during reset, * so we do not need to clear the bit, but do it just in case * this code is moved elsewhere.
*/ if (adapter->num_rx_queues > 1 && adapter->hw.fc.requested_mode == ixgbe_fc_none) { srrctl |= IXGBE_SRRCTL_DROP_EN; } else { srrctl &= ~IXGBE_SRRCTL_DROP_EN; } IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(j), srrctl); /* Setup the HW Rx Head and Tail Descriptor Pointers */ IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0); IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0); /* Set the driver rx tail address */ rxr->tail = IXGBE_RDT(rxr->me); } if (adapter->hw.mac.type != ixgbe_mac_82598EB) { u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR | IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR; IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype); } rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); ixgbe_initialize_rss_mapping(adapter); if (adapter->num_rx_queues > 1) { /* RSS and RX IPP Checksum are mutually exclusive */ rxcsum |= IXGBE_RXCSUM_PCSD; } if (ifp->if_capenable & IFCAP_RXCSUM) rxcsum |= IXGBE_RXCSUM_PCSD; /* This is useful for calculating UDP/IP fragment checksums */ if (!(rxcsum & IXGBE_RXCSUM_PCSD)) rxcsum |= IXGBE_RXCSUM_IPPCSE; IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); } /* ixgbe_initialize_receive_units */ /************************************************************************ * ixgbe_initialize_transmit_units - Enable transmit units. ************************************************************************/ static void ixgbe_initialize_transmit_units(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct ixgbe_hw *hw = &adapter->hw; if_softc_ctx_t scctx = adapter->shared; struct ix_tx_queue *que; int i; /* Setup the Base and Length of the Tx Descriptor Ring */ for (i = 0, que = adapter->tx_queues; i < adapter->num_tx_queues; i++, que++) { struct tx_ring *txr = &que->txr; u64 tdba = txr->tx_paddr; u32 txctrl = 0; int j = txr->me; IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j), (tdba & 0x00000000ffffffffULL)); IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32)); IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc)); /* Setup the HW Tx Head and Tail descriptor pointers */ IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0); IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0); /* Cache the tail address */ txr->tail = IXGBE_TDT(txr->me); txr->tx_rs_cidx = txr->tx_rs_pidx; txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1; for (int k = 0; k < scctx->isc_ntxd[0]; k++) txr->tx_rsq[k] = QIDX_INVALID; /* Disable Head Writeback */ /* * Note: for X550 series devices, these registers are actually * prefixed with TPH_ instead of DCA_, but the addresses and * fields remain the same.
*/ switch (hw->mac.type) { case ixgbe_mac_82598EB: txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j)); break; default: txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j)); break; } txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; switch (hw->mac.type) { case ixgbe_mac_82598EB: IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl); break; default: IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl); break; } } if (hw->mac.type != ixgbe_mac_82598EB) { u32 dmatxctl, rttdcs; dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); dmatxctl |= IXGBE_DMATXCTL_TE; IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); /* Disable arbiter to set MTQC */ rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); rttdcs |= IXGBE_RTTDCS_ARBDIS; IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); IXGBE_WRITE_REG(hw, IXGBE_MTQC, ixgbe_get_mtqc(adapter->iov_mode)); rttdcs &= ~IXGBE_RTTDCS_ARBDIS; IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); } } /* ixgbe_initialize_transmit_units */ /************************************************************************ * ixgbe_register ************************************************************************/ static void * ixgbe_register(device_t dev) { return (&ixgbe_sctx_init); } /* ixgbe_register */ /************************************************************************ * ixgbe_if_attach_pre - Device initialization routine, part 1 * * Called when the driver is being loaded. * Identifies the type of hardware, initializes the hardware, * and initializes iflib structures. * * return 0 on success, positive on failure ************************************************************************/ static int ixgbe_if_attach_pre(if_ctx_t ctx) { struct adapter *adapter; device_t dev; if_softc_ctx_t scctx; struct ixgbe_hw *hw; int error = 0; u32 ctrl_ext; INIT_DEBUGOUT("ixgbe_attach: begin"); /* Allocate, clear, and link in our adapter structure */ dev = iflib_get_dev(ctx); adapter = iflib_get_softc(ctx); adapter->hw.back = adapter; adapter->ctx = ctx; adapter->dev = dev; scctx = adapter->shared = iflib_get_softc_ctx(ctx); adapter->media = iflib_get_media(ctx); hw = &adapter->hw; /* Determine hardware revision */ hw->vendor_id = pci_get_vendor(dev); hw->device_id = pci_get_device(dev); hw->revision_id = pci_get_revid(dev); hw->subsystem_vendor_id = pci_get_subvendor(dev); hw->subsystem_device_id = pci_get_subdevice(dev); /* Do base PCI setup - map BAR0 */ if (ixgbe_allocate_pci_resources(ctx)) { device_printf(dev, "Allocation of PCI resources failed\n"); return (ENXIO); } /* let hardware know driver is loaded */ ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); /* * Initialize the shared code */ if (ixgbe_init_shared_code(hw) != 0) { device_printf(dev, "Unable to initialize the shared code\n"); error = ENXIO; goto err_pci; } if (hw->mbx.ops.init_params) hw->mbx.ops.init_params(hw); hw->allow_unsupported_sfp = allow_unsupported_sfp; if (hw->mac.type != ixgbe_mac_82598EB) hw->phy.smart_speed = ixgbe_smart_speed; ixgbe_init_device_features(adapter); /* Enable WoL (if supported) */ ixgbe_check_wol_support(adapter); /* Verify adapter fan is still functional (if applicable) */ if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) { u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); - ixgbe_check_fan_failure(adapter, esdp, FALSE); + ixgbe_check_fan_failure(adapter, esdp, false); } /* Ensure SW/FW semaphore is free */ ixgbe_init_swfw_semaphore(hw); /* Set an initial default flow control value */ hw->fc.requested_mode = ixgbe_flow_control; - hw->phy.reset_if_overtemp = TRUE; + 
hw->phy.reset_if_overtemp = true; error = ixgbe_reset_hw(hw); - hw->phy.reset_if_overtemp = FALSE; + hw->phy.reset_if_overtemp = false; if (error == IXGBE_ERR_SFP_NOT_PRESENT) { /* * No optics in this port, set up * so the timer routine will probe * for later insertion. */ - adapter->sfp_probe = TRUE; + adapter->sfp_probe = true; error = 0; } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) { device_printf(dev, "Unsupported SFP+ module detected!\n"); error = EIO; goto err_pci; } else if (error) { device_printf(dev, "Hardware initialization failed\n"); error = EIO; goto err_pci; } /* Make sure we have a good EEPROM before we read from it */ if (ixgbe_validate_eeprom_checksum(&adapter->hw, NULL) < 0) { device_printf(dev, "The EEPROM Checksum Is Not Valid\n"); error = EIO; goto err_pci; } error = ixgbe_start_hw(hw); switch (error) { case IXGBE_ERR_EEPROM_VERSION: device_printf(dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n"); break; case IXGBE_ERR_SFP_NOT_SUPPORTED: device_printf(dev, "Unsupported SFP+ Module\n"); error = EIO; goto err_pci; case IXGBE_ERR_SFP_NOT_PRESENT: device_printf(dev, "No SFP+ Module found\n"); /* falls thru */ default: break; } /* Most of the iflib initialization... */ iflib_set_mac(ctx, hw->mac.addr); switch (adapter->hw.mac.type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_X550EM_a: scctx->isc_rss_table_size = 512; scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64; break; default: scctx->isc_rss_table_size = 128; scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 16; } /* Allow legacy interrupts */ ixgbe_txrx.ift_legacy_intr = ixgbe_intr; scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) + sizeof(u32), DBA_ALIGN), scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN); /* XXX */ scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO | CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO; if (adapter->hw.mac.type == ixgbe_mac_82598EB) { scctx->isc_tx_nsegments = IXGBE_82598_SCATTER; } else { scctx->isc_tx_csum_flags |= CSUM_SCTP |CSUM_IP6_SCTP; scctx->isc_tx_nsegments = IXGBE_82599_SCATTER; } scctx->isc_msix_bar = pci_msix_table_bar(dev); scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments; scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE; scctx->isc_tx_tso_segsize_max = PAGE_SIZE; scctx->isc_txrx = &ixgbe_txrx; scctx->isc_capabilities = scctx->isc_capenable = IXGBE_CAPS; return (0); err_pci: ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext); ixgbe_free_pci_resources(ctx); return (error); } /* ixgbe_if_attach_pre */ /********************************************************************* * ixgbe_if_attach_post - Device initialization routine, part 2 * * Called during driver load, but after interrupts and * resources have been allocated and configured. * Sets up some data structures not relevant to iflib. 
* * return 0 on success, positive on failure *********************************************************************/ static int ixgbe_if_attach_post(if_ctx_t ctx) { device_t dev; struct adapter *adapter; struct ixgbe_hw *hw; int error = 0; dev = iflib_get_dev(ctx); adapter = iflib_get_softc(ctx); hw = &adapter->hw; if (adapter->intr_type == IFLIB_INTR_LEGACY && (adapter->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) { device_printf(dev, "Device does not support legacy interrupts"); error = ENXIO; goto err; } /* Allocate multicast array memory. */ adapter->mta = malloc(sizeof(*adapter->mta) * MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT); if (adapter->mta == NULL) { device_printf(dev, "Can not allocate multicast setup array\n"); error = ENOMEM; goto err; } /* hw.ix defaults init */ ixgbe_set_advertise(adapter, ixgbe_advertise_speed); /* Enable the optics for 82599 SFP+ fiber */ ixgbe_enable_tx_laser(hw); /* Enable power to the phy. */ - ixgbe_set_phy_power(hw, TRUE); + ixgbe_set_phy_power(hw, true); ixgbe_initialize_iov(adapter); error = ixgbe_setup_interface(ctx); if (error) { device_printf(dev, "Interface setup failed: %d\n", error); goto err; } ixgbe_if_update_admin_status(ctx); /* Initialize statistics */ ixgbe_update_stats_counters(adapter); ixgbe_add_hw_stats(adapter); /* Check PCIE slot type/speed/width */ ixgbe_get_slot_info(adapter); /* * Do time init and sysctl init here, but * only on the first port of a bypass adapter. */ ixgbe_bypass_init(adapter); /* Display NVM and Option ROM versions */ ixgbe_print_fw_version(ctx); /* Set an initial dmac value */ adapter->dmac = 0; /* Set initial advertised speeds (if applicable) */ adapter->advertise = ixgbe_get_advertise(adapter); if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) ixgbe_define_iov_schemas(dev, &error); /* Add sysctls */ ixgbe_add_device_sysctls(ctx); return (0); err: return (error); } /* ixgbe_if_attach_post */ /************************************************************************ * ixgbe_check_wol_support * * Checks whether the adapter's ports are capable of * Wake On LAN by reading the adapter's NVM. * * Sets each port's hw->wol_enabled value depending * on the value read here. ************************************************************************/ static void ixgbe_check_wol_support(struct adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u16 dev_caps = 0; /* Find out WoL support for port */ adapter->wol_support = hw->wol_enabled = 0; ixgbe_get_device_caps(hw, &dev_caps); if ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0_1) || ((dev_caps & IXGBE_DEVICE_CAPS_WOL_PORT0) && hw->bus.func == 0)) adapter->wol_support = hw->wol_enabled = 1; /* Save initial wake up filter configuration */ adapter->wufc = IXGBE_READ_REG(hw, IXGBE_WUFC); return; } /* ixgbe_check_wol_support */ /************************************************************************ * ixgbe_setup_interface * * Setup networking device structure and register an interface. 
************************************************************************/ static int ixgbe_setup_interface(if_ctx_t ctx) { struct ifnet *ifp = iflib_get_ifp(ctx); struct adapter *adapter = iflib_get_softc(ctx); INIT_DEBUGOUT("ixgbe_setup_interface: begin"); if_setbaudrate(ifp, IF_Gbps(10)); adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; adapter->phy_layer = ixgbe_get_supported_physical_layer(&adapter->hw); ixgbe_add_media_types(ctx); /* Autoselect media by default */ ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO); return (0); } /* ixgbe_setup_interface */ /************************************************************************ * ixgbe_if_get_counter ************************************************************************/ static uint64_t ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt) { struct adapter *adapter = iflib_get_softc(ctx); if_t ifp = iflib_get_ifp(ctx); switch (cnt) { case IFCOUNTER_IPACKETS: return (adapter->ipackets); case IFCOUNTER_OPACKETS: return (adapter->opackets); case IFCOUNTER_IBYTES: return (adapter->ibytes); case IFCOUNTER_OBYTES: return (adapter->obytes); case IFCOUNTER_IMCASTS: return (adapter->imcasts); case IFCOUNTER_OMCASTS: return (adapter->omcasts); case IFCOUNTER_COLLISIONS: return (0); case IFCOUNTER_IQDROPS: return (adapter->iqdrops); case IFCOUNTER_OQDROPS: return (0); case IFCOUNTER_IERRORS: return (adapter->ierrors); default: return (if_get_counter_default(ifp, cnt)); } } /* ixgbe_if_get_counter */ /************************************************************************ * ixgbe_if_i2c_req ************************************************************************/ static int ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req) { struct adapter *adapter = iflib_get_softc(ctx); struct ixgbe_hw *hw = &adapter->hw; int i; if (hw->phy.ops.read_i2c_byte == NULL) return (ENXIO); for (i = 0; i < req->len; i++) hw->phy.ops.read_i2c_byte(hw, req->offset + i, req->dev_addr, &req->data[i]); return (0); } /* ixgbe_if_i2c_req */ /* ixgbe_if_needs_restart - Tell iflib when the driver needs to be reinitialized * @ctx: iflib context * @event: event code to check * * Defaults to returning true for unknown events. 
* * @returns true if iflib needs to reinit the interface */ static bool ixgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event) { switch (event) { case IFLIB_RESTART_VLAN_CONFIG: return (false); default: return (true); } } /************************************************************************ * ixgbe_add_media_types ************************************************************************/ static void ixgbe_add_media_types(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct ixgbe_hw *hw = &adapter->hw; device_t dev = iflib_get_dev(ctx); u64 layer; layer = adapter->phy_layer = ixgbe_get_supported_physical_layer(hw); /* Media types with matching FreeBSD media defines */ if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_T, 0, NULL); if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL); if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX) ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL); if (layer & IXGBE_PHYSICAL_LAYER_10BASE_T) ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL); if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU || layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL); if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) { ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_LR, 0, NULL); if (hw->phy.multispeed_fiber) ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_LX, 0, NULL); } if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) { ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL); if (hw->phy.multispeed_fiber) ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL); } else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL); if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL); #ifdef IFM_ETH_XTYPE if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_KR, 0, NULL); if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) ifmedia_add( adapter->media, IFM_ETHER | IFM_10G_KX4, 0, NULL); if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_KX, 0, NULL); if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_KX, 0, NULL); #else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) { device_printf(dev, "Media supported: 10GbaseKR\n"); device_printf(dev, "10GbaseKR mapped to 10GbaseSR\n"); ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_SR, 0, NULL); } if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4) { device_printf(dev, "Media supported: 10GbaseKX4\n"); device_printf(dev, "10GbaseKX4 mapped to 10GbaseCX4\n"); ifmedia_add(adapter->media, IFM_ETHER | IFM_10G_CX4, 0, NULL); } if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) { device_printf(dev, "Media supported: 1000baseKX\n"); device_printf(dev, "1000baseKX mapped to 1000baseCX\n"); ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_CX, 0, NULL); } if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX) { device_printf(dev, "Media supported: 2500baseKX\n"); device_printf(dev, "2500baseKX mapped to 2500baseSX\n"); ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_SX, 0, NULL); } #endif if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) device_printf(dev, "Media supported: 1000baseBX\n"); if (hw->device_id == IXGBE_DEV_ID_82598AT) { ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 
0, NULL); } ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); } /* ixgbe_add_media_types */ /************************************************************************ * ixgbe_is_sfp ************************************************************************/ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) { switch (hw->mac.type) { case ixgbe_mac_82598EB: if (hw->phy.type == ixgbe_phy_nl) - return (TRUE); - return (FALSE); + return (true); + return (false); case ixgbe_mac_82599EB: switch (hw->mac.ops.get_media_type(hw)) { case ixgbe_media_type_fiber: case ixgbe_media_type_fiber_qsfp: - return (TRUE); + return (true); default: - return (FALSE); + return (false); } case ixgbe_mac_X550EM_x: case ixgbe_mac_X550EM_a: if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) - return (TRUE); - return (FALSE); + return (true); + return (false); default: - return (FALSE); + return (false); } } /* ixgbe_is_sfp */ /************************************************************************ * ixgbe_config_link ************************************************************************/ static void ixgbe_config_link(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct ixgbe_hw *hw = &adapter->hw; u32 autoneg, err = 0; bool sfp, negotiate; sfp = ixgbe_is_sfp(hw); if (sfp) { adapter->task_requests |= IXGBE_REQUEST_TASK_MOD; iflib_admin_intr_deferred(ctx); } else { if (hw->mac.ops.check_link) err = ixgbe_check_link(hw, &adapter->link_speed, - &adapter->link_up, FALSE); + &adapter->link_up, false); if (err) return; autoneg = hw->phy.autoneg_advertised; if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) err = hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate); if (err) return; if (hw->mac.ops.setup_link) err = hw->mac.ops.setup_link(hw, autoneg, adapter->link_up); } } /* ixgbe_config_link */ /************************************************************************ * ixgbe_update_stats_counters - Update board statistics counters. 
************************************************************************/ static void ixgbe_update_stats_counters(struct adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_hw_stats *stats = &adapter->stats.pf; u32 missed_rx = 0, bprc, lxon, lxoff, total; u32 lxoffrxc; u64 total_missed_rx = 0; stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC); stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC); stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC); stats->mpc[0] += IXGBE_READ_REG(hw, IXGBE_MPC(0)); for (int i = 0; i < 16; i++) { stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); } stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC); stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC); stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); /* Hardware workaround, gprc counts missed packets */ stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); stats->gprc -= missed_rx; if (hw->mac.type != ixgbe_mac_82598EB) { stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) + ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32); stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) + ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32); stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) + ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32); stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); stats->lxoffrxc += lxoffrxc; } else { stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); lxoffrxc = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); stats->lxoffrxc += lxoffrxc; /* 82598 only has a counter in the high register */ stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); } /* * For watchdog management we need to know if we have been paused * during the last interval, so capture that here. */ if (lxoffrxc) adapter->shared->isc_pause_frames = 1; /* * Workaround: mprc hardware is incorrectly counting * broadcasts, so for now we subtract those. 
*/ bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); stats->bprc += bprc; stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); if (hw->mac.type == ixgbe_mac_82598EB) stats->mprc -= bprc; stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); stats->lxontxc += lxon; lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); stats->lxofftxc += lxoff; total = lxon + lxoff; stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); stats->gptc -= total; stats->mptc -= total; stats->ptc64 -= total; stats->gotc -= total * ETHER_MIN_LEN; stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC); stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC); stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC); stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT); stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC); stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST); /* Only read FCOE on 82599 */ if (hw->mac.type != ixgbe_mac_82598EB) { stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); } /* Fill out the OS statistics structure */ IXGBE_SET_IPACKETS(adapter, stats->gprc); IXGBE_SET_OPACKETS(adapter, stats->gptc); IXGBE_SET_IBYTES(adapter, stats->gorc); IXGBE_SET_OBYTES(adapter, stats->gotc); IXGBE_SET_IMCASTS(adapter, stats->mprc); IXGBE_SET_OMCASTS(adapter, stats->mptc); IXGBE_SET_COLLISIONS(adapter, 0); IXGBE_SET_IQDROPS(adapter, total_missed_rx); /* * Aggregate following types of errors as RX errors: * - CRC error count, * - illegal byte error count, * - checksum error count, * - missed packets count, * - length error count, * - undersized packets count, * - fragmented packets count, * - oversized packets count, * - jabber count. */ IXGBE_SET_IERRORS(adapter, stats->crcerrs + stats->illerrc + stats->xec + stats->mpc[0] + stats->rlec + stats->ruc + stats->rfc + stats->roc + stats->rjc); } /* ixgbe_update_stats_counters */ /************************************************************************ * ixgbe_add_hw_stats * * Add sysctl variables, one per statistic, to the system. 
************************************************************************/ static void ixgbe_add_hw_stats(struct adapter *adapter) { device_t dev = iflib_get_dev(adapter->ctx); struct ix_rx_queue *rx_que; struct ix_tx_queue *tx_que; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct sysctl_oid *tree = device_get_sysctl_tree(dev); struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); struct ixgbe_hw_stats *stats = &adapter->stats.pf; struct sysctl_oid *stat_node, *queue_node; struct sysctl_oid_list *stat_list, *queue_list; int i; #define QUEUE_NAME_LEN 32 char namebuf[QUEUE_NAME_LEN]; /* Driver Statistics */ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events", CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq", CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled"); for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) { struct tx_ring *txr = &tx_que->txr; snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0, ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head"); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0, ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx", CTLFLAG_RD, &txr->tso_tx, "TSO"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets", CTLFLAG_RD, &txr->total_packets, "Queue Packets Transmitted"); } for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) { struct rx_ring *rxr = &rx_que->rxr; snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &adapter->rx_queues[i], 0, ixgbe_sysctl_interrupt_rate_handler, "IU", "Interrupt Rate"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs", CTLFLAG_RD, &(adapter->rx_queues[i].irqs), "irqs on this queue"); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0, ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head"); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0, ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets", CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes", CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies", CTLFLAG_RD, &rxr->rx_copies, "Copied RX Frames"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded", CTLFLAG_RD, &rxr->rx_discarded, "Discarded RX packets"); } /* MAC stats get their own sub node */ stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics"); stat_list = SYSCTL_CHILDREN(stat_node); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_errs", CTLFLAG_RD, 
&adapter->ierrors, IXGBE_SYSCTL_DESC_RX_ERRS); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs", CTLFLAG_RD, &stats->crcerrs, "CRC Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs", CTLFLAG_RD, &stats->illerrc, "Illegal Byte Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs", CTLFLAG_RD, &stats->errbc, "Byte Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards", CTLFLAG_RD, &stats->mspdc, "MAC Short Packets Discarded"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults", CTLFLAG_RD, &stats->mlfc, "MAC Local Faults"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults", CTLFLAG_RD, &stats->mrfc, "MAC Remote Faults"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs", CTLFLAG_RD, &stats->rlec, "Receive Length Errors"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_missed_packets", CTLFLAG_RD, &stats->mpc[0], "RX Missed Packet Count"); /* Flow Control stats */ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd", CTLFLAG_RD, &stats->lxontxc, "Link XON Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd", CTLFLAG_RD, &stats->lxonrxc, "Link XON Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd", CTLFLAG_RD, &stats->lxofftxc, "Link XOFF Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd", CTLFLAG_RD, &stats->lxoffrxc, "Link XOFF Received"); /* Packet Reception Stats */ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd", CTLFLAG_RD, &stats->tor, "Total Octets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd", CTLFLAG_RD, &stats->gorc, "Good Octets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd", CTLFLAG_RD, &stats->tpr, "Total Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd", CTLFLAG_RD, &stats->gprc, "Good Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd", CTLFLAG_RD, &stats->mprc, "Multicast Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd", CTLFLAG_RD, &stats->bprc, "Broadcast Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64", CTLFLAG_RD, &stats->prc64, "64 byte frames received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127", CTLFLAG_RD, &stats->prc127, "65-127 byte frames received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255", CTLFLAG_RD, &stats->prc255, "128-255 byte frames received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511", CTLFLAG_RD, &stats->prc511, "256-511 byte frames received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023", CTLFLAG_RD, &stats->prc1023, "512-1023 byte frames received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522", CTLFLAG_RD, &stats->prc1522, "1024-1522 byte frames received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized", CTLFLAG_RD, &stats->ruc, "Receive Undersized"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented", CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized", CTLFLAG_RD, &stats->roc, "Oversized Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd", CTLFLAG_RD, &stats->rjc, "Received Jabber"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd", CTLFLAG_RD, &stats->mngprc, "Management Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd", CTLFLAG_RD, &stats->mngpdc, "Management Packets Dropped");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs", CTLFLAG_RD, &stats->xec, "Checksum Errors"); /* Packet Transmission Stats */ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd", CTLFLAG_RD, &stats->tpt, "Total Packets Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd", CTLFLAG_RD, &stats->bptc, "Broadcast Packets Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd", CTLFLAG_RD, &stats->mptc, "Multicast Packets Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd", CTLFLAG_RD, &stats->mngptc, "Management Packets Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64", CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted "); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127", CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255", CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511", CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023", CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522", CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted"); } /* ixgbe_add_hw_stats */ /************************************************************************ * ixgbe_sysctl_tdh_handler - Transmit Descriptor Head handler function * * Retrieves the TDH value from the hardware ************************************************************************/ static int ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS) { struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1); int error; unsigned int val; if (!txr) return (0); val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me)); error = sysctl_handle_int(oidp, &val, 0, req); if (error || !req->newptr) return error; return (0); } /* ixgbe_sysctl_tdh_handler */ /************************************************************************ * ixgbe_sysctl_tdt_handler - Transmit Descriptor Tail handler function * * Retrieves the TDT value from the hardware ************************************************************************/ static int ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS) { struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1); int error; unsigned int val; if (!txr) return (0); val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me)); error = sysctl_handle_int(oidp, &val, 0, req); if (error || !req->newptr) return error; return (0); } /* ixgbe_sysctl_tdt_handler */ /************************************************************************ * ixgbe_sysctl_rdh_handler - Receive Descriptor Head handler function * * Retrieves the RDH value from the hardware ************************************************************************/ static int ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS) { struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); int error; unsigned int val; if (!rxr) return (0); val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me)); error = sysctl_handle_int(oidp, &val, 0, req); if (error || !req->newptr) return error; return (0); } /* ixgbe_sysctl_rdh_handler */ 
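/* All four descriptor head/tail handlers (TDH/TDT/RDH/RDT) share one read-only pattern: fetch the ring's hardware register, report it through sysctl_handle_int(), and never accept a new value (the OIDs are CTLFLAG_RD). */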
/************************************************************************ * ixgbe_sysctl_rdt_handler - Receive Descriptor Tail handler function * * Retrieves the RDT value from the hardware ************************************************************************/ static int ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS) { struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); int error; unsigned int val; if (!rxr) return (0); val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me)); error = sysctl_handle_int(oidp, &val, 0, req); if (error || !req->newptr) return error; return (0); } /* ixgbe_sysctl_rdt_handler */ /************************************************************************ * ixgbe_if_vlan_register * * Run via vlan config EVENT, it enables us to use the * HW Filter table since we can get the vlan id. This * just creates the entry in the soft version of the * VFTA, init will repopulate the real table. ************************************************************************/ static void ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag) { struct adapter *adapter = iflib_get_softc(ctx); u16 index, bit; index = (vtag >> 5) & 0x7F; bit = vtag & 0x1F; adapter->shadow_vfta[index] |= (1 << bit); ++adapter->num_vlans; ixgbe_setup_vlan_hw_support(ctx); } /* ixgbe_if_vlan_register */ /************************************************************************ * ixgbe_if_vlan_unregister * * Run via vlan unconfig EVENT, remove our entry in the soft vfta. ************************************************************************/ static void ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag) { struct adapter *adapter = iflib_get_softc(ctx); u16 index, bit; index = (vtag >> 5) & 0x7F; bit = vtag & 0x1F; adapter->shadow_vfta[index] &= ~(1 << bit); --adapter->num_vlans; /* Re-init to load the changes */ ixgbe_setup_vlan_hw_support(ctx); } /* ixgbe_if_vlan_unregister */ /************************************************************************ * ixgbe_setup_vlan_hw_support ************************************************************************/ static void ixgbe_setup_vlan_hw_support(if_ctx_t ctx) { struct ifnet *ifp = iflib_get_ifp(ctx); struct adapter *adapter = iflib_get_softc(ctx); struct ixgbe_hw *hw = &adapter->hw; struct rx_ring *rxr; int i; u32 ctrl; /* * We get here thru init_locked, meaning * a soft reset, this has already cleared * the VFTA and other state, so if there * have been no vlan's registered do nothing. */ if (adapter->num_vlans == 0) return; /* Setup the queues for vlans */ if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { for (i = 0; i < adapter->num_rx_queues; i++) { rxr = &adapter->rx_queues[i].rxr; /* On 82599 the VLAN enable is per/queue in RXDCTL */ if (hw->mac.type != ixgbe_mac_82598EB) { ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); ctrl |= IXGBE_RXDCTL_VME; IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl); } - rxr->vtag_strip = TRUE; + rxr->vtag_strip = true; } } if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0) return; /* * A soft reset zero's out the VFTA, so * we need to repopulate it now. 
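* Each 32-bit VFTA register shadows 32 VLAN IDs (index = vtag >> 5, bit = vtag & 0x1F, matching the math in the register/unregister handlers above), so only registers with a nonzero shadow copy need to be rewritten.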
*/ for (i = 0; i < IXGBE_VFTA_SIZE; i++) if (adapter->shadow_vfta[i] != 0) IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), adapter->shadow_vfta[i]); ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); /* Enable the Filter Table if enabled */ if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) { ctrl &= ~IXGBE_VLNCTRL_CFIEN; ctrl |= IXGBE_VLNCTRL_VFE; } if (hw->mac.type == ixgbe_mac_82598EB) ctrl |= IXGBE_VLNCTRL_VME; IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); } /* ixgbe_setup_vlan_hw_support */ /************************************************************************ * ixgbe_get_slot_info * * Get the width and transaction speed of * the slot this adapter is plugged into. ************************************************************************/ static void ixgbe_get_slot_info(struct adapter *adapter) { device_t dev = iflib_get_dev(adapter->ctx); struct ixgbe_hw *hw = &adapter->hw; - int bus_info_valid = TRUE; + int bus_info_valid = true; u32 offset; u16 link; /* Some devices are behind an internal bridge */ switch (hw->device_id) { case IXGBE_DEV_ID_82599_SFP_SF_QP: case IXGBE_DEV_ID_82599_QSFP_SF_QP: goto get_parent_info; default: break; } ixgbe_get_bus_info(hw); /* * Some devices don't use PCI-E, but there is no need * to display "Unknown" for bus speed and width. */ switch (hw->mac.type) { case ixgbe_mac_X550EM_x: case ixgbe_mac_X550EM_a: return; default: goto display; } get_parent_info: /* * For the Quad port adapter we need to parse back * up the PCI tree to find the speed of the expansion * slot into which this adapter is plugged. A bit more work. */ dev = device_get_parent(device_get_parent(dev)); #ifdef IXGBE_DEBUG device_printf(dev, "parent pcib = %x,%x,%x\n", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev)); #endif dev = device_get_parent(device_get_parent(dev)); #ifdef IXGBE_DEBUG device_printf(dev, "slot pcib = %x,%x,%x\n", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev)); #endif /* Now get the PCI Express Capabilities offset */ if (pci_find_cap(dev, PCIY_EXPRESS, &offset)) { /* * Hmm...can't get PCI-Express capabilities. * Falling back to default method. */ - bus_info_valid = FALSE; + bus_info_valid = false; ixgbe_get_bus_info(hw); goto display; } /* ...and read the Link Status Register */ link = pci_read_config(dev, offset + PCIER_LINK_STA, 2); ixgbe_set_pci_config_data_generic(hw, link); display: device_printf(dev, "PCI Express Bus: Speed %s %s\n", ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" : (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" : (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" : "Unknown"), ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" : (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" : (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" : "Unknown")); if (bus_info_valid) { if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) && ((hw->bus.width <= ixgbe_bus_width_pcie_x4) && (hw->bus.speed == ixgbe_bus_speed_2500))) { device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n"); device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n"); } if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) && ((hw->bus.width <= ixgbe_bus_width_pcie_x8) && (hw->bus.speed < ixgbe_bus_speed_8000))) { device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n"); device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n"); } } else device_printf(dev, "Unable to determine slot speed/width. 
The speed/width reported are that of the internal switch.\n"); return; } /* ixgbe_get_slot_info */ /************************************************************************ * ixgbe_if_msix_intr_assign * * Setup MSI-X Interrupt resources and handlers ************************************************************************/ static int ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix) { struct adapter *adapter = iflib_get_softc(ctx); struct ix_rx_queue *rx_que = adapter->rx_queues; struct ix_tx_queue *tx_que; int error, rid, vector = 0; int cpu_id = 0; char buf[16]; /* Admin Que is vector 0*/ rid = vector + 1; for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) { rid = vector + 1; snprintf(buf, sizeof(buf), "rxq%d", i); error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf); if (error) { device_printf(iflib_get_dev(ctx), "Failed to allocate que int %d err: %d", i, error); adapter->num_rx_queues = i + 1; goto fail; } rx_que->msix = vector; if (adapter->feat_en & IXGBE_FEATURE_RSS) { /* * The queue ID is used as the RSS layer bucket ID. * We look up the queue ID -> RSS CPU ID and select * that. */ cpu_id = rss_getcpu(i % rss_getnumbuckets()); } else { /* * Bind the MSI-X vector, and thus the * rings to the corresponding cpu. * * This just happens to match the default RSS * round-robin bucket -> queue -> CPU allocation. */ if (adapter->num_rx_queues > 1) cpu_id = i; } } for (int i = 0; i < adapter->num_tx_queues; i++) { snprintf(buf, sizeof(buf), "txq%d", i); tx_que = &adapter->tx_queues[i]; tx_que->msix = i % adapter->num_rx_queues; iflib_softirq_alloc_generic(ctx, &adapter->rx_queues[tx_que->msix].que_irq, IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf); } rid = vector + 1; error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid, IFLIB_INTR_ADMIN, ixgbe_msix_link, adapter, 0, "aq"); if (error) { device_printf(iflib_get_dev(ctx), "Failed to register admin handler"); return (error); } adapter->vector = vector; return (0); fail: iflib_irq_free(ctx, &adapter->irq); rx_que = adapter->rx_queues; for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) iflib_irq_free(ctx, &rx_que->que_irq); return (error); } /* ixgbe_if_msix_intr_assign */ static inline void ixgbe_perform_aim(struct adapter *adapter, struct ix_rx_queue *que) { uint32_t newitr = 0; struct rx_ring *rxr = &que->rxr; /* * Do Adaptive Interrupt Moderation: * - Write out last calculated setting * - Calculate based on average size over * the last interval. 
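* Because the value written now was computed on the previous pass, the moderation always lags one interrupt behind the traffic it measured. For example, a 1500 byte average yields (1500 + 24) / 2 = 762, since 1524 falls outside the 300-1200 mid range that is divided by 3 instead of 2.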
*/ if (que->eitr_setting) { IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(que->msix), que->eitr_setting); } que->eitr_setting = 0; /* Idle, do nothing */ if (rxr->bytes == 0) { return; } if ((rxr->bytes) && (rxr->packets)) { newitr = (rxr->bytes / rxr->packets); } newitr += 24; /* account for hardware frame, crc */ /* set an upper boundary */ newitr = min(newitr, 3000); /* Be nice to the mid range */ if ((newitr > 300) && (newitr < 1200)) { newitr = (newitr / 3); } else { newitr = (newitr / 2); } if (adapter->hw.mac.type == ixgbe_mac_82598EB) { newitr |= newitr << 16; } else { newitr |= IXGBE_EITR_CNT_WDIS; } /* save for next interrupt */ que->eitr_setting = newitr; /* Reset state */ rxr->bytes = 0; rxr->packets = 0; return; } /********************************************************************* * ixgbe_msix_que - MSI-X Queue Interrupt Service routine **********************************************************************/ static int ixgbe_msix_que(void *arg) { struct ix_rx_queue *que = arg; struct adapter *adapter = que->adapter; struct ifnet *ifp = iflib_get_ifp(que->adapter->ctx); /* Protect against spurious interrupts */ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return (FILTER_HANDLED); ixgbe_disable_queue(adapter, que->msix); ++que->irqs; /* Check for AIM */ if (adapter->enable_aim) { ixgbe_perform_aim(adapter, que); } return (FILTER_SCHEDULE_THREAD); } /* ixgbe_msix_que */ /************************************************************************ * ixgbe_media_status - Media Ioctl callback * * Called whenever the user queries the status of * the interface using ifconfig. ************************************************************************/ static void ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr) { struct adapter *adapter = iflib_get_softc(ctx); struct ixgbe_hw *hw = &adapter->hw; int layer; INIT_DEBUGOUT("ixgbe_if_media_status: begin"); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (!adapter->link_active) return; ifmr->ifm_status |= IFM_ACTIVE; layer = adapter->phy_layer; if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T || layer & IXGBE_PHYSICAL_LAYER_1000BASE_T || layer & IXGBE_PHYSICAL_LAYER_100BASE_TX || layer & IXGBE_PHYSICAL_LAYER_10BASE_T) switch (adapter->link_speed) { case IXGBE_LINK_SPEED_10GB_FULL: ifmr->ifm_active |= IFM_10G_T | IFM_FDX; break; case IXGBE_LINK_SPEED_1GB_FULL: ifmr->ifm_active |= IFM_1000_T | IFM_FDX; break; case IXGBE_LINK_SPEED_100_FULL: ifmr->ifm_active |= IFM_100_TX | IFM_FDX; break; case IXGBE_LINK_SPEED_10_FULL: ifmr->ifm_active |= IFM_10_T | IFM_FDX; break; } if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU || layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) switch (adapter->link_speed) { case IXGBE_LINK_SPEED_10GB_FULL: ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX; break; } if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) switch (adapter->link_speed) { case IXGBE_LINK_SPEED_10GB_FULL: ifmr->ifm_active |= IFM_10G_LR | IFM_FDX; break; case IXGBE_LINK_SPEED_1GB_FULL: ifmr->ifm_active |= IFM_1000_LX | IFM_FDX; break; } if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM) switch (adapter->link_speed) { case IXGBE_LINK_SPEED_10GB_FULL: ifmr->ifm_active |= IFM_10G_LRM | IFM_FDX; break; case IXGBE_LINK_SPEED_1GB_FULL: ifmr->ifm_active |= IFM_1000_LX | IFM_FDX; break; } if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR || layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) switch (adapter->link_speed) { case IXGBE_LINK_SPEED_10GB_FULL: ifmr->ifm_active |= IFM_10G_SR | IFM_FDX; break; case IXGBE_LINK_SPEED_1GB_FULL: ifmr->ifm_active |= IFM_1000_SX | IFM_FDX; 
break; } if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) switch (adapter->link_speed) { case IXGBE_LINK_SPEED_10GB_FULL: ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX; break; } /* * XXX: These need to use the proper media types once * they're added. */ #ifndef IFM_ETH_XTYPE if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) switch (adapter->link_speed) { case IXGBE_LINK_SPEED_10GB_FULL: ifmr->ifm_active |= IFM_10G_SR | IFM_FDX; break; case IXGBE_LINK_SPEED_2_5GB_FULL: ifmr->ifm_active |= IFM_2500_SX | IFM_FDX; break; case IXGBE_LINK_SPEED_1GB_FULL: ifmr->ifm_active |= IFM_1000_CX | IFM_FDX; break; } else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 || layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) switch (adapter->link_speed) { case IXGBE_LINK_SPEED_10GB_FULL: ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX; break; case IXGBE_LINK_SPEED_2_5GB_FULL: ifmr->ifm_active |= IFM_2500_SX | IFM_FDX; break; case IXGBE_LINK_SPEED_1GB_FULL: ifmr->ifm_active |= IFM_1000_CX | IFM_FDX; break; } #else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) switch (adapter->link_speed) { case IXGBE_LINK_SPEED_10GB_FULL: ifmr->ifm_active |= IFM_10G_KR | IFM_FDX; break; case IXGBE_LINK_SPEED_2_5GB_FULL: ifmr->ifm_active |= IFM_2500_KX | IFM_FDX; break; case IXGBE_LINK_SPEED_1GB_FULL: ifmr->ifm_active |= IFM_1000_KX | IFM_FDX; break; } else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 || layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX || layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) switch (adapter->link_speed) { case IXGBE_LINK_SPEED_10GB_FULL: ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX; break; case IXGBE_LINK_SPEED_2_5GB_FULL: ifmr->ifm_active |= IFM_2500_KX | IFM_FDX; break; case IXGBE_LINK_SPEED_1GB_FULL: ifmr->ifm_active |= IFM_1000_KX | IFM_FDX; break; } #endif /* If nothing is recognized... */ if (IFM_SUBTYPE(ifmr->ifm_active) == 0) ifmr->ifm_active |= IFM_UNKNOWN; /* Display current flow control setting used on link */ if (hw->fc.current_mode == ixgbe_fc_rx_pause || hw->fc.current_mode == ixgbe_fc_full) ifmr->ifm_active |= IFM_ETH_RXPAUSE; if (hw->fc.current_mode == ixgbe_fc_tx_pause || hw->fc.current_mode == ixgbe_fc_full) ifmr->ifm_active |= IFM_ETH_TXPAUSE; } /* ixgbe_if_media_status */ /************************************************************************ * ixgbe_media_change - Media Ioctl callback * * Called when the user changes speed/duplex using * media/mediaopt option with ifconfig. ************************************************************************/ static int ixgbe_if_media_change(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct ifmedia *ifm = iflib_get_media(ctx); struct ixgbe_hw *hw = &adapter->hw; ixgbe_link_speed speed = 0; INIT_DEBUGOUT("ixgbe_if_media_change: begin"); if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); if (hw->phy.media_type == ixgbe_media_type_backplane) return (EPERM); /* * We don't actually need to check against the supported * media types of the adapter; ifmedia will take care of * that for us. 
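* (ifmedia_ioctl() matches the requested media word against the list built by ixgbe_add_media_types() and rejects anything that was never added.)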
*/ switch (IFM_SUBTYPE(ifm->ifm_media)) { case IFM_AUTO: case IFM_10G_T: speed |= IXGBE_LINK_SPEED_100_FULL; speed |= IXGBE_LINK_SPEED_1GB_FULL; speed |= IXGBE_LINK_SPEED_10GB_FULL; break; case IFM_10G_LRM: case IFM_10G_LR: #ifndef IFM_ETH_XTYPE case IFM_10G_SR: /* KR, too */ case IFM_10G_CX4: /* KX4 */ #else case IFM_10G_KR: case IFM_10G_KX4: #endif speed |= IXGBE_LINK_SPEED_1GB_FULL; speed |= IXGBE_LINK_SPEED_10GB_FULL; break; #ifndef IFM_ETH_XTYPE case IFM_1000_CX: /* KX */ #else case IFM_1000_KX: #endif case IFM_1000_LX: case IFM_1000_SX: speed |= IXGBE_LINK_SPEED_1GB_FULL; break; case IFM_1000_T: speed |= IXGBE_LINK_SPEED_100_FULL; speed |= IXGBE_LINK_SPEED_1GB_FULL; break; case IFM_10G_TWINAX: speed |= IXGBE_LINK_SPEED_10GB_FULL; break; case IFM_100_TX: speed |= IXGBE_LINK_SPEED_100_FULL; break; case IFM_10_T: speed |= IXGBE_LINK_SPEED_10_FULL; break; default: goto invalid; } - hw->mac.autotry_restart = TRUE; - hw->mac.ops.setup_link(hw, speed, TRUE); + hw->mac.autotry_restart = true; + hw->mac.ops.setup_link(hw, speed, true); adapter->advertise = ((speed & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) | ((speed & IXGBE_LINK_SPEED_1GB_FULL) ? 2 : 0) | ((speed & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0) | ((speed & IXGBE_LINK_SPEED_10_FULL) ? 8 : 0); return (0); invalid: device_printf(iflib_get_dev(ctx), "Invalid media type!\n"); return (EINVAL); } /* ixgbe_if_media_change */ /************************************************************************ * ixgbe_set_promisc ************************************************************************/ static int ixgbe_if_promisc_set(if_ctx_t ctx, int flags) { struct adapter *adapter = iflib_get_softc(ctx); struct ifnet *ifp = iflib_get_ifp(ctx); u32 rctl; int mcnt = 0; rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); rctl &= (~IXGBE_FCTRL_UPE); if (ifp->if_flags & IFF_ALLMULTI) mcnt = MAX_NUM_MULTICAST_ADDRESSES; else { mcnt = min(if_llmaddr_count(ifp), MAX_NUM_MULTICAST_ADDRESSES); } if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) rctl &= (~IXGBE_FCTRL_MPE); IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl); if (ifp->if_flags & IFF_PROMISC) { rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl); } else if (ifp->if_flags & IFF_ALLMULTI) { rctl |= IXGBE_FCTRL_MPE; rctl &= ~IXGBE_FCTRL_UPE; IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, rctl); } return (0); } /* ixgbe_if_promisc_set */ /************************************************************************ * ixgbe_msix_link - Link status change ISR (MSI/MSI-X) ************************************************************************/ static int ixgbe_msix_link(void *arg) { struct adapter *adapter = arg; struct ixgbe_hw *hw = &adapter->hw; u32 eicr, eicr_mask; s32 retval; ++adapter->link_irq; /* Pause other interrupts */ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_OTHER); /* First get the cause */ eicr = IXGBE_READ_REG(hw, IXGBE_EICS); /* Be sure the queue bits are not cleared */ eicr &= ~IXGBE_EICR_RTX_QUEUE; /* Clear interrupt with write */ IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); /* Link status change */ if (eicr & IXGBE_EICR_LSC) { IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); adapter->task_requests |= IXGBE_REQUEST_TASK_LSC; } if (adapter->hw.mac.type != ixgbe_mac_82598EB) { if ((adapter->feat_en & IXGBE_FEATURE_FDIR) && (eicr & IXGBE_EICR_FLOW_DIR)) { /* This is probably overkill :) */ if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1)) return (FILTER_HANDLED); /* Disable the interrupt */ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FLOW_DIR); adapter->task_requests |= 
IXGBE_REQUEST_TASK_FDIR; } else if (eicr & IXGBE_EICR_ECC) { device_printf(iflib_get_dev(adapter->ctx), "\nCRITICAL: ECC ERROR!! Please Reboot!!\n"); IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); } /* Check for over temp condition */ if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) { switch (adapter->hw.mac.type) { case ixgbe_mac_X550EM_a: if (!(eicr & IXGBE_EICR_GPI_SDP0_X550EM_a)) break; IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_GPI_SDP0_X550EM_a); IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X550EM_a); retval = hw->phy.ops.check_overtemp(hw); if (retval != IXGBE_ERR_OVERTEMP) break; device_printf(iflib_get_dev(adapter->ctx), "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n"); device_printf(iflib_get_dev(adapter->ctx), "System shutdown required!\n"); break; default: if (!(eicr & IXGBE_EICR_TS)) break; retval = hw->phy.ops.check_overtemp(hw); if (retval != IXGBE_ERR_OVERTEMP) break; device_printf(iflib_get_dev(adapter->ctx), "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n"); device_printf(iflib_get_dev(adapter->ctx), "System shutdown required!\n"); IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS); break; } } /* Check for VF message */ if ((adapter->feat_en & IXGBE_FEATURE_SRIOV) && (eicr & IXGBE_EICR_MAILBOX)) adapter->task_requests |= IXGBE_REQUEST_TASK_MBX; } if (ixgbe_is_sfp(hw)) { /* Pluggable optics-related interrupt */ if (hw->mac.type >= ixgbe_mac_X540) eicr_mask = IXGBE_EICR_GPI_SDP0_X540; else eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw); if (eicr & eicr_mask) { IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask); adapter->task_requests |= IXGBE_REQUEST_TASK_MOD; } if ((hw->mac.type == ixgbe_mac_82599EB) && (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) { IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); adapter->task_requests |= IXGBE_REQUEST_TASK_MSF; } } /* Check for fan failure */ if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) { - ixgbe_check_fan_failure(adapter, eicr, TRUE); + ixgbe_check_fan_failure(adapter, eicr, true); IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); } /* External PHY interrupt */ if ((hw->phy.type == ixgbe_phy_x550em_ext_t) && (eicr & IXGBE_EICR_GPI_SDP0_X540)) { IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540); adapter->task_requests |= IXGBE_REQUEST_TASK_PHY; } return (adapter->task_requests != 0) ? 
FILTER_SCHEDULE_THREAD : FILTER_HANDLED; } /* ixgbe_msix_link */ /************************************************************************ * ixgbe_sysctl_interrupt_rate_handler ************************************************************************/ static int ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS) { struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1); int error; unsigned int reg, usec, rate; reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix)); usec = ((reg & 0x0FF8) >> 3); if (usec > 0) rate = 500000 / usec; else rate = 0; error = sysctl_handle_int(oidp, &rate, 0, req); if (error || !req->newptr) return error; reg &= ~0xfff; /* default, no limitation */ ixgbe_max_interrupt_rate = 0; if (rate > 0 && rate < 500000) { if (rate < 1000) rate = 1000; ixgbe_max_interrupt_rate = rate; reg |= ((4000000/rate) & 0xff8); } IXGBE_WRITE_REG(&que->adapter->hw, IXGBE_EITR(que->msix), reg); return (0); } /* ixgbe_sysctl_interrupt_rate_handler */ /************************************************************************ * ixgbe_add_device_sysctls ************************************************************************/ static void ixgbe_add_device_sysctls(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); device_t dev = iflib_get_dev(ctx); struct ixgbe_hw *hw = &adapter->hw; struct sysctl_oid_list *child; struct sysctl_ctx_list *ctx_list; ctx_list = device_get_sysctl_ctx(dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); /* Sysctls for all devices */ SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0, ixgbe_sysctl_flowcntl, "I", IXGBE_SYSCTL_DESC_SET_FC); SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0, ixgbe_sysctl_advertise, "I", IXGBE_SYSCTL_DESC_ADV_SPEED); adapter->enable_aim = ixgbe_enable_aim; SYSCTL_ADD_INT(ctx_list, child, OID_AUTO, "enable_aim", CTLFLAG_RW, &adapter->enable_aim, 0, "Interrupt Moderation"); SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, 0, ixgbe_sysctl_print_fw_version, "A", "Prints FW/NVM Versions"); #ifdef IXGBE_DEBUG /* testing sysctls (for all devices) */ SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0, ixgbe_sysctl_power_state, "I", "PCI Power State"); SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, 0, ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration"); #endif /* for X550 series devices */ if (hw->mac.type >= ixgbe_mac_X550) SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac", CTLTYPE_U16 | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0, ixgbe_sysctl_dmac, "I", "DMA Coalesce"); /* for WoL-capable devices */ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0, ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN"); SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc", CTLTYPE_U32 | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0, ixgbe_sysctl_wufc, "I", "Enable/Disable Wake Up Filters"); } /* for X552/X557-AT devices */ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { struct sysctl_oid *phy_node; struct sysctl_oid_list *phy_list; phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "External PHY sysctls"); phy_list = 
SYSCTL_CHILDREN(phy_node); SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp", CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, 0, ixgbe_sysctl_phy_temp, "I", "Current External PHY Temperature (Celsius)"); SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "overtemp_occurred", CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, 0, ixgbe_sysctl_phy_overtemp_occurred, "I", "External PHY High Temperature Event Occurred"); } if (adapter->feat_cap & IXGBE_FEATURE_EEE) { SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0, ixgbe_sysctl_eee_state, "I", "EEE Power Save State"); } } /* ixgbe_add_device_sysctls */ /************************************************************************ * ixgbe_allocate_pci_resources ************************************************************************/ static int ixgbe_allocate_pci_resources(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); device_t dev = iflib_get_dev(ctx); int rid; rid = PCIR_BAR(0); adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!(adapter->pci_mem)) { device_printf(dev, "Unable to allocate bus resource: memory\n"); return (ENXIO); } /* Save bus_space values for READ/WRITE_REG macros */ adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem); adapter->osdep.mem_bus_space_handle = rman_get_bushandle(adapter->pci_mem); /* Set hw values for shared code */ adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle; return (0); } /* ixgbe_allocate_pci_resources */ /************************************************************************ * ixgbe_detach - Device removal routine * * Called when the driver is being removed. * Stops the adapter and deallocates all the resources * that were allocated for driver operation. * * return 0 on success, positive on failure ************************************************************************/ static int ixgbe_if_detach(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); device_t dev = iflib_get_dev(ctx); u32 ctrl_ext; INIT_DEBUGOUT("ixgbe_detach: begin"); if (ixgbe_pci_iov_detach(dev) != 0) { device_printf(dev, "SR-IOV in use; detach first.\n"); return (EBUSY); } ixgbe_setup_low_power_mode(ctx); /* let hardware know driver is unloading */ ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext); ixgbe_free_pci_resources(ctx); free(adapter->mta, M_IXGBE); return (0); } /* ixgbe_if_detach */ /************************************************************************ * ixgbe_setup_low_power_mode - LPLU/WoL preparation * * Prepare the adapter/port for LPLU and/or WoL ************************************************************************/ static int ixgbe_setup_low_power_mode(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct ixgbe_hw *hw = &adapter->hw; device_t dev = iflib_get_dev(ctx); s32 error = 0; if (!hw->wol_enabled) - ixgbe_set_phy_power(hw, FALSE); + ixgbe_set_phy_power(hw, false); /* Limit power management flow to X550EM baseT */ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T && hw->phy.ops.enter_lplu) { /* Turn off support for APM wakeup. (Using ACPI instead) */ IXGBE_WRITE_REG(hw, IXGBE_GRC, IXGBE_READ_REG(hw, IXGBE_GRC) & ~(u32)2); /* * Clear Wake Up Status register to prevent any previous wakeup * events from waking us up immediately after we suspend. 
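* The WUS bits are write-one-to-clear, so writing all ones below wipes every latched wake event.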
*/ IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); /* * Program the Wakeup Filter Control register with user filter * settings */ IXGBE_WRITE_REG(hw, IXGBE_WUFC, adapter->wufc); /* Enable wakeups and power management in Wakeup Control */ IXGBE_WRITE_REG(hw, IXGBE_WUC, IXGBE_WUC_WKEN | IXGBE_WUC_PME_EN); /* X550EM baseT adapters need a special LPLU flow */ - hw->phy.reset_disable = TRUE; + hw->phy.reset_disable = true; ixgbe_if_stop(ctx); error = hw->phy.ops.enter_lplu(hw); if (error) device_printf(dev, "Error entering LPLU: %d\n", error); - hw->phy.reset_disable = FALSE; + hw->phy.reset_disable = false; } else { /* Just stop for other adapters */ ixgbe_if_stop(ctx); } return error; } /* ixgbe_setup_low_power_mode */ /************************************************************************ * ixgbe_shutdown - Shutdown entry point ************************************************************************/ static int ixgbe_if_shutdown(if_ctx_t ctx) { int error = 0; INIT_DEBUGOUT("ixgbe_shutdown: begin"); error = ixgbe_setup_low_power_mode(ctx); return (error); } /* ixgbe_if_shutdown */ /************************************************************************ * ixgbe_suspend * * From D0 to D3 ************************************************************************/ static int ixgbe_if_suspend(if_ctx_t ctx) { int error = 0; INIT_DEBUGOUT("ixgbe_suspend: begin"); error = ixgbe_setup_low_power_mode(ctx); return (error); } /* ixgbe_if_suspend */ /************************************************************************ * ixgbe_resume * * From D3 to D0 ************************************************************************/ static int ixgbe_if_resume(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); device_t dev = iflib_get_dev(ctx); struct ifnet *ifp = iflib_get_ifp(ctx); struct ixgbe_hw *hw = &adapter->hw; u32 wus; INIT_DEBUGOUT("ixgbe_resume: begin"); /* Read & clear WUS register */ wus = IXGBE_READ_REG(hw, IXGBE_WUS); if (wus) device_printf(dev, "Woken up by (WUS): %#010x\n", IXGBE_READ_REG(hw, IXGBE_WUS)); IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff); /* And clear WUFC until next low-power transition */ IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); /* * Required after D3->D0 transition; * will re-advertise all previous advertised speeds */ if (ifp->if_flags & IFF_UP) ixgbe_if_init(ctx); return (0); } /* ixgbe_if_resume */ /************************************************************************ * ixgbe_if_mtu_set - Ioctl mtu entry point * * Return 0 on success, EINVAL on failure ************************************************************************/ static int ixgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu) { struct adapter *adapter = iflib_get_softc(ctx); int error = 0; IOCTL_DEBUGOUT("ioctl: SIOCIFMTU (Set Interface MTU)"); if (mtu > IXGBE_MAX_MTU) { error = EINVAL; } else { adapter->max_frame_size = mtu + IXGBE_MTU_HDR; } return error; } /* ixgbe_if_mtu_set */ /************************************************************************ * ixgbe_if_crcstrip_set ************************************************************************/ static void ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip) { struct adapter *sc = iflib_get_softc(ctx); struct ixgbe_hw *hw = &sc->hw; /* crc stripping is set in two places: * IXGBE_HLREG0 (modified on init_locked and hw reset) * IXGBE_RDRXCTL (set by the original driver in * ixgbe_setup_hw_rsc() called in init_locked. * We disable the setting when netmap is compiled in). 
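* Hardware RSC cannot run with CRC stripping disabled, which is another reason the netmap fast-rx path (CRC kept) leaves RSC off.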
* We update the values here, but also in ixgbe.c because * init_locked sometimes is called outside our control. */ uint32_t hl, rxc; hl = IXGBE_READ_REG(hw, IXGBE_HLREG0); rxc = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); #ifdef NETMAP if (netmap_verbose) D("%s read HLREG 0x%x rxc 0x%x", onoff ? "enter" : "exit", hl, rxc); #endif /* hw requirements ... */ rxc &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; rxc |= IXGBE_RDRXCTL_RSCACKC; if (onoff && !crcstrip) { /* keep the crc. Fast rx */ hl &= ~IXGBE_HLREG0_RXCRCSTRP; rxc &= ~IXGBE_RDRXCTL_CRCSTRIP; } else { /* reset default mode */ hl |= IXGBE_HLREG0_RXCRCSTRP; rxc |= IXGBE_RDRXCTL_CRCSTRIP; } #ifdef NETMAP if (netmap_verbose) D("%s write HLREG 0x%x rxc 0x%x", onoff ? "enter" : "exit", hl, rxc); #endif IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hl); IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rxc); } /* ixgbe_if_crcstrip_set */ /********************************************************************* * ixgbe_if_init - Init entry point * * Used in two ways: It is used by the stack as an init * entry point in network interface structure. It is also * used by the driver as a hw/sw initialization routine to * get to a consistent state. * * Return 0 on success, positive on failure **********************************************************************/ void ixgbe_if_init(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct ifnet *ifp = iflib_get_ifp(ctx); device_t dev = iflib_get_dev(ctx); struct ixgbe_hw *hw = &adapter->hw; struct ix_rx_queue *rx_que; struct ix_tx_queue *tx_que; u32 txdctl, mhadd; u32 rxdctl, rxctrl; u32 ctrl_ext; int i, j, err; INIT_DEBUGOUT("ixgbe_if_init: begin"); /* Queue indices may change with IOV mode */ ixgbe_align_all_queue_indices(adapter); /* reprogram the RAR[0] in case user changed it. */ ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, IXGBE_RAH_AV); /* Get the latest mac address, User can use a LAA */ bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS); ixgbe_set_rar(hw, 0, hw->mac.addr, adapter->pool, 1); hw->addr_ctrl.rar_used_count = 1; ixgbe_init_hw(hw); ixgbe_initialize_iov(adapter); ixgbe_initialize_transmit_units(ctx); /* Setup Multicast table */ ixgbe_if_multi_set(ctx); /* Determine the correct mbuf pool, based on frame size */ adapter->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx); /* Configure RX settings */ ixgbe_initialize_receive_units(ctx); /* * Initialize variable holding task enqueue requests * from MSI-X interrupts */ adapter->task_requests = 0; /* Enable SDP & MSI-X interrupts based on adapter */ ixgbe_config_gpie(adapter); /* Set MTU size */ if (ifp->if_mtu > ETHERMTU) { /* aka IXGBE_MAXFRS on 82599 and newer */ mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); mhadd &= ~IXGBE_MHADD_MFS_MASK; mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT; IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); } /* Now enable all the queues */ for (i = 0, tx_que = adapter->tx_queues; i < adapter->num_tx_queues; i++, tx_que++) { struct tx_ring *txr = &tx_que->txr; txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me)); txdctl |= IXGBE_TXDCTL_ENABLE; /* Set WTHRESH to 8, burst writeback */ txdctl |= (8 << 16); /* * When the internal queue falls below PTHRESH (32), * start prefetching as long as there are at least * HTHRESH (1) buffers ready. The values are taken * from the Intel linux driver 3.8.21. * Prefetching enables tx line rate even with 1 queue. 
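* In TXDCTL, PTHRESH occupies bits 6:0 and HTHRESH bits 14:8, which is what the two shifts below encode (WTHRESH sits in bits 22:16, set above).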
*/ txdctl |= (32 << 0) | (1 << 8); IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl); } for (i = 0, rx_que = adapter->rx_queues; i < adapter->num_rx_queues; i++, rx_que++) { struct rx_ring *rxr = &rx_que->rxr; rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)); if (hw->mac.type == ixgbe_mac_82598EB) { /* * PTHRESH = 21 * HTHRESH = 4 * WTHRESH = 8 */ rxdctl &= ~0x3FFFFF; rxdctl |= 0x080420; } rxdctl |= IXGBE_RXDCTL_ENABLE; IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), rxdctl); for (j = 0; j < 10; j++) { if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me)) & IXGBE_RXDCTL_ENABLE) break; else msec_delay(1); } wmb(); } /* Enable Receive engine */ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); if (hw->mac.type == ixgbe_mac_82598EB) rxctrl |= IXGBE_RXCTRL_DMBYPS; rxctrl |= IXGBE_RXCTRL_RXEN; ixgbe_enable_rx_dma(hw, rxctrl); /* Set up MSI/MSI-X routing */ if (ixgbe_enable_msix) { ixgbe_configure_ivars(adapter); /* Set up auto-mask */ if (hw->mac.type == ixgbe_mac_82598EB) IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); else { IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); } } else { /* Simple settings for Legacy/MSI */ ixgbe_set_ivar(adapter, 0, 0, 0); ixgbe_set_ivar(adapter, 0, 0, 1); IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); } ixgbe_init_fdir(adapter); /* * Check on any SFP devices that * need to be kick-started */ if (hw->phy.type == ixgbe_phy_none) { err = hw->phy.ops.identify(hw); if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { device_printf(dev, "Unsupported SFP+ module type was detected.\n"); return; } } /* Set moderation on the Link interrupt */ IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->vector), IXGBE_LINK_ITR); /* Enable power to the phy. */ - ixgbe_set_phy_power(hw, TRUE); + ixgbe_set_phy_power(hw, true); /* Config/Enable Link */ ixgbe_config_link(ctx); /* Hardware Packet Buffer & Flow Control setup */ ixgbe_config_delay_values(adapter); /* Initialize the FC settings */ ixgbe_start_hw(hw); /* Set up VLAN support and filter */ ixgbe_setup_vlan_hw_support(ctx); /* Setup DMA Coalescing */ ixgbe_config_dmac(adapter); /* And now turn on interrupts */ ixgbe_if_enable_intr(ctx); /* Enable the use of the MBX by the VF's */ if (adapter->feat_en & IXGBE_FEATURE_SRIOV) { ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); } } /* ixgbe_init_locked */ /************************************************************************ * ixgbe_set_ivar * * Setup the correct IVAR register for a particular MSI-X interrupt * (yes this is all very magic and confusing :) * - entry is the register array entry * - vector is the MSI-X vector for this queue * - type is RX/TX/MISC ************************************************************************/ static void ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type) { struct ixgbe_hw *hw = &adapter->hw; u32 ivar, index; vector |= IXGBE_IVAR_ALLOC_VAL; switch (hw->mac.type) { case ixgbe_mac_82598EB: if (type == -1) entry = IXGBE_IVAR_OTHER_CAUSES_INDEX; else entry += (type * 64); index = (entry >> 2) & 0x1F; ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); ivar &= ~(0xFF << (8 * (entry & 0x3))); ivar |= (vector << (8 * (entry & 0x3))); IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_X550EM_a: if (type == -1) { /* MISC IVAR */ index = (entry & 1) * 8; ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); ivar &= ~(0xFF << 
index); ivar |= (vector << index); IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); } else { /* RX/TX IVARS */ index = (16 * (entry & 1)) + (8 * type); ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1)); ivar &= ~(0xFF << index); ivar |= (vector << index); IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar); } default: break; } } /* ixgbe_set_ivar */ /************************************************************************ * ixgbe_configure_ivars ************************************************************************/ static void ixgbe_configure_ivars(struct adapter *adapter) { struct ix_rx_queue *rx_que = adapter->rx_queues; struct ix_tx_queue *tx_que = adapter->tx_queues; u32 newitr; if (ixgbe_max_interrupt_rate > 0) newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8; else { /* * Disable DMA coalescing if interrupt moderation is * disabled. */ adapter->dmac = 0; newitr = 0; } for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) { struct rx_ring *rxr = &rx_que->rxr; /* First the RX queue entry */ ixgbe_set_ivar(adapter, rxr->me, rx_que->msix, 0); /* Set an Initial EITR value */ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(rx_que->msix), newitr); } for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) { struct tx_ring *txr = &tx_que->txr; /* ... and the TX */ ixgbe_set_ivar(adapter, txr->me, tx_que->msix, 1); } /* For the Link interrupt */ ixgbe_set_ivar(adapter, 1, adapter->vector, -1); } /* ixgbe_configure_ivars */ /************************************************************************ * ixgbe_config_gpie ************************************************************************/ static void ixgbe_config_gpie(struct adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 gpie; gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); if (adapter->intr_type == IFLIB_INTR_MSIX) { /* Enable Enhanced MSI-X mode */ gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD; } /* Fan Failure Interrupt */ if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) gpie |= IXGBE_SDP1_GPIEN; /* Thermal Sensor Interrupt */ if (adapter->feat_en & IXGBE_FEATURE_TEMP_SENSOR) gpie |= IXGBE_SDP0_GPIEN_X540; /* Link detection */ switch (hw->mac.type) { case ixgbe_mac_82599EB: gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN; break; case ixgbe_mac_X550EM_x: case ixgbe_mac_X550EM_a: gpie |= IXGBE_SDP0_GPIEN_X540; break; default: break; } IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); } /* ixgbe_config_gpie */ /************************************************************************ * ixgbe_config_delay_values * * Requires adapter->max_frame_size to be set. 
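* * High water is the RX packet buffer size (converted to KB) minus the delay value (IXGBE_DV*) for one max-sized frame; low water comes from the matching IXGBE_LOW_DV* macro.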
************************************************************************/ static void ixgbe_config_delay_values(struct adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 rxpb, frame, size, tmp; frame = adapter->max_frame_size; /* Calculate High Water */ switch (hw->mac.type) { case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_X550EM_a: tmp = IXGBE_DV_X540(frame, frame); break; default: tmp = IXGBE_DV(frame, frame); break; } size = IXGBE_BT2KB(tmp); rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10; hw->fc.high_water[0] = rxpb - size; /* Now calculate Low Water */ switch (hw->mac.type) { case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_X550EM_a: tmp = IXGBE_LOW_DV_X540(frame); break; default: tmp = IXGBE_LOW_DV(frame); break; } hw->fc.low_water[0] = IXGBE_BT2KB(tmp); hw->fc.pause_time = IXGBE_FC_PAUSE; - hw->fc.send_xon = TRUE; + hw->fc.send_xon = true; } /* ixgbe_config_delay_values */ /************************************************************************ * ixgbe_set_multi - Multicast Update * * Called whenever multicast address list is updated. ************************************************************************/ static u_int ixgbe_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int idx) { struct adapter *adapter = arg; struct ixgbe_mc_addr *mta = adapter->mta; if (idx == MAX_NUM_MULTICAST_ADDRESSES) return (0); bcopy(LLADDR(sdl), mta[idx].addr, IXGBE_ETH_LENGTH_OF_ADDRESS); mta[idx].vmdq = adapter->pool; return (1); } /* ixgbe_mc_filter_apply */ static void ixgbe_if_multi_set(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct ixgbe_mc_addr *mta; struct ifnet *ifp = iflib_get_ifp(ctx); u8 *update_ptr; u32 fctrl; u_int mcnt; IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin"); mta = adapter->mta; bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES); mcnt = if_foreach_llmaddr(iflib_get_ifp(ctx), ixgbe_mc_filter_apply, adapter); fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); if (ifp->if_flags & IFF_PROMISC) fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES || ifp->if_flags & IFF_ALLMULTI) { fctrl |= IXGBE_FCTRL_MPE; fctrl &= ~IXGBE_FCTRL_UPE; } else fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl); if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) { update_ptr = (u8 *)mta; ixgbe_update_mc_addr_list(&adapter->hw, update_ptr, mcnt, - ixgbe_mc_array_itr, TRUE); + ixgbe_mc_array_itr, true); } } /* ixgbe_if_multi_set */ /************************************************************************ * ixgbe_mc_array_itr * * An iterator function needed by the multicast shared code. * It feeds the shared code routine the addresses in the * array of ixgbe_set_multi() one by one. ************************************************************************/ static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq) { struct ixgbe_mc_addr *mta; mta = (struct ixgbe_mc_addr *)*update_ptr; *vmdq = mta->vmdq; *update_ptr = (u8*)(mta + 1); return (mta->addr); } /* ixgbe_mc_array_itr */ /************************************************************************ * ixgbe_local_timer - Timer routine * * Checks for link status, updates statistics, * and runs the watchdog check. 
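* iflib invokes this per queue on every tick; only the qid 0 call does any work.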
************************************************************************/ static void ixgbe_if_timer(if_ctx_t ctx, uint16_t qid) { struct adapter *adapter = iflib_get_softc(ctx); if (qid != 0) return; /* Check for pluggable optics */ if (adapter->sfp_probe) if (!ixgbe_sfp_probe(ctx)) return; /* Nothing to do */ ixgbe_check_link(&adapter->hw, &adapter->link_speed, &adapter->link_up, 0); /* Fire off the adminq task */ iflib_admin_intr_deferred(ctx); } /* ixgbe_if_timer */ /************************************************************************ * ixgbe_sfp_probe * * Determine if a port had optics inserted. ************************************************************************/ static bool ixgbe_sfp_probe(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct ixgbe_hw *hw = &adapter->hw; device_t dev = iflib_get_dev(ctx); - bool result = FALSE; + bool result = false; if ((hw->phy.type == ixgbe_phy_nl) && (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) { s32 ret = hw->phy.ops.identify_sfp(hw); if (ret) goto out; ret = hw->phy.ops.reset(hw); - adapter->sfp_probe = FALSE; + adapter->sfp_probe = false; if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) { device_printf(dev, "Unsupported SFP+ module detected!"); device_printf(dev, "Reload driver with supported module.\n"); goto out; } else device_printf(dev, "SFP+ module detected!\n"); /* We now have supported optics */ - result = TRUE; + result = true; } out: return (result); } /* ixgbe_sfp_probe */ /************************************************************************ * ixgbe_handle_mod - Tasklet for SFP module interrupts ************************************************************************/ static void ixgbe_handle_mod(void *context) { if_ctx_t ctx = context; struct adapter *adapter = iflib_get_softc(ctx); struct ixgbe_hw *hw = &adapter->hw; device_t dev = iflib_get_dev(ctx); u32 err, cage_full = 0; if (adapter->hw.need_crosstalk_fix) { switch (hw->mac.type) { case ixgbe_mac_82599EB: cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & IXGBE_ESDP_SDP2; break; case ixgbe_mac_X550EM_x: case ixgbe_mac_X550EM_a: cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & IXGBE_ESDP_SDP0; break; default: break; } if (!cage_full) goto handle_mod_out; } err = hw->phy.ops.identify_sfp(hw); if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { device_printf(dev, "Unsupported SFP+ module type was detected.\n"); goto handle_mod_out; } if (hw->mac.type == ixgbe_mac_82598EB) err = hw->phy.ops.reset(hw); else err = hw->mac.ops.setup_sfp(hw); if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { device_printf(dev, "Setup failure - unsupported SFP+ module type.\n"); goto handle_mod_out; } adapter->task_requests |= IXGBE_REQUEST_TASK_MSF; return; handle_mod_out: adapter->task_requests &= ~(IXGBE_REQUEST_TASK_MSF); } /* ixgbe_handle_mod */ /************************************************************************ * ixgbe_handle_msf - Tasklet for MSF (multispeed fiber) interrupts ************************************************************************/ static void ixgbe_handle_msf(void *context) { if_ctx_t ctx = context; struct adapter *adapter = iflib_get_softc(ctx); struct ixgbe_hw *hw = &adapter->hw; u32 autoneg; bool negotiate; /* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */ adapter->phy_layer = ixgbe_get_supported_physical_layer(hw); autoneg = hw->phy.autoneg_advertised; if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate); if (hw->mac.ops.setup_link) - hw->mac.ops.setup_link(hw, autoneg, TRUE); + 
hw->mac.ops.setup_link(hw, autoneg, true); /* Adjust media types shown in ifconfig */ ifmedia_removeall(adapter->media); ixgbe_add_media_types(adapter->ctx); ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO); } /* ixgbe_handle_msf */ /************************************************************************ * ixgbe_handle_phy - Tasklet for external PHY interrupts ************************************************************************/ static void ixgbe_handle_phy(void *context) { if_ctx_t ctx = context; struct adapter *adapter = iflib_get_softc(ctx); struct ixgbe_hw *hw = &adapter->hw; int error; error = hw->phy.ops.handle_lasi(hw); if (error == IXGBE_ERR_OVERTEMP) device_printf(adapter->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!! PHY will downshift to lower power state!\n"); else if (error) device_printf(adapter->dev, "Error handling LASI interrupt: %d\n", error); } /* ixgbe_handle_phy */ /************************************************************************ * ixgbe_if_stop - Stop the hardware * * Disables all traffic on the adapter by issuing a * global reset on the MAC and deallocates TX/RX buffers. ************************************************************************/ static void ixgbe_if_stop(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct ixgbe_hw *hw = &adapter->hw; INIT_DEBUGOUT("ixgbe_if_stop: begin\n"); ixgbe_reset_hw(hw); - hw->adapter_stopped = FALSE; + hw->adapter_stopped = false; ixgbe_stop_adapter(hw); if (hw->mac.type == ixgbe_mac_82599EB) ixgbe_stop_mac_link_on_d3_82599(hw); /* Turn off the laser - noop with no optics */ ixgbe_disable_tx_laser(hw); /* Update the stack */ - adapter->link_up = FALSE; + adapter->link_up = false; ixgbe_if_update_admin_status(ctx); /* reprogram the RAR[0] in case user changed it. */ ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV); return; } /* ixgbe_if_stop */ /************************************************************************ * ixgbe_update_link_status - Update OS on link state * * Note: Only updates the OS on the cached link state. * The real check of the hardware only happens with * a link interrupt. ************************************************************************/ static void ixgbe_if_update_admin_status(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); device_t dev = iflib_get_dev(ctx); if (adapter->link_up) { - if (adapter->link_active == FALSE) { + if (adapter->link_active == false) { if (bootverbose) device_printf(dev, "Link is up %d Gbps %s \n", ((adapter->link_speed == 128) ? 
10 : 1), "Full Duplex"); - adapter->link_active = TRUE; + adapter->link_active = true; /* Update any Flow Control changes */ ixgbe_fc_enable(&adapter->hw); /* Update DMA coalescing config */ ixgbe_config_dmac(adapter); /* should actually be negotiated value */ iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10)); if (adapter->feat_en & IXGBE_FEATURE_SRIOV) ixgbe_ping_all_vfs(adapter); } } else { /* Link down */ - if (adapter->link_active == TRUE) { + if (adapter->link_active == true) { if (bootverbose) device_printf(dev, "Link is Down\n"); iflib_link_state_change(ctx, LINK_STATE_DOWN, 0); - adapter->link_active = FALSE; + adapter->link_active = false; if (adapter->feat_en & IXGBE_FEATURE_SRIOV) ixgbe_ping_all_vfs(adapter); } } /* Handle task requests from msix_link() */ if (adapter->task_requests & IXGBE_REQUEST_TASK_MOD) ixgbe_handle_mod(ctx); if (adapter->task_requests & IXGBE_REQUEST_TASK_MSF) ixgbe_handle_msf(ctx); if (adapter->task_requests & IXGBE_REQUEST_TASK_MBX) ixgbe_handle_mbx(ctx); if (adapter->task_requests & IXGBE_REQUEST_TASK_FDIR) ixgbe_reinit_fdir(ctx); if (adapter->task_requests & IXGBE_REQUEST_TASK_PHY) ixgbe_handle_phy(ctx); adapter->task_requests = 0; ixgbe_update_stats_counters(adapter); } /* ixgbe_if_update_admin_status */ /************************************************************************ * ixgbe_config_dmac - Configure DMA Coalescing ************************************************************************/ static void ixgbe_config_dmac(struct adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config; if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config) return; if (dcfg->watchdog_timer ^ adapter->dmac || dcfg->link_speed ^ adapter->link_speed) { dcfg->watchdog_timer = adapter->dmac; - dcfg->fcoe_en = FALSE; + dcfg->fcoe_en = false; dcfg->link_speed = adapter->link_speed; dcfg->num_tcs = 1; INIT_DEBUGOUT2("dmac settings: watchdog %d, link speed %d\n", dcfg->watchdog_timer, dcfg->link_speed); hw->mac.ops.dmac_config(hw); } } /* ixgbe_config_dmac */ /************************************************************************ * ixgbe_if_enable_intr ************************************************************************/ void ixgbe_if_enable_intr(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct ixgbe_hw *hw = &adapter->hw; struct ix_rx_queue *que = adapter->rx_queues; u32 mask, fwsm; mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: mask |= IXGBE_EIMS_ECC; /* Temperature sensor on some adapters */ mask |= IXGBE_EIMS_GPI_SDP0; /* SFP+ (RX_LOS_N & MOD_ABS_N) */ mask |= IXGBE_EIMS_GPI_SDP1; mask |= IXGBE_EIMS_GPI_SDP2; break; case ixgbe_mac_X540: /* Detect if Thermal Sensor is enabled */ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM); if (fwsm & IXGBE_FWSM_TS_ENABLED) mask |= IXGBE_EIMS_TS; mask |= IXGBE_EIMS_ECC; break; case ixgbe_mac_X550: /* MAC thermal sensor is automatically enabled */ mask |= IXGBE_EIMS_TS; mask |= IXGBE_EIMS_ECC; break; case ixgbe_mac_X550EM_x: case ixgbe_mac_X550EM_a: /* Some devices use SDP0 for important information */ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP || hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP || hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N || hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(hw); if (hw->phy.type == ixgbe_phy_x550em_ext_t) mask |= IXGBE_EICR_GPI_SDP0_X540; mask |= IXGBE_EIMS_ECC; break; default: break; } /* Enable Fan Failure detection */ 
if (adapter->feat_en & IXGBE_FEATURE_FAN_FAIL) mask |= IXGBE_EIMS_GPI_SDP1; /* Enable SR-IOV */ if (adapter->feat_en & IXGBE_FEATURE_SRIOV) mask |= IXGBE_EIMS_MAILBOX; /* Enable Flow Director */ if (adapter->feat_en & IXGBE_FEATURE_FDIR) mask |= IXGBE_EIMS_FLOW_DIR; IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); /* With MSI-X we use auto clear */ if (adapter->intr_type == IFLIB_INTR_MSIX) { mask = IXGBE_EIMS_ENABLE_MASK; /* Don't autoclear Link */ mask &= ~IXGBE_EIMS_OTHER; mask &= ~IXGBE_EIMS_LSC; if (adapter->feat_cap & IXGBE_FEATURE_SRIOV) mask &= ~IXGBE_EIMS_MAILBOX; IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask); } /* * Now enable all queues, this is done separately to * allow for handling the extended (beyond 32) MSI-X * vectors that can be used by 82599 */ for (int i = 0; i < adapter->num_rx_queues; i++, que++) ixgbe_enable_queue(adapter, que->msix); IXGBE_WRITE_FLUSH(hw); } /* ixgbe_if_enable_intr */ /************************************************************************ * ixgbe_disable_intr ************************************************************************/ static void ixgbe_if_disable_intr(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); if (adapter->intr_type == IFLIB_INTR_MSIX) IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0); if (adapter->hw.mac.type == ixgbe_mac_82598EB) { IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); } else { IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); } IXGBE_WRITE_FLUSH(&adapter->hw); } /* ixgbe_if_disable_intr */ /************************************************************************ * ixgbe_link_intr_enable ************************************************************************/ static void ixgbe_link_intr_enable(if_ctx_t ctx) { struct ixgbe_hw *hw = &((struct adapter *)iflib_get_softc(ctx))->hw; /* Re-enable other interrupts */ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC); } /* ixgbe_link_intr_enable */ /************************************************************************ * ixgbe_if_rx_queue_intr_enable ************************************************************************/ static int ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid) { struct adapter *adapter = iflib_get_softc(ctx); struct ix_rx_queue *que = &adapter->rx_queues[rxqid]; ixgbe_enable_queue(adapter, que->msix); return (0); } /* ixgbe_if_rx_queue_intr_enable */ /************************************************************************ * ixgbe_enable_queue ************************************************************************/ static void ixgbe_enable_queue(struct adapter *adapter, u32 vector) { struct ixgbe_hw *hw = &adapter->hw; u64 queue = 1ULL << vector; u32 mask; if (hw->mac.type == ixgbe_mac_82598EB) { mask = (IXGBE_EIMS_RTX_QUEUE & queue); IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); } else { mask = (queue & 0xFFFFFFFF); if (mask) IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); mask = (queue >> 32); if (mask) IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); } } /* ixgbe_enable_queue */ /************************************************************************ * ixgbe_disable_queue ************************************************************************/ static void ixgbe_disable_queue(struct adapter *adapter, u32 vector) { struct ixgbe_hw *hw = &adapter->hw; u64 queue = 1ULL << vector; u32 mask; if (hw->mac.type == ixgbe_mac_82598EB) { mask = (IXGBE_EIMS_RTX_QUEUE & queue); IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask); } else { mask = (queue & 
0xFFFFFFFF); if (mask) IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); mask = (queue >> 32); if (mask) IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask); } } /* ixgbe_disable_queue */ /************************************************************************ * ixgbe_intr - Legacy Interrupt Service Routine ************************************************************************/ int ixgbe_intr(void *arg) { struct adapter *adapter = arg; struct ix_rx_queue *que = adapter->rx_queues; struct ixgbe_hw *hw = &adapter->hw; if_ctx_t ctx = adapter->ctx; u32 eicr, eicr_mask; eicr = IXGBE_READ_REG(hw, IXGBE_EICR); ++que->irqs; if (eicr == 0) { ixgbe_if_enable_intr(ctx); return (FILTER_HANDLED); } /* Check for fan failure */ if ((hw->device_id == IXGBE_DEV_ID_82598AT) && (eicr & IXGBE_EICR_GPI_SDP1)) { device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n"); IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); } /* Link status change */ if (eicr & IXGBE_EICR_LSC) { IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); iflib_admin_intr_deferred(ctx); } if (ixgbe_is_sfp(hw)) { /* Pluggable optics-related interrupt */ if (hw->mac.type >= ixgbe_mac_X540) eicr_mask = IXGBE_EICR_GPI_SDP0_X540; else eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw); if (eicr & eicr_mask) { IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask); adapter->task_requests |= IXGBE_REQUEST_TASK_MOD; } if ((hw->mac.type == ixgbe_mac_82599EB) && (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) { IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); adapter->task_requests |= IXGBE_REQUEST_TASK_MSF; } } /* External PHY interrupt */ if ((hw->phy.type == ixgbe_phy_x550em_ext_t) && (eicr & IXGBE_EICR_GPI_SDP0_X540)) adapter->task_requests |= IXGBE_REQUEST_TASK_PHY; return (FILTER_SCHEDULE_THREAD); } /* ixgbe_intr */ /************************************************************************ * ixgbe_free_pci_resources ************************************************************************/ static void ixgbe_free_pci_resources(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct ix_rx_queue *que = adapter->rx_queues; device_t dev = iflib_get_dev(ctx); /* Release all MSI-X queue resources */ if (adapter->intr_type == IFLIB_INTR_MSIX) iflib_irq_free(ctx, &adapter->irq); if (que != NULL) { for (int i = 0; i < adapter->num_rx_queues; i++, que++) { iflib_irq_free(ctx, &que->que_irq); } } if (adapter->pci_mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(adapter->pci_mem), adapter->pci_mem); } /* ixgbe_free_pci_resources */ /************************************************************************ * ixgbe_sysctl_flowcntl * * SYSCTL wrapper around setting Flow Control ************************************************************************/ static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS) { struct adapter *adapter; int error, fc; adapter = (struct adapter *)arg1; fc = adapter->hw.fc.current_mode; error = sysctl_handle_int(oidp, &fc, 0, req); if ((error) || (req->newptr == NULL)) return (error); /* Don't bother if it's not changed */ if (fc == adapter->hw.fc.current_mode) return (0); return ixgbe_set_flowcntl(adapter, fc); } /* ixgbe_sysctl_flowcntl */ /************************************************************************ * ixgbe_set_flowcntl - Set flow control * * Flow control values: * 0 - off * 1 - rx pause * 2 - tx pause * 3 - full ************************************************************************/ static int ixgbe_set_flowcntl(struct adapter *adapter, int fc) { switch (fc) { case 
ixgbe_fc_rx_pause: case ixgbe_fc_tx_pause: case ixgbe_fc_full: adapter->hw.fc.requested_mode = fc; if (adapter->num_rx_queues > 1) ixgbe_disable_rx_drop(adapter); break; case ixgbe_fc_none: adapter->hw.fc.requested_mode = ixgbe_fc_none; if (adapter->num_rx_queues > 1) ixgbe_enable_rx_drop(adapter); break; default: return (EINVAL); } /* Don't autoneg if forcing a value */ - adapter->hw.fc.disable_fc_autoneg = TRUE; + adapter->hw.fc.disable_fc_autoneg = true; ixgbe_fc_enable(&adapter->hw); return (0); } /* ixgbe_set_flowcntl */ /************************************************************************ * ixgbe_enable_rx_drop * * Enable the hardware to drop packets when the buffer is * full. This is useful with multiqueue, so that no single * queue being full stalls the entire RX engine. We only * enable this when Multiqueue is enabled AND Flow Control * is disabled. ************************************************************************/ static void ixgbe_enable_rx_drop(struct adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct rx_ring *rxr; u32 srrctl; for (int i = 0; i < adapter->num_rx_queues; i++) { rxr = &adapter->rx_queues[i].rxr; srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me)); srrctl |= IXGBE_SRRCTL_DROP_EN; IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl); } /* enable drop for each vf */ for (int i = 0; i < adapter->num_vfs; i++) { IXGBE_WRITE_REG(hw, IXGBE_QDE, (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) | IXGBE_QDE_ENABLE)); } } /* ixgbe_enable_rx_drop */ /************************************************************************ * ixgbe_disable_rx_drop ************************************************************************/ static void ixgbe_disable_rx_drop(struct adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct rx_ring *rxr; u32 srrctl; for (int i = 0; i < adapter->num_rx_queues; i++) { rxr = &adapter->rx_queues[i].rxr; srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(rxr->me)); srrctl &= ~IXGBE_SRRCTL_DROP_EN; IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxr->me), srrctl); } /* disable drop for each vf */ for (int i = 0; i < adapter->num_vfs; i++) { IXGBE_WRITE_REG(hw, IXGBE_QDE, (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT))); } } /* ixgbe_disable_rx_drop */ /************************************************************************ * ixgbe_sysctl_advertise * * SYSCTL wrapper around setting advertised speed ************************************************************************/ static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS) { struct adapter *adapter; int error, advertise; adapter = (struct adapter *)arg1; advertise = adapter->advertise; error = sysctl_handle_int(oidp, &advertise, 0, req); if ((error) || (req->newptr == NULL)) return (error); return ixgbe_set_advertise(adapter, advertise); } /* ixgbe_sysctl_advertise */ /************************************************************************ * ixgbe_set_advertise - Control advertised link speed * * Flags: * 0x1 - advertise 100 Mb * 0x2 - advertise 1G * 0x4 - advertise 10G * 0x8 - advertise 10 Mb (yes, Mb) ************************************************************************/ static int ixgbe_set_advertise(struct adapter *adapter, int advertise) { device_t dev = iflib_get_dev(adapter->ctx); struct ixgbe_hw *hw; ixgbe_link_speed speed = 0; ixgbe_link_speed link_caps = 0; s32 err = IXGBE_NOT_IMPLEMENTED; - bool negotiate = FALSE; + bool negotiate = false; /* Checks to validate new value */ if (adapter->advertise == advertise) /* no change */ return (0); hw = &adapter->hw; /* No speed changes for backplane 
media */ if (hw->phy.media_type == ixgbe_media_type_backplane) return (ENODEV); if (!((hw->phy.media_type == ixgbe_media_type_copper) || (hw->phy.multispeed_fiber))) { device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n"); return (EINVAL); } if (advertise < 0x1 || advertise > 0xF) { device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0xF\n"); return (EINVAL); } if (hw->mac.ops.get_link_capabilities) { err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate); if (err != IXGBE_SUCCESS) { device_printf(dev, "Unable to determine supported advertise speeds\n"); return (ENODEV); } } /* Set new value and report new advertised mode */ if (advertise & 0x1) { if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) { device_printf(dev, "Interface does not support 100Mb advertised speed\n"); return (EINVAL); } speed |= IXGBE_LINK_SPEED_100_FULL; } if (advertise & 0x2) { if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) { device_printf(dev, "Interface does not support 1Gb advertised speed\n"); return (EINVAL); } speed |= IXGBE_LINK_SPEED_1GB_FULL; } if (advertise & 0x4) { if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) { device_printf(dev, "Interface does not support 10Gb advertised speed\n"); return (EINVAL); } speed |= IXGBE_LINK_SPEED_10GB_FULL; } if (advertise & 0x8) { if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) { device_printf(dev, "Interface does not support 10Mb advertised speed\n"); return (EINVAL); } speed |= IXGBE_LINK_SPEED_10_FULL; } - hw->mac.autotry_restart = TRUE; - hw->mac.ops.setup_link(hw, speed, TRUE); + hw->mac.autotry_restart = true; + hw->mac.ops.setup_link(hw, speed, true); adapter->advertise = advertise; return (0); } /* ixgbe_set_advertise */ /************************************************************************ * ixgbe_get_advertise - Get current advertised speed settings * * Formatted for sysctl usage. * Flags: * 0x1 - advertise 100 Mb * 0x2 - advertise 1G * 0x4 - advertise 10G * 0x8 - advertise 10 Mb (yes, Mb) ************************************************************************/ static int ixgbe_get_advertise(struct adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; int speed; ixgbe_link_speed link_caps = 0; s32 err; - bool negotiate = FALSE; + bool negotiate = false; /* * Advertised speed means nothing unless it's copper or * multi-speed fiber */ if (!(hw->phy.media_type == ixgbe_media_type_copper) && !(hw->phy.multispeed_fiber)) return (0); err = hw->mac.ops.get_link_capabilities(hw, &link_caps, &negotiate); if (err != IXGBE_SUCCESS) return (0); speed = ((link_caps & IXGBE_LINK_SPEED_10GB_FULL) ? 4 : 0) | ((link_caps & IXGBE_LINK_SPEED_1GB_FULL) ? 2 : 0) | ((link_caps & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0) | ((link_caps & IXGBE_LINK_SPEED_10_FULL) ? 8 : 0); return speed; } /* ixgbe_get_advertise */ /************************************************************************ * ixgbe_sysctl_dmac - Manage DMA Coalescing * * Control values: * 0/1 - off / on (use default value of 1000) * * Legal timer values are: * 50,100,250,500,1000,2000,5000,10000 * * Turning off interrupt moderation will also turn this off. 
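* * Example (illustrative; assumes the handler below is registered as dev.ix.<unit>.dmac): * sysctl dev.ix.0.dmac=1 (enable with the default value, 1000) * sysctl dev.ix.0.dmac=250 (enable with a timer value of 250) * sysctl dev.ix.0.dmac=0 (disable) * * Any other value is rejected with EINVAL.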
************************************************************************/ static int ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS) { struct adapter *adapter = (struct adapter *)arg1; struct ifnet *ifp = iflib_get_ifp(adapter->ctx); int error; u16 newval; newval = adapter->dmac; error = sysctl_handle_16(oidp, &newval, 0, req); if ((error) || (req->newptr == NULL)) return (error); switch (newval) { case 0: /* Disabled */ adapter->dmac = 0; break; case 1: /* Enable and use default */ adapter->dmac = 1000; break; case 50: case 100: case 250: case 500: case 1000: case 2000: case 5000: case 10000: /* Legal values - allow */ adapter->dmac = newval; break; default: /* Do nothing, illegal value */ return (EINVAL); } /* Re-initialize hardware if it's already running */ if (ifp->if_drv_flags & IFF_DRV_RUNNING) ifp->if_init(ifp); return (0); } /* ixgbe_sysctl_dmac */ #ifdef IXGBE_DEBUG /************************************************************************ * ixgbe_sysctl_power_state * * Sysctl to test power states * Values: * 0 - set device to D0 * 3 - set device to D3 * (none) - get current device power state ************************************************************************/ static int ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS) { struct adapter *adapter = (struct adapter *)arg1; device_t dev = adapter->dev; int curr_ps, new_ps, error = 0; curr_ps = new_ps = pci_get_powerstate(dev); error = sysctl_handle_int(oidp, &new_ps, 0, req); if ((error) || (req->newptr == NULL)) return (error); if (new_ps == curr_ps) return (0); if (new_ps == 3 && curr_ps == 0) error = DEVICE_SUSPEND(dev); else if (new_ps == 0 && curr_ps == 3) error = DEVICE_RESUME(dev); else return (EINVAL); device_printf(dev, "New state: %d\n", pci_get_powerstate(dev)); return (error); } /* ixgbe_sysctl_power_state */ #endif /************************************************************************ * ixgbe_sysctl_wol_enable * * Sysctl to enable/disable the WoL capability, * if supported by the adapter. * * Values: * 0 - disabled * 1 - enabled ************************************************************************/ static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS) { struct adapter *adapter = (struct adapter *)arg1; struct ixgbe_hw *hw = &adapter->hw; int new_wol_enabled; int error = 0; new_wol_enabled = hw->wol_enabled; error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req); if ((error) || (req->newptr == NULL)) return (error); new_wol_enabled = !!(new_wol_enabled); if (new_wol_enabled == hw->wol_enabled) return (0); if (new_wol_enabled > 0 && !adapter->wol_support) return (ENODEV); else hw->wol_enabled = new_wol_enabled; return (0); } /* ixgbe_sysctl_wol_enable */ /************************************************************************ * ixgbe_sysctl_wufc - Wake Up Filter Control * * Sysctl to enable/disable the types of packets that the * adapter will wake up on upon receipt. * Flags: * 0x1 - Link Status Change * 0x2 - Magic Packet * 0x4 - Direct Exact * 0x8 - Directed Multicast * 0x10 - Broadcast * 0x20 - ARP/IPv4 Request Packet * 0x40 - Direct IPv4 Packet * 0x80 - Direct IPv6 Packet * * Settings not listed above will cause the sysctl to return an error. 
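* * Example (illustrative OID name): to wake on Link Status Change and Magic Packet only, combine the flags 0x1 | 0x2: * sysctl dev.ix.0.wufc=0x3 * * Values with any bit above 0xff set are rejected with EINVAL.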
************************************************************************/ static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS) { struct adapter *adapter = (struct adapter *)arg1; int error = 0; u32 new_wufc; new_wufc = adapter->wufc; error = sysctl_handle_32(oidp, &new_wufc, 0, req); if ((error) || (req->newptr == NULL)) return (error); if (new_wufc == adapter->wufc) return (0); if (new_wufc & 0xffffff00) return (EINVAL); new_wufc &= 0xff; new_wufc |= (0xffffff00 & adapter->wufc); adapter->wufc = new_wufc; return (0); } /* ixgbe_sysctl_wufc */ #ifdef IXGBE_DEBUG /************************************************************************ * ixgbe_sysctl_print_rss_config ************************************************************************/ static int ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS) { struct adapter *adapter = (struct adapter *)arg1; struct ixgbe_hw *hw = &adapter->hw; device_t dev = adapter->dev; struct sbuf *buf; int error = 0, reta_size; u32 reg; buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (!buf) { device_printf(dev, "Could not allocate sbuf for output.\n"); return (ENOMEM); } // TODO: use sbufs to make a string to print out /* Set multiplier for RETA setup and table size based on MAC */ switch (adapter->hw.mac.type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_X550EM_a: reta_size = 128; break; default: reta_size = 32; break; } /* Print out the redirection table */ sbuf_cat(buf, "\n"); for (int i = 0; i < reta_size; i++) { if (i < 32) { reg = IXGBE_READ_REG(hw, IXGBE_RETA(i)); sbuf_printf(buf, "RETA(%2d): 0x%08x\n", i, reg); } else { reg = IXGBE_READ_REG(hw, IXGBE_ERETA(i - 32)); sbuf_printf(buf, "ERETA(%2d): 0x%08x\n", i - 32, reg); } } // TODO: print more config error = sbuf_finish(buf); if (error) device_printf(dev, "Error finishing sbuf: %d\n", error); sbuf_delete(buf); return (0); } /* ixgbe_sysctl_print_rss_config */ #endif /* IXGBE_DEBUG */ /************************************************************************ * ixgbe_sysctl_phy_temp - Retrieve temperature of PHY * * For X552/X557-AT devices using an external PHY ************************************************************************/ static int ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS) { struct adapter *adapter = (struct adapter *)arg1; struct ixgbe_hw *hw = &adapter->hw; u16 reg; if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) { device_printf(iflib_get_dev(adapter->ctx), "Device has no supported external thermal sensor.\n"); return (ENODEV); } if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) { device_printf(iflib_get_dev(adapter->ctx), "Error reading from PHY's current temperature register\n"); return (EAGAIN); } /* Shift temp for output */ reg = reg >> 8; return (sysctl_handle_16(oidp, NULL, reg, req)); } /* ixgbe_sysctl_phy_temp */ /************************************************************************ * ixgbe_sysctl_phy_overtemp_occurred * * Reports (directly from the PHY) whether the current PHY * temperature is over the overtemp threshold.
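* * The handler reads the vendor-specific IXGBE_PHY_OVERTEMP_STATUS register over MDIO and reduces it to the single occurrence bit (0x4000), so the sysctl reports either 0 or 1.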
************************************************************************/ static int ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS) { struct adapter *adapter = (struct adapter *)arg1; struct ixgbe_hw *hw = &adapter->hw; u16 reg; if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) { device_printf(iflib_get_dev(adapter->ctx), "Device has no supported external thermal sensor.\n"); return (ENODEV); } if (hw->phy.ops.read_reg(hw, IXGBE_PHY_OVERTEMP_STATUS, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) { device_printf(iflib_get_dev(adapter->ctx), "Error reading from PHY's temperature status register\n"); return (EAGAIN); } /* Get occurrence bit */ reg = !!(reg & 0x4000); return (sysctl_handle_16(oidp, 0, reg, req)); } /* ixgbe_sysctl_phy_overtemp_occurred */ /************************************************************************ * ixgbe_sysctl_eee_state * * Sysctl to set EEE power saving feature * Values: * 0 - disable EEE * 1 - enable EEE * (none) - get current device EEE state ************************************************************************/ static int ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS) { struct adapter *adapter = (struct adapter *)arg1; device_t dev = adapter->dev; struct ifnet *ifp = iflib_get_ifp(adapter->ctx); int curr_eee, new_eee, error = 0; s32 retval; curr_eee = new_eee = !!(adapter->feat_en & IXGBE_FEATURE_EEE); error = sysctl_handle_int(oidp, &new_eee, 0, req); if ((error) || (req->newptr == NULL)) return (error); /* Nothing to do */ if (new_eee == curr_eee) return (0); /* Not supported */ if (!(adapter->feat_cap & IXGBE_FEATURE_EEE)) return (EINVAL); /* Bounds checking */ if ((new_eee < 0) || (new_eee > 1)) return (EINVAL); retval = ixgbe_setup_eee(&adapter->hw, new_eee); if (retval) { device_printf(dev, "Error in EEE setup: 0x%08X\n", retval); return (EINVAL); } /* Restart auto-neg */ ifp->if_init(ifp); device_printf(dev, "New EEE state: %d\n", new_eee); /* Cache new value */ if (new_eee) adapter->feat_en |= IXGBE_FEATURE_EEE; else adapter->feat_en &= ~IXGBE_FEATURE_EEE; return (error); } /* ixgbe_sysctl_eee_state */ /************************************************************************ * ixgbe_init_device_features ************************************************************************/ static void ixgbe_init_device_features(struct adapter *adapter) { adapter->feat_cap = IXGBE_FEATURE_NETMAP | IXGBE_FEATURE_RSS | IXGBE_FEATURE_MSI | IXGBE_FEATURE_MSIX | IXGBE_FEATURE_LEGACY_IRQ; /* Set capabilities first...
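* feat_cap records everything the hardware could support; feat_en, filled in afterwards, records the subset that is actually enabled.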
*/ switch (adapter->hw.mac.type) { case ixgbe_mac_82598EB: if (adapter->hw.device_id == IXGBE_DEV_ID_82598AT) adapter->feat_cap |= IXGBE_FEATURE_FAN_FAIL; break; case ixgbe_mac_X540: adapter->feat_cap |= IXGBE_FEATURE_SRIOV; adapter->feat_cap |= IXGBE_FEATURE_FDIR; if ((adapter->hw.device_id == IXGBE_DEV_ID_X540_BYPASS) && (adapter->hw.bus.func == 0)) adapter->feat_cap |= IXGBE_FEATURE_BYPASS; break; case ixgbe_mac_X550: adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR; adapter->feat_cap |= IXGBE_FEATURE_SRIOV; adapter->feat_cap |= IXGBE_FEATURE_FDIR; break; case ixgbe_mac_X550EM_x: adapter->feat_cap |= IXGBE_FEATURE_SRIOV; adapter->feat_cap |= IXGBE_FEATURE_FDIR; break; case ixgbe_mac_X550EM_a: adapter->feat_cap |= IXGBE_FEATURE_SRIOV; adapter->feat_cap |= IXGBE_FEATURE_FDIR; adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ; if ((adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T) || (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) { adapter->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR; adapter->feat_cap |= IXGBE_FEATURE_EEE; } break; case ixgbe_mac_82599EB: adapter->feat_cap |= IXGBE_FEATURE_SRIOV; adapter->feat_cap |= IXGBE_FEATURE_FDIR; if ((adapter->hw.device_id == IXGBE_DEV_ID_82599_BYPASS) && (adapter->hw.bus.func == 0)) adapter->feat_cap |= IXGBE_FEATURE_BYPASS; if (adapter->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) adapter->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ; break; default: break; } /* Enabled by default... */ /* Fan failure detection */ if (adapter->feat_cap & IXGBE_FEATURE_FAN_FAIL) adapter->feat_en |= IXGBE_FEATURE_FAN_FAIL; /* Netmap */ if (adapter->feat_cap & IXGBE_FEATURE_NETMAP) adapter->feat_en |= IXGBE_FEATURE_NETMAP; /* EEE */ if (adapter->feat_cap & IXGBE_FEATURE_EEE) adapter->feat_en |= IXGBE_FEATURE_EEE; /* Thermal Sensor */ if (adapter->feat_cap & IXGBE_FEATURE_TEMP_SENSOR) adapter->feat_en |= IXGBE_FEATURE_TEMP_SENSOR; /* Enabled via global sysctl... */ /* Flow Director */ if (ixgbe_enable_fdir) { if (adapter->feat_cap & IXGBE_FEATURE_FDIR) adapter->feat_en |= IXGBE_FEATURE_FDIR; else device_printf(adapter->dev, "Device does not support Flow Director. Leaving disabled."); } /* * Message Signal Interrupts - Extended (MSI-X) * Normal MSI is only enabled if MSI-X calls fail. */ if (!ixgbe_enable_msix) adapter->feat_cap &= ~IXGBE_FEATURE_MSIX; /* Receive-Side Scaling (RSS) */ if ((adapter->feat_cap & IXGBE_FEATURE_RSS) && ixgbe_enable_rss) adapter->feat_en |= IXGBE_FEATURE_RSS; /* Disable features with unmet dependencies... */ /* No MSI-X */ if (!(adapter->feat_cap & IXGBE_FEATURE_MSIX)) { adapter->feat_cap &= ~IXGBE_FEATURE_RSS; adapter->feat_cap &= ~IXGBE_FEATURE_SRIOV; adapter->feat_en &= ~IXGBE_FEATURE_RSS; adapter->feat_en &= ~IXGBE_FEATURE_SRIOV; } } /* ixgbe_init_device_features */ /************************************************************************ * ixgbe_check_fan_failure ************************************************************************/ static void ixgbe_check_fan_failure(struct adapter *adapter, u32 reg, bool in_interrupt) { u32 mask; mask = (in_interrupt) ? IXGBE_EICR_GPI_SDP1_BY_MAC(&adapter->hw) : IXGBE_ESDP_SDP1; if (reg & mask) device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! 
REPLACE IMMEDIATELY!!\n"); } /* ixgbe_check_fan_failure */ /************************************************************************ * ixgbe_sbuf_fw_version ************************************************************************/ static void ixgbe_sbuf_fw_version(struct ixgbe_hw *hw, struct sbuf *buf) { struct ixgbe_nvm_version nvm_ver = {0}; uint16_t phyfw = 0; int status; const char *space = ""; ixgbe_get_oem_prod_version(hw, &nvm_ver); /* OEM's NVM version */ ixgbe_get_orom_version(hw, &nvm_ver); /* Option ROM */ ixgbe_get_etk_id(hw, &nvm_ver); /* eTrack identifies a build in Intel's SCM */ status = ixgbe_get_phy_firmware_version(hw, &phyfw); if (nvm_ver.oem_valid) { sbuf_printf(buf, "NVM OEM V%d.%d R%d", nvm_ver.oem_major, nvm_ver.oem_minor, nvm_ver.oem_release); space = " "; } if (nvm_ver.or_valid) { sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d", space, nvm_ver.or_major, nvm_ver.or_build, nvm_ver.or_patch); space = " "; } if (nvm_ver.etk_id != ((NVM_VER_INVALID << NVM_ETK_SHIFT) | NVM_VER_INVALID)) { sbuf_printf(buf, "%seTrack 0x%08x", space, nvm_ver.etk_id); space = " "; } if (phyfw != 0 && status == IXGBE_SUCCESS) sbuf_printf(buf, "%sPHY FW V%d", space, phyfw); } /* ixgbe_sbuf_fw_version */ /************************************************************************ * ixgbe_print_fw_version ************************************************************************/ static void ixgbe_print_fw_version(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct ixgbe_hw *hw = &adapter->hw; device_t dev = adapter->dev; struct sbuf *buf; int error = 0; buf = sbuf_new_auto(); if (!buf) { device_printf(dev, "Could not allocate sbuf for output.\n"); return; } ixgbe_sbuf_fw_version(hw, buf); error = sbuf_finish(buf); if (error) device_printf(dev, "Error finishing sbuf: %d\n", error); else if (sbuf_len(buf)) device_printf(dev, "%s\n", sbuf_data(buf)); sbuf_delete(buf); } /* ixgbe_print_fw_version */ /************************************************************************ * ixgbe_sysctl_print_fw_version ************************************************************************/ static int ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS) { struct adapter *adapter = (struct adapter *)arg1; struct ixgbe_hw *hw = &adapter->hw; device_t dev = adapter->dev; struct sbuf *buf; int error = 0; buf = sbuf_new_for_sysctl(NULL, NULL, 128, req); if (!buf) { device_printf(dev, "Could not allocate sbuf for output.\n"); return (ENOMEM); } ixgbe_sbuf_fw_version(hw, buf); error = sbuf_finish(buf); if (error) device_printf(dev, "Error finishing sbuf: %d\n", error); sbuf_delete(buf); return (0); } /* ixgbe_sysctl_print_fw_version */ diff --git a/sys/dev/ixgbe/if_ixv.c b/sys/dev/ixgbe/if_ixv.c index 301d3c0368ae..acebf0a65e12 100644 --- a/sys/dev/ixgbe/if_ixv.c +++ b/sys/dev/ixgbe/if_ixv.c @@ -1,1948 +1,1948 @@ /****************************************************************************** Copyright (c) 2001-2017, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. 
Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #include "opt_inet.h" #include "opt_inet6.h" #include "opt_rss.h" #include "ixgbe.h" #include "ifdi_if.h" #include #include /************************************************************************ * Driver version ************************************************************************/ char ixv_driver_version[] = "2.0.1-k"; /************************************************************************ * PCI Device ID Table * * Used by probe to select devices to load on * Last field stores an index into ixv_strings * Last entry must be all 0s * * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } ************************************************************************/ static pci_vendor_info_t ixv_vendor_info_array[] = { PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, "Intel(R) X520 82599 Virtual Function"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, "Intel(R) X540 Virtual Function"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, "Intel(R) X550 Virtual Function"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, "Intel(R) X552 Virtual Function"), PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, "Intel(R) X553 Virtual Function"), /* required last entry */ PVID_END }; /************************************************************************ * Function prototypes ************************************************************************/ static void *ixv_register(device_t dev); static int ixv_if_attach_pre(if_ctx_t ctx); static int ixv_if_attach_post(if_ctx_t ctx); static int ixv_if_detach(if_ctx_t ctx); static int ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid); static int ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets); static int ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets); static void ixv_if_queues_free(if_ctx_t ctx); static void ixv_identify_hardware(if_ctx_t ctx); static void ixv_init_device_features(struct adapter *); static int ixv_allocate_pci_resources(if_ctx_t ctx); static void ixv_free_pci_resources(if_ctx_t ctx); static int ixv_setup_interface(if_ctx_t ctx); static void ixv_if_media_status(if_ctx_t , struct ifmediareq *); static int ixv_if_media_change(if_ctx_t ctx); static void ixv_if_update_admin_status(if_ctx_t ctx); static int ixv_if_msix_intr_assign(if_ctx_t ctx, int msix); static int ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu); static void ixv_if_init(if_ctx_t ctx); static void ixv_if_local_timer(if_ctx_t ctx, uint16_t 
qid); static void ixv_if_stop(if_ctx_t ctx); static int ixv_negotiate_api(struct adapter *); static void ixv_initialize_transmit_units(if_ctx_t ctx); static void ixv_initialize_receive_units(if_ctx_t ctx); static void ixv_initialize_rss_mapping(struct adapter *); static void ixv_setup_vlan_support(if_ctx_t ctx); static void ixv_configure_ivars(struct adapter *); static void ixv_if_enable_intr(if_ctx_t ctx); static void ixv_if_disable_intr(if_ctx_t ctx); static void ixv_if_multi_set(if_ctx_t ctx); static void ixv_if_register_vlan(if_ctx_t, u16); static void ixv_if_unregister_vlan(if_ctx_t, u16); static uint64_t ixv_if_get_counter(if_ctx_t, ift_counter); static bool ixv_if_needs_restart(if_ctx_t, enum iflib_restart_event); static void ixv_save_stats(struct adapter *); static void ixv_init_stats(struct adapter *); static void ixv_update_stats(struct adapter *); static void ixv_add_stats_sysctls(struct adapter *adapter); static int ixv_sysctl_debug(SYSCTL_HANDLER_ARGS); static void ixv_set_ivar(struct adapter *, u8, u8, s8); static u8 *ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *); /* The MSI-X Interrupt handlers */ static int ixv_msix_que(void *); static int ixv_msix_mbx(void *); /************************************************************************ * FreeBSD Device Interface Entry Points ************************************************************************/ static device_method_t ixv_methods[] = { /* Device interface */ DEVMETHOD(device_register, ixv_register), DEVMETHOD(device_probe, iflib_device_probe), DEVMETHOD(device_attach, iflib_device_attach), DEVMETHOD(device_detach, iflib_device_detach), DEVMETHOD(device_shutdown, iflib_device_shutdown), DEVMETHOD_END }; static driver_t ixv_driver = { "ixv", ixv_methods, sizeof(struct adapter), }; devclass_t ixv_devclass; DRIVER_MODULE(ixv, pci, ixv_driver, ixv_devclass, 0, 0); IFLIB_PNP_INFO(pci, ixv_driver, ixv_vendor_info_array); MODULE_DEPEND(ixv, iflib, 1, 1, 1); MODULE_DEPEND(ixv, pci, 1, 1, 1); MODULE_DEPEND(ixv, ether, 1, 1, 1); static device_method_t ixv_if_methods[] = { DEVMETHOD(ifdi_attach_pre, ixv_if_attach_pre), DEVMETHOD(ifdi_attach_post, ixv_if_attach_post), DEVMETHOD(ifdi_detach, ixv_if_detach), DEVMETHOD(ifdi_init, ixv_if_init), DEVMETHOD(ifdi_stop, ixv_if_stop), DEVMETHOD(ifdi_msix_intr_assign, ixv_if_msix_intr_assign), DEVMETHOD(ifdi_intr_enable, ixv_if_enable_intr), DEVMETHOD(ifdi_intr_disable, ixv_if_disable_intr), DEVMETHOD(ifdi_tx_queue_intr_enable, ixv_if_rx_queue_intr_enable), DEVMETHOD(ifdi_rx_queue_intr_enable, ixv_if_rx_queue_intr_enable), DEVMETHOD(ifdi_tx_queues_alloc, ixv_if_tx_queues_alloc), DEVMETHOD(ifdi_rx_queues_alloc, ixv_if_rx_queues_alloc), DEVMETHOD(ifdi_queues_free, ixv_if_queues_free), DEVMETHOD(ifdi_update_admin_status, ixv_if_update_admin_status), DEVMETHOD(ifdi_multi_set, ixv_if_multi_set), DEVMETHOD(ifdi_mtu_set, ixv_if_mtu_set), DEVMETHOD(ifdi_media_status, ixv_if_media_status), DEVMETHOD(ifdi_media_change, ixv_if_media_change), DEVMETHOD(ifdi_timer, ixv_if_local_timer), DEVMETHOD(ifdi_vlan_register, ixv_if_register_vlan), DEVMETHOD(ifdi_vlan_unregister, ixv_if_unregister_vlan), DEVMETHOD(ifdi_get_counter, ixv_if_get_counter), DEVMETHOD(ifdi_needs_restart, ixv_if_needs_restart), DEVMETHOD_END }; static driver_t ixv_if_driver = { "ixv_if", ixv_if_methods, sizeof(struct adapter) }; /* * TUNEABLE PARAMETERS: */ /* Flow control setting, default to full */ static int ixv_flow_control = ixgbe_fc_full; TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control); /* * Header split: this causes the 
hardware to DMA * the header into a separate mbuf from the payload, * it can be a performance win in some workloads, but * in others it actually hurts, its off by default. */ -static int ixv_header_split = FALSE; +static int ixv_header_split = false; TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split); /* * Shadow VFTA table, this is needed because * the real filter table gets cleared during * a soft reset and we need to repopulate it. */ static u32 ixv_shadow_vfta[IXGBE_VFTA_SIZE]; extern struct if_txrx ixgbe_txrx; static struct if_shared_ctx ixv_sctx_init = { .isc_magic = IFLIB_MAGIC, .isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */ .isc_tx_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header), .isc_tx_maxsegsize = PAGE_SIZE, .isc_tso_maxsize = IXGBE_TSO_SIZE + sizeof(struct ether_vlan_header), .isc_tso_maxsegsize = PAGE_SIZE, .isc_rx_maxsize = MJUM16BYTES, .isc_rx_nsegments = 1, .isc_rx_maxsegsize = MJUM16BYTES, .isc_nfl = 1, .isc_ntxqs = 1, .isc_nrxqs = 1, .isc_admin_intrcnt = 1, .isc_vendor_info = ixv_vendor_info_array, .isc_driver_version = ixv_driver_version, .isc_driver = &ixv_if_driver, .isc_flags = IFLIB_IS_VF | IFLIB_TSO_INIT_IP, .isc_nrxd_min = {MIN_RXD}, .isc_ntxd_min = {MIN_TXD}, .isc_nrxd_max = {MAX_RXD}, .isc_ntxd_max = {MAX_TXD}, .isc_nrxd_default = {DEFAULT_RXD}, .isc_ntxd_default = {DEFAULT_TXD}, }; static void * ixv_register(device_t dev) { return (&ixv_sctx_init); } /************************************************************************ * ixv_if_tx_queues_alloc ************************************************************************/ static int ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets) { struct adapter *adapter = iflib_get_softc(ctx); if_softc_ctx_t scctx = adapter->shared; struct ix_tx_queue *que; int i, j, error; MPASS(adapter->num_tx_queues == ntxqsets); MPASS(ntxqs == 1); /* Allocate queue structure memory */ adapter->tx_queues = (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets, M_DEVBUF, M_NOWAIT | M_ZERO); if (!adapter->tx_queues) { device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n"); return (ENOMEM); } for (i = 0, que = adapter->tx_queues; i < ntxqsets; i++, que++) { struct tx_ring *txr = &que->txr; txr->me = i; txr->adapter = que->adapter = adapter; /* Allocate report status array */ if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) { error = ENOMEM; goto fail; } for (j = 0; j < scctx->isc_ntxd[0]; j++) txr->tx_rsq[j] = QIDX_INVALID; /* get the virtual and physical address of the hardware queues */ txr->tail = IXGBE_VFTDT(txr->me); txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i*ntxqs]; txr->tx_paddr = paddrs[i*ntxqs]; txr->bytes = 0; txr->total_packets = 0; } device_printf(iflib_get_dev(ctx), "allocated for %d queues\n", adapter->num_tx_queues); return (0); fail: ixv_if_queues_free(ctx); return (error); } /* ixv_if_tx_queues_alloc */ /************************************************************************ * ixv_if_rx_queues_alloc ************************************************************************/ static int ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets) { struct adapter *adapter = iflib_get_softc(ctx); struct ix_rx_queue *que; int i, error; MPASS(adapter->num_rx_queues == nrxqsets); MPASS(nrxqs == 1); /* Allocate queue structure memory */ adapter->rx_queues = (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) * nrxqsets, M_DEVBUF, 
M_NOWAIT | M_ZERO); if (!adapter->rx_queues) { device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n"); error = ENOMEM; goto fail; } for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) { struct rx_ring *rxr = &que->rxr; rxr->me = i; rxr->adapter = que->adapter = adapter; /* get the virtual and physical address of the hw queues */ rxr->tail = IXGBE_VFRDT(rxr->me); rxr->rx_base = (union ixgbe_adv_rx_desc *)vaddrs[i]; rxr->rx_paddr = paddrs[i*nrxqs]; rxr->bytes = 0; rxr->que = que; } device_printf(iflib_get_dev(ctx), "allocated for %d rx queues\n", adapter->num_rx_queues); return (0); fail: ixv_if_queues_free(ctx); return (error); } /* ixv_if_rx_queues_alloc */ /************************************************************************ * ixv_if_queues_free ************************************************************************/ static void ixv_if_queues_free(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct ix_tx_queue *que = adapter->tx_queues; int i; if (que == NULL) goto free; for (i = 0; i < adapter->num_tx_queues; i++, que++) { struct tx_ring *txr = &que->txr; if (txr->tx_rsq == NULL) break; free(txr->tx_rsq, M_DEVBUF); txr->tx_rsq = NULL; } if (adapter->tx_queues != NULL) free(adapter->tx_queues, M_DEVBUF); free: if (adapter->rx_queues != NULL) free(adapter->rx_queues, M_DEVBUF); adapter->tx_queues = NULL; adapter->rx_queues = NULL; } /* ixv_if_queues_free */ /************************************************************************ * ixv_if_attach_pre - Device initialization routine * * Called when the driver is being loaded. * Identifies the type of hardware, allocates all resources * and initializes the hardware. * * return 0 on success, positive on failure ************************************************************************/ static int ixv_if_attach_pre(if_ctx_t ctx) { struct adapter *adapter; device_t dev; if_softc_ctx_t scctx; struct ixgbe_hw *hw; int error = 0; INIT_DEBUGOUT("ixv_attach: begin"); /* Allocate, clear, and link in our adapter structure */ dev = iflib_get_dev(ctx); adapter = iflib_get_softc(ctx); adapter->dev = dev; adapter->ctx = ctx; adapter->hw.back = adapter; scctx = adapter->shared = iflib_get_softc_ctx(ctx); adapter->media = iflib_get_media(ctx); hw = &adapter->hw; /* Do base PCI setup - map BAR0 */ if (ixv_allocate_pci_resources(ctx)) { device_printf(dev, "ixv_allocate_pci_resources() failed!\n"); error = ENXIO; goto err_out; } /* SYSCTL APIs */ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0, ixv_sysctl_debug, "I", "Debug Info"); /* Determine hardware revision */ ixv_identify_hardware(ctx); ixv_init_device_features(adapter); /* Initialize the shared code */ error = ixgbe_init_ops_vf(hw); if (error) { device_printf(dev, "ixgbe_init_ops_vf() failed!\n"); error = EIO; goto err_out; } /* Setup the mailbox */ ixgbe_init_mbx_params_vf(hw); error = hw->mac.ops.reset_hw(hw); if (error == IXGBE_ERR_RESET_FAILED) device_printf(dev, "...reset_hw() failure: Reset Failed!\n"); else if (error) device_printf(dev, "...reset_hw() failed with error %d\n", error); if (error) { error = EIO; goto err_out; } error = hw->mac.ops.init_hw(hw); if (error) { device_printf(dev, "...init_hw() failed with error %d\n", error); error = EIO; goto err_out; } /* Negotiate mailbox API version */ error = ixv_negotiate_api(adapter); if (error) { device_printf(dev, "Mailbox API negotiation failed during attach!\n"); goto err_out;
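/* ixv_negotiate_api() tries mailbox API versions from newest to oldest, so a failure here means the PF accepted none of the versions this VF driver understands. */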
} /* If no mac address was assigned, make a random one */ if (!ixv_check_ether_addr(hw->mac.addr)) { ether_gen_addr(iflib_get_ifp(ctx), (struct ether_addr *)hw->mac.addr); bcopy(hw->mac.addr, hw->mac.perm_addr, sizeof(hw->mac.perm_addr)); } /* Most of the iflib initialization... */ iflib_set_mac(ctx, hw->mac.addr); switch (adapter->hw.mac.type) { case ixgbe_mac_X550_vf: case ixgbe_mac_X550EM_x_vf: case ixgbe_mac_X550EM_a_vf: scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 2; break; default: scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1; } scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] * sizeof(union ixgbe_adv_tx_desc) + sizeof(u32), DBA_ALIGN); scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN); /* XXX */ scctx->isc_tx_csum_flags = CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO | CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_TSO; scctx->isc_tx_nsegments = IXGBE_82599_SCATTER; scctx->isc_msix_bar = pci_msix_table_bar(dev); scctx->isc_tx_tso_segments_max = scctx->isc_tx_nsegments; scctx->isc_tx_tso_size_max = IXGBE_TSO_SIZE; scctx->isc_tx_tso_segsize_max = PAGE_SIZE; scctx->isc_txrx = &ixgbe_txrx; /* * Tell the upper layer(s) we support everything the PF * driver does except... * Wake-on-LAN */ scctx->isc_capabilities = IXGBE_CAPS; scctx->isc_capabilities ^= IFCAP_WOL; scctx->isc_capenable = scctx->isc_capabilities; INIT_DEBUGOUT("ixv_if_attach_pre: end"); return (0); err_out: ixv_free_pci_resources(ctx); return (error); } /* ixv_if_attach_pre */ static int ixv_if_attach_post(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); device_t dev = iflib_get_dev(ctx); int error = 0; /* Setup OS specific network interface */ error = ixv_setup_interface(ctx); if (error) { device_printf(dev, "Interface setup failed: %d\n", error); goto end; } /* Do the stats setup */ ixv_save_stats(adapter); ixv_init_stats(adapter); ixv_add_stats_sysctls(adapter); end: return error; } /* ixv_if_attach_post */ /************************************************************************ * ixv_detach - Device removal routine * * Called when the driver is being removed. * Stops the adapter and deallocates all the resources * that were allocated for driver operation. * * return 0 on success, positive on failure ************************************************************************/ static int ixv_if_detach(if_ctx_t ctx) { INIT_DEBUGOUT("ixv_detach: begin"); ixv_free_pci_resources(ctx); return (0); } /* ixv_if_detach */ /************************************************************************ * ixv_if_mtu_set ************************************************************************/ static int ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu) { struct adapter *adapter = iflib_get_softc(ctx); struct ifnet *ifp = iflib_get_ifp(ctx); int error = 0; IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); if (mtu > IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) { error = EINVAL; } else { ifp->if_mtu = mtu; adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR; } return error; } /* ixv_if_mtu_set */ /************************************************************************ * ixv_if_init - Init entry point * * Used in two ways: It is used by the stack as an init entry * point in network interface structure. It is also used * by the driver as a hw/sw initialization routine to get * to a consistent state. 
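* The body follows the usual VF bring-up order: stop the adapter, reprogram RAR[0] with the current MAC address, reset and renegotiate the mailbox API, then configure the TX/RX units, multicast table, VLAN support and IVARs before finally enabling interrupts.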
* * return 0 on success, positive on failure ************************************************************************/ static void ixv_if_init(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct ifnet *ifp = iflib_get_ifp(ctx); device_t dev = iflib_get_dev(ctx); struct ixgbe_hw *hw = &adapter->hw; int error = 0; INIT_DEBUGOUT("ixv_if_init: begin"); - hw->adapter_stopped = FALSE; + hw->adapter_stopped = false; hw->mac.ops.stop_adapter(hw); /* reprogram the RAR[0] in case user changed it. */ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); /* Get the latest mac address, User can use a LAA */ bcopy(IF_LLADDR(ifp), hw->mac.addr, IXGBE_ETH_LENGTH_OF_ADDRESS); hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1); /* Reset VF and renegotiate mailbox API version */ hw->mac.ops.reset_hw(hw); hw->mac.ops.start_hw(hw); error = ixv_negotiate_api(adapter); if (error) { device_printf(dev, "Mailbox API negotiation failed in if_init!\n"); return; } ixv_initialize_transmit_units(ctx); /* Setup Multicast table */ ixv_if_multi_set(ctx); adapter->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx); /* Configure RX settings */ ixv_initialize_receive_units(ctx); /* Set up VLAN offload and filter */ ixv_setup_vlan_support(ctx); /* Set up MSI-X routing */ ixv_configure_ivars(adapter); /* Set up auto-mask */ IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE); /* Set moderation on the Link interrupt */ IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->vector), IXGBE_LINK_ITR); /* Stats init */ ixv_init_stats(adapter); /* Config/Enable Link */ hw->mac.ops.check_link(hw, &adapter->link_speed, &adapter->link_up, - FALSE); + false); /* And now turn on interrupts */ ixv_if_enable_intr(ctx); return; } /* ixv_if_init */ /************************************************************************ * ixv_enable_queue ************************************************************************/ static inline void ixv_enable_queue(struct adapter *adapter, u32 vector) { struct ixgbe_hw *hw = &adapter->hw; u32 queue = 1 << vector; u32 mask; mask = (IXGBE_EIMS_RTX_QUEUE & queue); IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); } /* ixv_enable_queue */ /************************************************************************ * ixv_disable_queue ************************************************************************/ static inline void ixv_disable_queue(struct adapter *adapter, u32 vector) { struct ixgbe_hw *hw = &adapter->hw; u64 queue = (u64)(1 << vector); u32 mask; mask = (IXGBE_EIMS_RTX_QUEUE & queue); IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask); } /* ixv_disable_queue */ /************************************************************************ * ixv_msix_que - MSI-X Queue Interrupt Service routine ************************************************************************/ static int ixv_msix_que(void *arg) { struct ix_rx_queue *que = arg; struct adapter *adapter = que->adapter; ixv_disable_queue(adapter, que->msix); ++que->irqs; return (FILTER_SCHEDULE_THREAD); } /* ixv_msix_que */ /************************************************************************ * ixv_msix_mbx ************************************************************************/ static int ixv_msix_mbx(void *arg) { struct adapter *adapter = arg; struct ixgbe_hw *hw = &adapter->hw; u32 reg; ++adapter->link_irq; /* First get the cause */ reg = IXGBE_READ_REG(hw, IXGBE_VTEICS); /* Clear interrupt with write */ IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg); /* Link status change */ if (reg & IXGBE_EICR_LSC) iflib_admin_intr_deferred(adapter->ctx); IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, 
IXGBE_EIMS_OTHER); return (FILTER_HANDLED); } /* ixv_msix_mbx */ /************************************************************************ * ixv_media_status - Media Ioctl callback * * Called whenever the user queries the status of * the interface using ifconfig. ************************************************************************/ static void ixv_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr) { struct adapter *adapter = iflib_get_softc(ctx); INIT_DEBUGOUT("ixv_media_status: begin"); iflib_admin_intr_deferred(ctx); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (!adapter->link_active) return; ifmr->ifm_status |= IFM_ACTIVE; switch (adapter->link_speed) { case IXGBE_LINK_SPEED_1GB_FULL: ifmr->ifm_active |= IFM_1000_T | IFM_FDX; break; case IXGBE_LINK_SPEED_10GB_FULL: ifmr->ifm_active |= IFM_10G_T | IFM_FDX; break; case IXGBE_LINK_SPEED_100_FULL: ifmr->ifm_active |= IFM_100_TX | IFM_FDX; break; case IXGBE_LINK_SPEED_10_FULL: ifmr->ifm_active |= IFM_10_T | IFM_FDX; break; } } /* ixv_if_media_status */ /************************************************************************ * ixv_if_media_change - Media Ioctl callback * * Called when the user changes speed/duplex using * media/mediaopt option with ifconfig. ************************************************************************/ static int ixv_if_media_change(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct ifmedia *ifm = iflib_get_media(ctx); INIT_DEBUGOUT("ixv_media_change: begin"); if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); switch (IFM_SUBTYPE(ifm->ifm_media)) { case IFM_AUTO: break; default: device_printf(adapter->dev, "Only auto media type\n"); return (EINVAL); } return (0); } /* ixv_if_media_change */ /************************************************************************ * ixv_negotiate_api * * Negotiate the Mailbox API with the PF; * start with the most featured API first. ************************************************************************/ static int ixv_negotiate_api(struct adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; int mbx_api[] = { ixgbe_mbox_api_11, ixgbe_mbox_api_10, ixgbe_mbox_api_unknown }; int i = 0; while (mbx_api[i] != ixgbe_mbox_api_unknown) { if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0) return (0); i++; } return (EINVAL); } /* ixv_negotiate_api */ /************************************************************************ * ixv_if_multi_set - Multicast Update * * Called whenever multicast address list is updated. ************************************************************************/ static void ixv_if_multi_set(if_ctx_t ctx) { u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS]; struct adapter *adapter = iflib_get_softc(ctx); u8 *update_ptr; struct ifmultiaddr *ifma; if_t ifp = iflib_get_ifp(ctx); int mcnt = 0; IOCTL_DEBUGOUT("ixv_if_multi_set: begin"); CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS], IXGBE_ETH_LENGTH_OF_ADDRESS); mcnt++; } update_ptr = mta; adapter->hw.mac.ops.update_mc_addr_list(&adapter->hw, update_ptr, mcnt, - ixv_mc_array_itr, TRUE); + ixv_mc_array_itr, true); } /* ixv_if_multi_set */ /************************************************************************ * ixv_mc_array_itr * * An iterator function needed by the multicast shared code. * It feeds the shared code routine the addresses in the * array of ixv_if_multi_set() one by one.
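* * Contract sketch: *update_ptr is a cursor into the flat mta[] array built by ixv_if_multi_set(); each call returns the current 6-byte address, advances the cursor by IXGBE_ETH_LENGTH_OF_ADDRESS and forces *vmdq to 0, as a VF owns only a single pool.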
************************************************************************/ static u8 * ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq) { u8 *addr = *update_ptr; u8 *newptr; *vmdq = 0; newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS; *update_ptr = newptr; return addr; } /* ixv_mc_array_itr */ /************************************************************************ * ixv_if_local_timer - Timer routine * * Checks for link status, updates statistics, * and runs the watchdog check. ************************************************************************/ static void ixv_if_local_timer(if_ctx_t ctx, uint16_t qid) { if (qid != 0) return; /* Fire off the adminq task */ iflib_admin_intr_deferred(ctx); } /* ixv_if_local_timer */ /************************************************************************ * ixv_if_update_admin_status - Update OS on link state * * Note: Only updates the OS on the cached link state. * The real check of the hardware only happens with * a link interrupt. ************************************************************************/ static void ixv_if_update_admin_status(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); device_t dev = iflib_get_dev(ctx); s32 status; - adapter->hw.mac.get_link_status = TRUE; + adapter->hw.mac.get_link_status = true; status = ixgbe_check_link(&adapter->hw, &adapter->link_speed, - &adapter->link_up, FALSE); + &adapter->link_up, false); - if (status != IXGBE_SUCCESS && adapter->hw.adapter_stopped == FALSE) { + if (status != IXGBE_SUCCESS && adapter->hw.adapter_stopped == false) { /* Mailbox's Clear To Send status is lost or timeout occurred. * We need reinitialization. */ iflib_get_ifp(ctx)->if_init(ctx); } if (adapter->link_up) { - if (adapter->link_active == FALSE) { + if (adapter->link_active == false) { if (bootverbose) device_printf(dev, "Link is up %d Gbps %s \n", ((adapter->link_speed == 128) ? 10 : 1), "Full Duplex"); - adapter->link_active = TRUE; + adapter->link_active = true; iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10)); } } else { /* Link down */ - if (adapter->link_active == TRUE) { + if (adapter->link_active == true) { if (bootverbose) device_printf(dev, "Link is Down\n"); iflib_link_state_change(ctx, LINK_STATE_DOWN, 0); - adapter->link_active = FALSE; + adapter->link_active = false; } } /* Stats Update */ ixv_update_stats(adapter); } /* ixv_if_update_admin_status */ /************************************************************************ * ixv_if_stop - Stop the hardware * * Disables all traffic on the adapter by issuing a * global reset on the MAC and deallocates TX/RX buffers. ************************************************************************/ static void ixv_if_stop(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct ixgbe_hw *hw = &adapter->hw; INIT_DEBUGOUT("ixv_stop: begin\n"); ixv_if_disable_intr(ctx); hw->mac.ops.reset_hw(hw); - adapter->hw.adapter_stopped = FALSE; + adapter->hw.adapter_stopped = false; hw->mac.ops.stop_adapter(hw); /* Update the stack */ - adapter->link_up = FALSE; + adapter->link_up = false; ixv_if_update_admin_status(ctx); /* reprogram the RAR[0] in case user changed it. */ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); } /* ixv_if_stop */ /************************************************************************ * ixv_identify_hardware - Determine hardware revision. 
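* Only device IDs listed in ixv_vendor_info_array can reach this driver, so the switch below is a pared-down set_mac_type() covering just the VF variants.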
************************************************************************/ static void ixv_identify_hardware(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); device_t dev = iflib_get_dev(ctx); struct ixgbe_hw *hw = &adapter->hw; /* Save off the information about this board */ hw->vendor_id = pci_get_vendor(dev); hw->device_id = pci_get_device(dev); hw->revision_id = pci_get_revid(dev); hw->subsystem_vendor_id = pci_get_subvendor(dev); hw->subsystem_device_id = pci_get_subdevice(dev); /* A subset of set_mac_type */ switch (hw->device_id) { case IXGBE_DEV_ID_82599_VF: hw->mac.type = ixgbe_mac_82599_vf; break; case IXGBE_DEV_ID_X540_VF: hw->mac.type = ixgbe_mac_X540_vf; break; case IXGBE_DEV_ID_X550_VF: hw->mac.type = ixgbe_mac_X550_vf; break; case IXGBE_DEV_ID_X550EM_X_VF: hw->mac.type = ixgbe_mac_X550EM_x_vf; break; case IXGBE_DEV_ID_X550EM_A_VF: hw->mac.type = ixgbe_mac_X550EM_a_vf; break; default: device_printf(dev, "unknown mac type\n"); hw->mac.type = ixgbe_mac_unknown; break; } } /* ixv_identify_hardware */ /************************************************************************ * ixv_if_msix_intr_assign - Setup MSI-X Interrupt resources and handlers ************************************************************************/ static int ixv_if_msix_intr_assign(if_ctx_t ctx, int msix) { struct adapter *adapter = iflib_get_softc(ctx); device_t dev = iflib_get_dev(ctx); struct ix_rx_queue *rx_que = adapter->rx_queues; struct ix_tx_queue *tx_que; int error, rid, vector = 0; char buf[16]; for (int i = 0; i < adapter->num_rx_queues; i++, vector++, rx_que++) { rid = vector + 1; snprintf(buf, sizeof(buf), "rxq%d", i); error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, IFLIB_INTR_RXTX, ixv_msix_que, rx_que, rx_que->rxr.me, buf); if (error) { device_printf(iflib_get_dev(ctx), "Failed to allocate que int %d err: %d", i, error); adapter->num_rx_queues = i + 1; goto fail; } rx_que->msix = vector; } for (int i = 0; i < adapter->num_tx_queues; i++) { snprintf(buf, sizeof(buf), "txq%d", i); tx_que = &adapter->tx_queues[i]; tx_que->msix = i % adapter->num_rx_queues; iflib_softirq_alloc_generic(ctx, &adapter->rx_queues[tx_que->msix].que_irq, IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf); } rid = vector + 1; error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid, IFLIB_INTR_ADMIN, ixv_msix_mbx, adapter, 0, "aq"); if (error) { device_printf(iflib_get_dev(ctx), "Failed to register admin handler"); return (error); } adapter->vector = vector; /* * Due to a broken design QEMU will fail to properly * enable the guest for MSIX unless the vectors in * the table are all set up, so we must rewrite the * ENABLE in the MSIX control register again at this * point to cause it to successfully initialize us. 
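	 *
	 * The write below merely re-asserts PCIM_MSIXCTRL_MSIX_ENABLE in
	 * the MSI-X control word, so on hardware that latched the enable
	 * bit correctly the first time it is a harmless no-op.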
*/ if (adapter->hw.mac.type == ixgbe_mac_82599_vf) { int msix_ctrl; pci_find_cap(dev, PCIY_MSIX, &rid); rid += PCIR_MSIX_CTRL; msix_ctrl = pci_read_config(dev, rid, 2); msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE; pci_write_config(dev, rid, msix_ctrl, 2); } return (0); fail: iflib_irq_free(ctx, &adapter->irq); rx_que = adapter->rx_queues; for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) iflib_irq_free(ctx, &rx_que->que_irq); return (error); } /* ixv_if_msix_intr_assign */ /************************************************************************ * ixv_allocate_pci_resources ************************************************************************/ static int ixv_allocate_pci_resources(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); device_t dev = iflib_get_dev(ctx); int rid; rid = PCIR_BAR(0); adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!(adapter->pci_mem)) { device_printf(dev, "Unable to allocate bus resource: memory\n"); return (ENXIO); } adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem); adapter->osdep.mem_bus_space_handle = rman_get_bushandle(adapter->pci_mem); adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle; return (0); } /* ixv_allocate_pci_resources */ /************************************************************************ * ixv_free_pci_resources ************************************************************************/ static void ixv_free_pci_resources(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct ix_rx_queue *que = adapter->rx_queues; device_t dev = iflib_get_dev(ctx); /* Release all MSI-X queue resources */ if (adapter->intr_type == IFLIB_INTR_MSIX) iflib_irq_free(ctx, &adapter->irq); if (que != NULL) { for (int i = 0; i < adapter->num_rx_queues; i++, que++) { iflib_irq_free(ctx, &que->que_irq); } } if (adapter->pci_mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(adapter->pci_mem), adapter->pci_mem); } /* ixv_free_pci_resources */ /************************************************************************ * ixv_setup_interface * * Setup networking device structure and register an interface. 
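 *
 * The VF advertises a fixed 10Gb/s baudrate and registers only
 * the IFM_AUTO media type; the actual link speed is owned by the
 * PF and is reported back through ixv_if_media_status().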
************************************************************************/ static int ixv_setup_interface(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); if_softc_ctx_t scctx = adapter->shared; struct ifnet *ifp = iflib_get_ifp(ctx); INIT_DEBUGOUT("ixv_setup_interface: begin"); if_setbaudrate(ifp, IF_Gbps(10)); ifp->if_snd.ifq_maxlen = scctx->isc_ntxd[0] - 2; adapter->max_frame_size = ifp->if_mtu + IXGBE_MTU_HDR; ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO); return 0; } /* ixv_setup_interface */ /************************************************************************ * ixv_if_get_counter ************************************************************************/ static uint64_t ixv_if_get_counter(if_ctx_t ctx, ift_counter cnt) { struct adapter *adapter = iflib_get_softc(ctx); if_t ifp = iflib_get_ifp(ctx); switch (cnt) { case IFCOUNTER_IPACKETS: return (adapter->ipackets); case IFCOUNTER_OPACKETS: return (adapter->opackets); case IFCOUNTER_IBYTES: return (adapter->ibytes); case IFCOUNTER_OBYTES: return (adapter->obytes); case IFCOUNTER_IMCASTS: return (adapter->imcasts); default: return (if_get_counter_default(ifp, cnt)); } } /* ixv_if_get_counter */ /* ixv_if_needs_restart - Tell iflib when the driver needs to be reinitialized * @ctx: iflib context * @event: event code to check * * Defaults to returning true for every event. * * @returns true if iflib needs to reinit the interface */ static bool ixv_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event) { switch (event) { case IFLIB_RESTART_VLAN_CONFIG: /* XXX: This may not need to return true */ default: return (true); } } /************************************************************************ * ixv_initialize_transmit_units - Enable transmit unit. ************************************************************************/ static void ixv_initialize_transmit_units(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct ixgbe_hw *hw = &adapter->hw; if_softc_ctx_t scctx = adapter->shared; struct ix_tx_queue *que = adapter->tx_queues; int i; for (i = 0; i < adapter->num_tx_queues; i++, que++) { struct tx_ring *txr = &que->txr; u64 tdba = txr->tx_paddr; u32 txctrl, txdctl; int j = txr->me; /* Set WTHRESH to 8, burst writeback */ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); txdctl |= (8 << 16); IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl); /* Set the HW Tx Head and Tail indices */ IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(j), 0); IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(j), 0); /* Set Tx Tail register */ txr->tail = IXGBE_VFTDT(j); txr->tx_rs_cidx = txr->tx_rs_pidx; /* Initialize the last processed descriptor to be the end of * the ring, rather than the start, so that we avoid an * off-by-one error when calculating how many descriptors are * done in the credits_update function. 
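	 *
	 * e.g. if the first packet completes descriptors 0..k of a
	 * 2048-entry ring, delta = k - 2047 (mod 2048) = k + 1, exactly
	 * the number of descriptors consumed; starting from 0 would
	 * have counted only k.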
*/ txr->tx_cidx_processed = scctx->isc_ntxd[0] - 1; for (int k = 0; k < scctx->isc_ntxd[0]; k++) txr->tx_rsq[k] = QIDX_INVALID; /* Set Ring parameters */ IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j), (tdba & 0x00000000ffffffffULL)); IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32)); IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), scctx->isc_ntxd[0] * sizeof(struct ixgbe_legacy_tx_desc)); txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j)); txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl); /* Now enable */ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); txdctl |= IXGBE_TXDCTL_ENABLE; IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl); } return; } /* ixv_initialize_transmit_units */ /************************************************************************ * ixv_initialize_rss_mapping ************************************************************************/ static void ixv_initialize_rss_mapping(struct adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 reta = 0, mrqc, rss_key[10]; int queue_id; int i, j; u32 rss_hash_config; if (adapter->feat_en & IXGBE_FEATURE_RSS) { /* Fetch the configured RSS key */ rss_getkey((uint8_t *)&rss_key); } else { /* set up random bits */ arc4rand(&rss_key, sizeof(rss_key), 0); } /* Now fill out hash function seeds */ for (i = 0; i < 10; i++) IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]); /* Set up the redirection table */ for (i = 0, j = 0; i < 64; i++, j++) { if (j == adapter->num_rx_queues) j = 0; if (adapter->feat_en & IXGBE_FEATURE_RSS) { /* * Fetch the RSS bucket id for the given indirection * entry. Cap it at the number of configured buckets * (which is num_rx_queues.) */ queue_id = rss_get_indirection_to_bucket(i); queue_id = queue_id % adapter->num_rx_queues; } else queue_id = j; /* * The low 8 bits are for hash value (n+0); * The next 8 bits are for hash value (n+1), etc. */ reta >>= 8; reta |= ((uint32_t)queue_id) << 24; if ((i & 3) == 3) { IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta); reta = 0; } } /* Perform hash on these packet types */ if (adapter->feat_en & IXGBE_FEATURE_RSS) rss_hash_config = rss_gethashconfig(); else { /* * Disable UDP - IP fragments aren't currently being handled * and so we end up with a mix of 2-tuple and 4-tuple * traffic. 
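		 *
		 * Fragments carry no UDP header, so they would hash on the
		 * 2-tuple (addresses only) while complete datagrams hash on
		 * the 4-tuple, splitting a single flow across queues.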
*/ rss_hash_config = RSS_HASHTYPE_RSS_IPV4 | RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | RSS_HASHTYPE_RSS_TCP_IPV6; } mrqc = IXGBE_MRQC_RSSEN; if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4; if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP; if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6; if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP; if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX) device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n", __func__); if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX) device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n", __func__); if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX) device_printf(adapter->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n", __func__); IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc); } /* ixv_initialize_rss_mapping */ /************************************************************************ * ixv_initialize_receive_units - Setup receive registers and features. ************************************************************************/ static void ixv_initialize_receive_units(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); if_softc_ctx_t scctx; struct ixgbe_hw *hw = &adapter->hw; struct ifnet *ifp = iflib_get_ifp(ctx); struct ix_rx_queue *que = adapter->rx_queues; u32 bufsz, psrtype; if (ifp->if_mtu > ETHERMTU) bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; else bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR | IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR | IXGBE_PSRTYPE_L2HDR; if (adapter->num_rx_queues > 1) psrtype |= 1 << 29; IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype); /* Tell PF our max_frame size */ if (ixgbevf_rlpml_set_vf(hw, adapter->max_frame_size) != 0) { device_printf(adapter->dev, "There is a problem with the PF setup. 
It is likely the receive unit for this VF will not function correctly.\n"); } scctx = adapter->shared; for (int i = 0; i < adapter->num_rx_queues; i++, que++) { struct rx_ring *rxr = &que->rxr; u64 rdba = rxr->rx_paddr; u32 reg, rxdctl; int j = rxr->me; /* Disable the queue */ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)); rxdctl &= ~IXGBE_RXDCTL_ENABLE; IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl); for (int k = 0; k < 10; k++) { if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE) msec_delay(1); else break; } wmb(); /* Setup the Base and Length of the Rx Descriptor Ring */ IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j), (rdba & 0x00000000ffffffffULL)); IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32)); IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), scctx->isc_nrxd[0] * sizeof(union ixgbe_adv_rx_desc)); /* Reset the ring indices */ IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0); IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0); /* Set up the SRRCTL register */ reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(j)); reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; reg |= bufsz; reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(j), reg); /* Capture Rx Tail index */ rxr->tail = IXGBE_VFRDT(rxr->me); /* Do the queue enabling last */ rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME; IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl); for (int l = 0; l < 10; l++) { if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE) break; msec_delay(1); } wmb(); /* Set the Tail Pointer */ #ifdef DEV_NETMAP /* * In netmap mode, we must preserve the buffers made * available to userspace before the if_init() * (this is true by default on the TX side, because * init makes all buffers available to userspace). * * netmap_reset() and the device specific routines * (e.g. ixgbe_setup_receive_rings()) map these * buffers at the end of the NIC ring, so here we * must set the RDT (tail) register to make sure * they are not overwritten. * * In this driver the NIC ring starts at RDH = 0, * RDT points to the last slot available for reception (?), * so RDT = num_rx_desc - 1 means the whole ring is available. */ if (ifp->if_capenable & IFCAP_NETMAP) { struct netmap_adapter *na = NA(ifp); struct netmap_kring *kring = na->rx_rings[j]; int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring); IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), t); } else #endif /* DEV_NETMAP */ IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), scctx->isc_nrxd[0] - 1); } /* * Do not touch RSS and RETA settings for older hardware * as those are shared among PF and all VF. */ if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) ixv_initialize_rss_mapping(adapter); } /* ixv_initialize_receive_units */ /************************************************************************ * ixv_setup_vlan_support ************************************************************************/ static void ixv_setup_vlan_support(if_ctx_t ctx) { struct ifnet *ifp = iflib_get_ifp(ctx); struct adapter *adapter = iflib_get_softc(ctx); struct ixgbe_hw *hw = &adapter->hw; u32 ctrl, vid, vfta, retry; /* * We get here thru if_init, meaning * a soft reset, this has already cleared * the VFTA and other state, so if there * have been no vlan's registered do nothing. 
*/ if (adapter->num_vlans == 0) return; if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { /* Enable the queues */ for (int i = 0; i < adapter->num_rx_queues; i++) { ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)); ctrl |= IXGBE_RXDCTL_VME; IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl); /* * Let Rx path know that it needs to store VLAN tag * as part of extra mbuf info. */ - adapter->rx_queues[i].rxr.vtag_strip = TRUE; + adapter->rx_queues[i].rxr.vtag_strip = true; } } /* * If filtering VLAN tags is disabled, * there is no need to fill VLAN Filter Table Array (VFTA). */ if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0) return; /* * A soft reset zeroes out the VFTA, so * we need to repopulate it now. */ for (int i = 0; i < IXGBE_VFTA_SIZE; i++) { if (ixv_shadow_vfta[i] == 0) continue; vfta = ixv_shadow_vfta[i]; /* * Reconstruct the VLAN IDs * based on the bits set in each * of the array ints. */ for (int j = 0; j < 32; j++) { retry = 0; if ((vfta & (1 << j)) == 0) continue; vid = (i * 32) + j; /* Call the shared code mailbox routine */ - while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) { + while (hw->mac.ops.set_vfta(hw, vid, 0, true, false)) { if (++retry > 5) break; } } } } /* ixv_setup_vlan_support */ /************************************************************************ * ixv_if_register_vlan * * Run via a vlan config EVENT; it enables us to use the * HW Filter table since we can get the vlan id. This just * creates the entry in the soft version of the VFTA, init * will repopulate the real table. ************************************************************************/ static void ixv_if_register_vlan(if_ctx_t ctx, u16 vtag) { struct adapter *adapter = iflib_get_softc(ctx); u16 index, bit; index = (vtag >> 5) & 0x7F; bit = vtag & 0x1F; ixv_shadow_vfta[index] |= (1 << bit); ++adapter->num_vlans; } /* ixv_if_register_vlan */ /************************************************************************ * ixv_if_unregister_vlan * * Run via a vlan unconfig EVENT; removes our entry * in the soft vfta.
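 *
 * e.g. vtag 1234 maps to shadow-VFTA word (1234 >> 5) & 0x7F = 38
 * and bit 1234 & 0x1F = 18 within that word, mirroring the math
 * in ixv_if_register_vlan() above.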
************************************************************************/ static void ixv_if_unregister_vlan(if_ctx_t ctx, u16 vtag) { struct adapter *adapter = iflib_get_softc(ctx); u16 index, bit; index = (vtag >> 5) & 0x7F; bit = vtag & 0x1F; ixv_shadow_vfta[index] &= ~(1 << bit); --adapter->num_vlans; } /* ixv_if_unregister_vlan */ /************************************************************************ * ixv_if_enable_intr ************************************************************************/ static void ixv_if_enable_intr(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); struct ixgbe_hw *hw = &adapter->hw; struct ix_rx_queue *que = adapter->rx_queues; u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); mask = IXGBE_EIMS_ENABLE_MASK; mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC); IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask); for (int i = 0; i < adapter->num_rx_queues; i++, que++) ixv_enable_queue(adapter, que->msix); IXGBE_WRITE_FLUSH(hw); } /* ixv_if_enable_intr */ /************************************************************************ * ixv_if_disable_intr ************************************************************************/ static void ixv_if_disable_intr(if_ctx_t ctx) { struct adapter *adapter = iflib_get_softc(ctx); IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0); IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0); IXGBE_WRITE_FLUSH(&adapter->hw); } /* ixv_if_disable_intr */ /************************************************************************ * ixv_if_rx_queue_intr_enable ************************************************************************/ static int ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid) { struct adapter *adapter = iflib_get_softc(ctx); struct ix_rx_queue *que = &adapter->rx_queues[rxqid]; ixv_enable_queue(adapter, que->rxr.me); return (0); } /* ixv_if_rx_queue_intr_enable */ /************************************************************************ * ixv_set_ivar * * Setup the correct IVAR register for a particular MSI-X interrupt * - entry is the register array entry * - vector is the MSI-X vector for this queue * - type is RX/TX/MISC ************************************************************************/ static void ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type) { struct ixgbe_hw *hw = &adapter->hw; u32 ivar, index; vector |= IXGBE_IVAR_ALLOC_VAL; if (type == -1) { /* MISC IVAR */ ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); ivar &= ~0xFF; ivar |= vector; IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar); } else { /* RX/TX IVARS */ index = (16 * (entry & 1)) + (8 * type); ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1)); ivar &= ~(0xFF << index); ivar |= (vector << index); IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar); } } /* ixv_set_ivar */ /************************************************************************ * ixv_configure_ivars ************************************************************************/ static void ixv_configure_ivars(struct adapter *adapter) { struct ix_rx_queue *que = adapter->rx_queues; MPASS(adapter->num_rx_queues == adapter->num_tx_queues); for (int i = 0; i < adapter->num_rx_queues; i++, que++) { /* First the RX queue entry */ ixv_set_ivar(adapter, i, que->msix, 0); /* ... 
and the TX */ ixv_set_ivar(adapter, i, que->msix, 1); /* Set an initial value in EITR */ IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEITR(que->msix), IXGBE_EITR_DEFAULT); } /* For the mailbox interrupt */ ixv_set_ivar(adapter, 1, adapter->vector, -1); } /* ixv_configure_ivars */ /************************************************************************ * ixv_save_stats * * The VF stats registers never have a truly virgin * starting point, so this routine tries to make an * artificial one, marking ground zero on attach as * it were. ************************************************************************/ static void ixv_save_stats(struct adapter *adapter) { if (adapter->stats.vf.vfgprc || adapter->stats.vf.vfgptc) { adapter->stats.vf.saved_reset_vfgprc += adapter->stats.vf.vfgprc - adapter->stats.vf.base_vfgprc; adapter->stats.vf.saved_reset_vfgptc += adapter->stats.vf.vfgptc - adapter->stats.vf.base_vfgptc; adapter->stats.vf.saved_reset_vfgorc += adapter->stats.vf.vfgorc - adapter->stats.vf.base_vfgorc; adapter->stats.vf.saved_reset_vfgotc += adapter->stats.vf.vfgotc - adapter->stats.vf.base_vfgotc; adapter->stats.vf.saved_reset_vfmprc += adapter->stats.vf.vfmprc - adapter->stats.vf.base_vfmprc; } } /* ixv_save_stats */ /************************************************************************ * ixv_init_stats ************************************************************************/ static void ixv_init_stats(struct adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; adapter->stats.vf.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC); adapter->stats.vf.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB); adapter->stats.vf.last_vfgorc |= (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32); adapter->stats.vf.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC); adapter->stats.vf.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB); adapter->stats.vf.last_vfgotc |= (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32); adapter->stats.vf.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC); adapter->stats.vf.base_vfgprc = adapter->stats.vf.last_vfgprc; adapter->stats.vf.base_vfgorc = adapter->stats.vf.last_vfgorc; adapter->stats.vf.base_vfgptc = adapter->stats.vf.last_vfgptc; adapter->stats.vf.base_vfgotc = adapter->stats.vf.last_vfgotc; adapter->stats.vf.base_vfmprc = adapter->stats.vf.last_vfmprc; } /* ixv_init_stats */ #define UPDATE_STAT_32(reg, last, count) \ { \ u32 current = IXGBE_READ_REG(hw, reg); \ if (current < last) \ count += 0x100000000LL; \ last = current; \ count &= 0xFFFFFFFF00000000LL; \ count |= current; \ } #define UPDATE_STAT_36(lsb, msb, last, count) \ { \ u64 cur_lsb = IXGBE_READ_REG(hw, lsb); \ u64 cur_msb = IXGBE_READ_REG(hw, msb); \ u64 current = ((cur_msb << 32) | cur_lsb); \ if (current < last) \ count += 0x1000000000LL; \ last = current; \ count &= 0xFFFFFFF000000000LL; \ count |= current; \ } /************************************************************************ * ixv_update_stats - Update the board statistics counters. 
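 *
 * The UPDATE_STAT_32/36 macros above widen the free-running
 * 32-bit (and 36-bit split LSB/MSB) hardware counters to 64 bits:
 * a current reading below the previous one means the counter
 * wrapped, so a carry of 2^32 (or 2^36) is added to the
 * accumulated count.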
************************************************************************/ void ixv_update_stats(struct adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; struct ixgbevf_hw_stats *stats = &adapter->stats.vf; UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.vf.last_vfgprc, adapter->stats.vf.vfgprc); UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.vf.last_vfgptc, adapter->stats.vf.vfgptc); UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, adapter->stats.vf.last_vfgorc, adapter->stats.vf.vfgorc); UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, adapter->stats.vf.last_vfgotc, adapter->stats.vf.vfgotc); UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.vf.last_vfmprc, adapter->stats.vf.vfmprc); /* Fill out the OS statistics structure */ IXGBE_SET_IPACKETS(adapter, stats->vfgprc); IXGBE_SET_OPACKETS(adapter, stats->vfgptc); IXGBE_SET_IBYTES(adapter, stats->vfgorc); IXGBE_SET_OBYTES(adapter, stats->vfgotc); IXGBE_SET_IMCASTS(adapter, stats->vfmprc); } /* ixv_update_stats */ /************************************************************************ * ixv_add_stats_sysctls - Add statistic sysctls for the VF. ************************************************************************/ static void ixv_add_stats_sysctls(struct adapter *adapter) { device_t dev = adapter->dev; struct ix_tx_queue *tx_que = adapter->tx_queues; struct ix_rx_queue *rx_que = adapter->rx_queues; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct sysctl_oid *tree = device_get_sysctl_tree(dev); struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); struct ixgbevf_hw_stats *stats = &adapter->stats.vf; struct sysctl_oid *stat_node, *queue_node; struct sysctl_oid_list *stat_list, *queue_list; #define QUEUE_NAME_LEN 32 char namebuf[QUEUE_NAME_LEN]; /* Driver Statistics */ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events", CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq", CTLFLAG_RD, &adapter->link_irq, "Link MSI-X IRQ Handled"); for (int i = 0; i < adapter->num_tx_queues; i++, tx_que++) { struct tx_ring *txr = &tx_que->txr; snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx", CTLFLAG_RD, &(txr->tso_tx), "TSO Packets"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets", CTLFLAG_RD, &(txr->total_packets), "TX Packets"); } for (int i = 0; i < adapter->num_rx_queues; i++, rx_que++) { struct rx_ring *rxr = &rx_que->rxr; snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs", CTLFLAG_RD, &(rx_que->irqs), "IRQs on queue"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets", CTLFLAG_RD, &(rxr->rx_packets), "RX packets"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes", CTLFLAG_RD, &(rxr->rx_bytes), "RX bytes"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_discarded", CTLFLAG_RD, &(rxr->rx_discarded), "Discarded RX packets"); } stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "VF Statistics (read from HW registers)"); stat_list = SYSCTL_CHILDREN(stat_node); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd", CTLFLAG_RD, &stats->vfgprc, "Good Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, 
"good_octets_rcvd", CTLFLAG_RD, &stats->vfgorc, "Good Octets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd", CTLFLAG_RD, &stats->vfmprc, "Multicast Packets Received"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", CTLFLAG_RD, &stats->vfgptc, "Good Packets Transmitted"); SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", CTLFLAG_RD, &stats->vfgotc, "Good Octets Transmitted"); } /* ixv_add_stats_sysctls */ /************************************************************************ * ixv_print_debug_info * * Called only when em_display_debug_stats is enabled. * Provides a way to take a look at important statistics * maintained by the driver and hardware. ************************************************************************/ static void ixv_print_debug_info(struct adapter *adapter) { device_t dev = adapter->dev; struct ixgbe_hw *hw = &adapter->hw; device_printf(dev, "Error Byte Count = %u \n", IXGBE_READ_REG(hw, IXGBE_ERRBC)); device_printf(dev, "MBX IRQ Handled: %lu\n", (long)adapter->link_irq); } /* ixv_print_debug_info */ /************************************************************************ * ixv_sysctl_debug ************************************************************************/ static int ixv_sysctl_debug(SYSCTL_HANDLER_ARGS) { struct adapter *adapter; int error, result; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { adapter = (struct adapter *)arg1; ixv_print_debug_info(adapter); } return error; } /* ixv_sysctl_debug */ /************************************************************************ * ixv_init_device_features ************************************************************************/ static void ixv_init_device_features(struct adapter *adapter) { adapter->feat_cap = IXGBE_FEATURE_NETMAP | IXGBE_FEATURE_VF | IXGBE_FEATURE_LEGACY_TX; /* A tad short on feature flags for VFs, atm. */ switch (adapter->hw.mac.type) { case ixgbe_mac_82599_vf: break; case ixgbe_mac_X540_vf: break; case ixgbe_mac_X550_vf: case ixgbe_mac_X550EM_x_vf: case ixgbe_mac_X550EM_a_vf: adapter->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD; adapter->feat_cap |= IXGBE_FEATURE_RSS; break; default: break; } /* Enabled by default... */ /* Is a virtual function (VF) */ if (adapter->feat_cap & IXGBE_FEATURE_VF) adapter->feat_en |= IXGBE_FEATURE_VF; /* Netmap */ if (adapter->feat_cap & IXGBE_FEATURE_NETMAP) adapter->feat_en |= IXGBE_FEATURE_NETMAP; /* Receive-Side Scaling (RSS) */ if (adapter->feat_cap & IXGBE_FEATURE_RSS) adapter->feat_en |= IXGBE_FEATURE_RSS; /* Needs advanced context descriptor regardless of offloads req'd */ if (adapter->feat_cap & IXGBE_FEATURE_NEEDS_CTXD) adapter->feat_en |= IXGBE_FEATURE_NEEDS_CTXD; } /* ixv_init_device_features */ diff --git a/sys/dev/ixgbe/if_sriov.c b/sys/dev/ixgbe/if_sriov.c index 9a7d73a69d2d..3cc4d788ecd4 100644 --- a/sys/dev/ixgbe/if_sriov.c +++ b/sys/dev/ixgbe/if_sriov.c @@ -1,897 +1,897 @@ /****************************************************************************** Copyright (c) 2001-2017, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #include "ixgbe.h" #include "ixgbe_sriov.h" #ifdef PCI_IOV #include MALLOC_DEFINE(M_IXGBE_SRIOV, "ix_sriov", "ix SR-IOV allocations"); /************************************************************************ * ixgbe_pci_iov_detach ************************************************************************/ int ixgbe_pci_iov_detach(device_t dev) { return pci_iov_detach(dev); } /************************************************************************ * ixgbe_define_iov_schemas ************************************************************************/ void ixgbe_define_iov_schemas(device_t dev, int *error) { nvlist_t *pf_schema, *vf_schema; pf_schema = pci_iov_schema_alloc_node(); vf_schema = pci_iov_schema_alloc_node(); pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL); pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof", - IOV_SCHEMA_HASDEFAULT, TRUE); + IOV_SCHEMA_HASDEFAULT, true); pci_iov_schema_add_bool(vf_schema, "allow-set-mac", - IOV_SCHEMA_HASDEFAULT, FALSE); + IOV_SCHEMA_HASDEFAULT, false); pci_iov_schema_add_bool(vf_schema, "allow-promisc", - IOV_SCHEMA_HASDEFAULT, FALSE); + IOV_SCHEMA_HASDEFAULT, false); *error = pci_iov_attach(dev, pf_schema, vf_schema); if (*error != 0) { device_printf(dev, "Error %d setting up SR-IOV\n", *error); } } /* ixgbe_define_iov_schemas */ /************************************************************************ * ixgbe_align_all_queue_indices ************************************************************************/ inline void ixgbe_align_all_queue_indices(struct adapter *adapter) { int i; int index; for (i = 0; i < adapter->num_rx_queues; i++) { index = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i); adapter->rx_queues[i].rxr.me = index; } for (i = 0; i < adapter->num_tx_queues; i++) { index = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i); adapter->tx_queues[i].txr.me = index; } } /* Support functions for SR-IOV/VF management */ static inline void ixgbe_send_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg) { if (vf->flags & IXGBE_VF_CTS) msg |= IXGBE_VT_MSGTYPE_CTS; adapter->hw.mbx.ops.write(&adapter->hw, &msg, 1, vf->pool); } static inline void ixgbe_send_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg) { msg &= IXGBE_VT_MSG_MASK; ixgbe_send_vf_msg(adapter, vf, msg | 
IXGBE_VT_MSGTYPE_ACK); } static inline void ixgbe_send_vf_nack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg) { msg &= IXGBE_VT_MSG_MASK; ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_NACK); } static inline void ixgbe_process_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf) { if (!(vf->flags & IXGBE_VF_CTS)) ixgbe_send_vf_nack(adapter, vf, 0); } static inline boolean_t ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac) { return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0); } static inline int ixgbe_vf_queues(int mode) { switch (mode) { case IXGBE_64_VM: return (2); case IXGBE_32_VM: return (4); case IXGBE_NO_VM: default: return (0); } } inline int ixgbe_vf_que_index(int mode, int vfnum, int num) { return ((vfnum * ixgbe_vf_queues(mode)) + num); } static inline void ixgbe_update_max_frame(struct adapter * adapter, int max_frame) { if (adapter->max_frame_size < max_frame) adapter->max_frame_size = max_frame; } inline u32 ixgbe_get_mrqc(int iov_mode) { u32 mrqc; switch (iov_mode) { case IXGBE_64_VM: mrqc = IXGBE_MRQC_VMDQRSS64EN; break; case IXGBE_32_VM: mrqc = IXGBE_MRQC_VMDQRSS32EN; break; case IXGBE_NO_VM: mrqc = 0; break; default: panic("Unexpected SR-IOV mode %d", iov_mode); } return mrqc; } inline u32 ixgbe_get_mtqc(int iov_mode) { uint32_t mtqc; switch (iov_mode) { case IXGBE_64_VM: mtqc = IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA; break; case IXGBE_32_VM: mtqc = IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA; break; case IXGBE_NO_VM: mtqc = IXGBE_MTQC_64Q_1PB; break; default: panic("Unexpected SR-IOV mode %d", iov_mode); } return mtqc; } void ixgbe_ping_all_vfs(struct adapter *adapter) { struct ixgbe_vf *vf; for (int i = 0; i < adapter->num_vfs; i++) { vf = &adapter->vfs[i]; if (vf->flags & IXGBE_VF_ACTIVE) ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG); } } /* ixgbe_ping_all_vfs */ static void ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint16_t tag) { struct ixgbe_hw *hw; uint32_t vmolr, vmvir; hw = &adapter->hw; vf->vlan_tag = tag; vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool)); /* Do not receive packets that pass inexact filters. */ vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE); /* Disable Multicast Promiscuous Mode. */ vmolr &= ~IXGBE_VMOLR_MPE; /* Accept broadcasts. */ vmolr |= IXGBE_VMOLR_BAM; if (tag == 0) { /* Accept non-vlan tagged traffic. */ vmolr |= IXGBE_VMOLR_AUPE; /* Allow VM to tag outgoing traffic; no default tag. */ vmvir = 0; } else { /* Require vlan-tagged traffic. */ vmolr &= ~IXGBE_VMOLR_AUPE; /* Tag all traffic with provided vlan tag. */ vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT); } IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr); IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir); } /* ixgbe_vf_set_default_vlan */ static boolean_t ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf) { /* * Frame size compatibility between PF and VF is only a problem on * 82599-based cards. X540 and later support any combination of jumbo * frames on PFs and VFs. */ if (adapter->hw.mac.type != ixgbe_mac_82599EB) - return (TRUE); + return (true); switch (vf->api_ver) { case IXGBE_API_VER_1_0: case IXGBE_API_VER_UNKNOWN: /* * On legacy (1.0 and older) VF versions, we don't support jumbo * frames on either the PF or the VF. */ if (adapter->max_frame_size > ETHER_MAX_LEN || vf->maximum_frame_size > ETHER_MAX_LEN) - return (FALSE); + return (false); - return (TRUE); + return (true); break; case IXGBE_API_VER_1_1: default: /* * 1.1 or later VF versions always work if they aren't using * jumbo frames.
*/ if (vf->maximum_frame_size <= ETHER_MAX_LEN) - return (TRUE); + return (true); /* * Jumbo frames only work with VFs if the PF is also using jumbo * frames. */ if (adapter->max_frame_size <= ETHER_MAX_LEN) - return (TRUE); + return (true); - return (FALSE); + return (false); } } /* ixgbe_vf_frame_size_compatible */ static void ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf) { ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan); // XXX clear multicast addresses ixgbe_clear_rar(&adapter->hw, vf->rar_index); vf->api_ver = IXGBE_API_VER_UNKNOWN; } /* ixgbe_process_vf_reset */ static void ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf) { struct ixgbe_hw *hw; uint32_t vf_index, vfte; hw = &adapter->hw; vf_index = IXGBE_VF_INDEX(vf->pool); vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index)); vfte |= IXGBE_VF_BIT(vf->pool); IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte); } /* ixgbe_vf_enable_transmit */ static void ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf) { struct ixgbe_hw *hw; uint32_t vf_index, vfre; hw = &adapter->hw; vf_index = IXGBE_VF_INDEX(vf->pool); vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index)); if (ixgbe_vf_frame_size_compatible(adapter, vf)) vfre |= IXGBE_VF_BIT(vf->pool); else vfre &= ~IXGBE_VF_BIT(vf->pool); IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre); } /* ixgbe_vf_enable_receive */ static void ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg) { struct ixgbe_hw *hw; uint32_t ack; uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN]; hw = &adapter->hw; ixgbe_process_vf_reset(adapter, vf); if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) { ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr, - vf->pool, TRUE); + vf->pool, true); ack = IXGBE_VT_MSGTYPE_ACK; } else ack = IXGBE_VT_MSGTYPE_NACK; ixgbe_vf_enable_transmit(adapter, vf); ixgbe_vf_enable_receive(adapter, vf); vf->flags |= IXGBE_VF_CTS; resp[0] = IXGBE_VF_RESET | ack | IXGBE_VT_MSGTYPE_CTS; bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN); resp[3] = hw->mac.mc_filter_type; hw->mbx.ops.write(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool); } /* ixgbe_vf_reset_msg */ static void ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg) { uint8_t *mac; mac = (uint8_t*)&msg[1]; /* Check that the VF has permission to change the MAC address. */ if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) { ixgbe_send_vf_nack(adapter, vf, msg[0]); return; } if (ixgbe_validate_mac_addr(mac) != 0) { ixgbe_send_vf_nack(adapter, vf, msg[0]); return; } bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN); ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr, vf->pool, - TRUE); + true); ixgbe_send_vf_ack(adapter, vf, msg[0]); } /* ixgbe_vf_set_mac */ /* * VF multicast addresses are set by using the appropriate bit in * 1 of 128 32 bit addresses (4096 possible). 
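 * e.g. a 12-bit hash value of 0x852 selects MTA register
 * 0x852 >> 5 = 0x42 and bit 0x852 & 0x1F = 0x12 within it,
 * matching the vec_reg/vec_bit computation below.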
*/ static void ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg) { u16 *list = (u16*)&msg[1]; int entries; u32 vmolr, vec_bit, vec_reg, mta_reg; entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; entries = min(entries, IXGBE_MAX_VF_MC); vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool)); vf->num_mc_hashes = entries; /* Set the appropriate MTA bit */ for (int i = 0; i < entries; i++) { vf->mc_hash[i] = list[i]; vec_reg = (vf->mc_hash[i] >> 5) & 0x7F; vec_bit = vf->mc_hash[i] & 0x1F; mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg)); mta_reg |= (1 << vec_bit); IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg); } vmolr |= IXGBE_VMOLR_ROMPE; IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr); ixgbe_send_vf_ack(adapter, vf, msg[0]); } /* ixgbe_vf_set_mc_addr */ static void ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg) { struct ixgbe_hw *hw; int enable; uint16_t tag; hw = &adapter->hw; enable = IXGBE_VT_MSGINFO(msg[0]); tag = msg[1] & IXGBE_VLVF_VLANID_MASK; if (!(vf->flags & IXGBE_VF_CAP_VLAN)) { ixgbe_send_vf_nack(adapter, vf, msg[0]); return; } /* It is illegal to enable vlan tag 0. */ if (tag == 0 && enable != 0) { ixgbe_send_vf_nack(adapter, vf, msg[0]); return; } ixgbe_set_vfta(hw, tag, vf->pool, enable, false); ixgbe_send_vf_ack(adapter, vf, msg[0]); } /* ixgbe_vf_set_vlan */ static void ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg) { struct ixgbe_hw *hw; uint32_t vf_max_size, pf_max_size, mhadd; hw = &adapter->hw; vf_max_size = msg[1]; if (vf_max_size < ETHER_CRC_LEN) { /* We intentionally ACK invalid LPE requests. */ ixgbe_send_vf_ack(adapter, vf, msg[0]); return; } vf_max_size -= ETHER_CRC_LEN; if (vf_max_size > IXGBE_MAX_FRAME_SIZE) { /* We intentionally ACK invalid LPE requests. */ ixgbe_send_vf_ack(adapter, vf, msg[0]); return; } vf->maximum_frame_size = vf_max_size; ixgbe_update_max_frame(adapter, vf->maximum_frame_size); /* * We might have to disable reception to this VF if the frame size is * not compatible with the config on the PF. */ ixgbe_vf_enable_receive(adapter, vf); mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT; if (pf_max_size < adapter->max_frame_size) { mhadd &= ~IXGBE_MHADD_MFS_MASK; mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT; IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); } ixgbe_send_vf_ack(adapter, vf, msg[0]); } /* ixgbe_vf_set_lpe */ static void ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg) { //XXX implement this ixgbe_send_vf_nack(adapter, vf, msg[0]); } /* ixgbe_vf_set_macvlan */ static void ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg) { switch (msg[1]) { case IXGBE_API_VER_1_0: case IXGBE_API_VER_1_1: vf->api_ver = msg[1]; ixgbe_send_vf_ack(adapter, vf, msg[0]); break; default: vf->api_ver = IXGBE_API_VER_UNKNOWN; ixgbe_send_vf_nack(adapter, vf, msg[0]); break; } } /* ixgbe_vf_api_negotiate */ static void ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg) { struct ixgbe_hw *hw; uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN]; int num_queues; hw = &adapter->hw; /* GET_QUEUES is not supported on pre-1.1 APIs. 
*/ switch (msg[0]) { case IXGBE_API_VER_1_0: case IXGBE_API_VER_UNKNOWN: ixgbe_send_vf_nack(adapter, vf, msg[0]); return; } resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_CTS; num_queues = ixgbe_vf_queues(adapter->iov_mode); resp[IXGBE_VF_TX_QUEUES] = num_queues; resp[IXGBE_VF_RX_QUEUES] = num_queues; resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0); resp[IXGBE_VF_DEF_QUEUE] = 0; hw->mbx.ops.write(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool); } /* ixgbe_vf_get_queues */ static void ixgbe_process_vf_msg(if_ctx_t ctx, struct ixgbe_vf *vf) { struct adapter *adapter = iflib_get_softc(ctx); #ifdef KTR struct ifnet *ifp = iflib_get_ifp(ctx); #endif struct ixgbe_hw *hw; uint32_t msg[IXGBE_VFMAILBOX_SIZE]; int error; hw = &adapter->hw; error = hw->mbx.ops.read(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool); if (error != 0) return; CTR3(KTR_MALLOC, "%s: received msg %x from %d", ifp->if_xname, msg[0], vf->pool); if (msg[0] == IXGBE_VF_RESET) { ixgbe_vf_reset_msg(adapter, vf, msg); return; } if (!(vf->flags & IXGBE_VF_CTS)) { ixgbe_send_vf_nack(adapter, vf, msg[0]); return; } switch (msg[0] & IXGBE_VT_MSG_MASK) { case IXGBE_VF_SET_MAC_ADDR: ixgbe_vf_set_mac(adapter, vf, msg); break; case IXGBE_VF_SET_MULTICAST: ixgbe_vf_set_mc_addr(adapter, vf, msg); break; case IXGBE_VF_SET_VLAN: ixgbe_vf_set_vlan(adapter, vf, msg); break; case IXGBE_VF_SET_LPE: ixgbe_vf_set_lpe(adapter, vf, msg); break; case IXGBE_VF_SET_MACVLAN: ixgbe_vf_set_macvlan(adapter, vf, msg); break; case IXGBE_VF_API_NEGOTIATE: ixgbe_vf_api_negotiate(adapter, vf, msg); break; case IXGBE_VF_GET_QUEUES: ixgbe_vf_get_queues(adapter, vf, msg); break; default: ixgbe_send_vf_nack(adapter, vf, msg[0]); } } /* ixgbe_process_vf_msg */ /* Tasklet for handling VF -> PF mailbox messages */ void ixgbe_handle_mbx(void *context) { if_ctx_t ctx = context; struct adapter *adapter = iflib_get_softc(ctx); struct ixgbe_hw *hw; struct ixgbe_vf *vf; int i; hw = &adapter->hw; for (i = 0; i < adapter->num_vfs; i++) { vf = &adapter->vfs[i]; if (vf->flags & IXGBE_VF_ACTIVE) { if (hw->mbx.ops.check_for_rst(hw, vf->pool) == 0) ixgbe_process_vf_reset(adapter, vf); if (hw->mbx.ops.check_for_msg(hw, vf->pool) == 0) ixgbe_process_vf_msg(ctx, vf); if (hw->mbx.ops.check_for_ack(hw, vf->pool) == 0) ixgbe_process_vf_ack(adapter, vf); } } } /* ixgbe_handle_mbx */ int ixgbe_if_iov_init(if_ctx_t ctx, u16 num_vfs, const nvlist_t *config) { struct adapter *adapter; int retval = 0; adapter = iflib_get_softc(ctx); adapter->iov_mode = IXGBE_NO_VM; if (num_vfs == 0) { /* Would we ever get num_vfs = 0? */ retval = EINVAL; goto err_init_iov; } /* * We've got to reserve a VM's worth of queues for the PF, * thus we go into "64 VF mode" if 32+ VFs are requested. * With 64 VFs, you can only have two queues per VF. * With 32 VFs, you can have up to four queues per VF. 
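	 *
	 * e.g. requesting 40 VFs selects IXGBE_64_VM (2 queues per VF)
	 * with pool 63 reserved for the PF, while requesting 16 VFs
	 * selects IXGBE_32_VM (4 queues per VF) with pool 31 reserved.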
*/ if (num_vfs >= IXGBE_32_VM) adapter->iov_mode = IXGBE_64_VM; else adapter->iov_mode = IXGBE_32_VM; /* Again, reserving 1 VM's worth of queues for the PF */ adapter->pool = adapter->iov_mode - 1; if ((num_vfs > adapter->pool) || (num_vfs >= IXGBE_64_VM)) { retval = ENOSPC; goto err_init_iov; } adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE_SRIOV, M_NOWAIT | M_ZERO); if (adapter->vfs == NULL) { retval = ENOMEM; goto err_init_iov; } adapter->num_vfs = num_vfs; ixgbe_if_init(adapter->ctx); adapter->feat_en |= IXGBE_FEATURE_SRIOV; return (retval); err_init_iov: adapter->num_vfs = 0; adapter->pool = 0; adapter->iov_mode = IXGBE_NO_VM; return (retval); } /* ixgbe_if_iov_init */ void ixgbe_if_iov_uninit(if_ctx_t ctx) { struct ixgbe_hw *hw; struct adapter *adapter; uint32_t pf_reg, vf_reg; adapter = iflib_get_softc(ctx); hw = &adapter->hw; /* Enable rx/tx for the PF and disable it for all VFs. */ pf_reg = IXGBE_VF_INDEX(adapter->pool); IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(adapter->pool)); IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg), IXGBE_VF_BIT(adapter->pool)); if (pf_reg == 0) vf_reg = 1; else vf_reg = 0; IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0); IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0); IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0); free(adapter->vfs, M_IXGBE_SRIOV); adapter->vfs = NULL; adapter->num_vfs = 0; adapter->feat_en &= ~IXGBE_FEATURE_SRIOV; } /* ixgbe_if_iov_uninit */ static void ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf) { struct ixgbe_hw *hw; uint32_t vf_index, pfmbimr; hw = &adapter->hw; if (!(vf->flags & IXGBE_VF_ACTIVE)) return; vf_index = IXGBE_VF_INDEX(vf->pool); pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index)); pfmbimr |= IXGBE_VF_BIT(vf->pool); IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr); ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag); // XXX multicast addresses if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) { ixgbe_set_rar(&adapter->hw, vf->rar_index, - vf->ether_addr, vf->pool, TRUE); + vf->ether_addr, vf->pool, true); } ixgbe_vf_enable_transmit(adapter, vf); ixgbe_vf_enable_receive(adapter, vf); ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG); } /* ixgbe_init_vf */ void ixgbe_initialize_iov(struct adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie; int i; if (adapter->iov_mode == IXGBE_NO_VM) return; /* RMW appropriate registers based on IOV mode */ /* Read... */ mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC); gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); /* Modify... */ mrqc &= ~IXGBE_MRQC_MRQE_MASK; mtqc = IXGBE_MTQC_VT_ENA; /* No initial MTQC read needed */ gcr_ext |= IXGBE_GCR_EXT_MSIX_EN; gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK; gpie &= ~IXGBE_GPIE_VTMODE_MASK; switch (adapter->iov_mode) { case IXGBE_64_VM: mrqc |= IXGBE_MRQC_VMDQRSS64EN; mtqc |= IXGBE_MTQC_64VF; gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64; gpie |= IXGBE_GPIE_VTMODE_64; break; case IXGBE_32_VM: mrqc |= IXGBE_MRQC_VMDQRSS32EN; mtqc |= IXGBE_MTQC_32VF; gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32; gpie |= IXGBE_GPIE_VTMODE_32; break; default: panic("Unexpected SR-IOV mode %d", adapter->iov_mode); } /* Write... */ IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc); IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); /* Enable rx/tx for the PF. 
*/ vf_reg = IXGBE_VF_INDEX(adapter->pool); IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), IXGBE_VF_BIT(adapter->pool)); IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), IXGBE_VF_BIT(adapter->pool)); /* Allow VM-to-VM communication. */ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN; vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT); IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl); for (i = 0; i < adapter->num_vfs; i++) ixgbe_init_vf(adapter, &adapter->vfs[i]); } /* ixgbe_initialize_iov */ /* Check the max frame setting of all active VF's */ void ixgbe_recalculate_max_frame(struct adapter *adapter) { struct ixgbe_vf *vf; for (int i = 0; i < adapter->num_vfs; i++) { vf = &adapter->vfs[i]; if (vf->flags & IXGBE_VF_ACTIVE) ixgbe_update_max_frame(adapter, vf->maximum_frame_size); } } /* ixgbe_recalculate_max_frame */ int ixgbe_if_iov_vf_add(if_ctx_t ctx, u16 vfnum, const nvlist_t *config) { struct adapter *adapter; struct ixgbe_vf *vf; const void *mac; adapter = iflib_get_softc(ctx); KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d", vfnum, adapter->num_vfs)); vf = &adapter->vfs[vfnum]; vf->pool= vfnum; /* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */ vf->rar_index = vfnum + 1; vf->default_vlan = 0; vf->maximum_frame_size = ETHER_MAX_LEN; ixgbe_update_max_frame(adapter, vf->maximum_frame_size); if (nvlist_exists_binary(config, "mac-addr")) { mac = nvlist_get_binary(config, "mac-addr", NULL); bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN); if (nvlist_get_bool(config, "allow-set-mac")) vf->flags |= IXGBE_VF_CAP_MAC; } else /* * If the administrator has not specified a MAC address then * we must allow the VF to choose one. */ vf->flags |= IXGBE_VF_CAP_MAC; vf->flags |= IXGBE_VF_ACTIVE; ixgbe_init_vf(adapter, vf); return (0); } /* ixgbe_if_iov_vf_add */ #else void ixgbe_handle_mbx(void *context) { UNREFERENCED_PARAMETER(context); } /* ixgbe_handle_mbx */ #endif diff --git a/sys/dev/ixgbe/ix_txrx.c b/sys/dev/ixgbe/ix_txrx.c index fe38919fc594..2cecf79b3422 100644 --- a/sys/dev/ixgbe/ix_txrx.c +++ b/sys/dev/ixgbe/ix_txrx.c @@ -1,542 +1,542 @@ /****************************************************************************** Copyright (c) 2001-2017, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef IXGBE_STANDALONE_BUILD #include "opt_inet.h" #include "opt_inet6.h" #include "opt_rss.h" #endif #include "ixgbe.h" /************************************************************************ * Local Function prototypes ************************************************************************/ static int ixgbe_isc_txd_encap(void *arg, if_pkt_info_t pi); static void ixgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx); static int ixgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear); static void ixgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru); static void ixgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx __unused, qidx_t pidx); static int ixgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t pidx, qidx_t budget); static int ixgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri); static void ixgbe_rx_checksum(uint32_t staterr, if_rxd_info_t ri, uint32_t ptype); static int ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *, if_pkt_info_t); extern void ixgbe_if_enable_intr(if_ctx_t ctx); static int ixgbe_determine_rsstype(uint16_t pkt_info); struct if_txrx ixgbe_txrx = { .ift_txd_encap = ixgbe_isc_txd_encap, .ift_txd_flush = ixgbe_isc_txd_flush, .ift_txd_credits_update = ixgbe_isc_txd_credits_update, .ift_rxd_available = ixgbe_isc_rxd_available, .ift_rxd_pkt_get = ixgbe_isc_rxd_pkt_get, .ift_rxd_refill = ixgbe_isc_rxd_refill, .ift_rxd_flush = ixgbe_isc_rxd_flush, .ift_legacy_intr = NULL }; /************************************************************************ * ixgbe_tx_ctx_setup * * Advanced Context Descriptor setup for VLAN, CSUM or TSO * ************************************************************************/ static int ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *TXD, if_pkt_info_t pi) { uint32_t vlan_macip_lens, type_tucmd_mlhl; uint32_t olinfo_status, mss_l4len_idx, pktlen, offload; u8 ehdrlen; - offload = TRUE; + offload = true; olinfo_status = mss_l4len_idx = vlan_macip_lens = type_tucmd_mlhl = 0; /* VLAN MACLEN IPLEN */ vlan_macip_lens |= (htole16(pi->ipi_vtag) << IXGBE_ADVTXD_VLAN_SHIFT); /* * Some of our VF devices need a context descriptor for every * packet. That means the ehdrlen needs to be non-zero in order * for the host driver not to flag a malicious event. The stack * will most likely populate this for all other reasons of why * this function was called. */ if (pi->ipi_ehdrlen == 0) { ehdrlen = ETHER_HDR_LEN; ehdrlen += (pi->ipi_vtag != 0) ? 
ETHER_VLAN_ENCAP_LEN : 0; } else ehdrlen = pi->ipi_ehdrlen; vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT; pktlen = pi->ipi_len; /* First check if TSO is to be used */ if (pi->ipi_csum_flags & CSUM_TSO) { /* This is used in the transmit desc in encap */ pktlen = pi->ipi_len - ehdrlen - pi->ipi_ip_hlen - pi->ipi_tcp_hlen; mss_l4len_idx |= (pi->ipi_tso_segsz << IXGBE_ADVTXD_MSS_SHIFT); mss_l4len_idx |= (pi->ipi_tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT); } olinfo_status |= pktlen << IXGBE_ADVTXD_PAYLEN_SHIFT; if (pi->ipi_flags & IPI_TX_IPV4) { type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; /* Tell transmit desc to also do IPv4 checksum. */ if (pi->ipi_csum_flags & (CSUM_IP|CSUM_TSO)) olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8; } else if (pi->ipi_flags & IPI_TX_IPV6) type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6; else - offload = FALSE; + offload = false; vlan_macip_lens |= pi->ipi_ip_hlen; switch (pi->ipi_ipproto) { case IPPROTO_TCP: if (pi->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP | CSUM_TSO)) type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; else - offload = FALSE; + offload = false; break; case IPPROTO_UDP: if (pi->ipi_csum_flags & (CSUM_IP_UDP | CSUM_IP6_UDP)) type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP; else - offload = FALSE; + offload = false; break; case IPPROTO_SCTP: if (pi->ipi_csum_flags & (CSUM_IP_SCTP | CSUM_IP6_SCTP)) type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; else - offload = FALSE; + offload = false; break; default: - offload = FALSE; + offload = false; break; } /* Insert L4 checksum into data descriptors */ if (offload) olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8; type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; /* Now copy bits into descriptor */ TXD->vlan_macip_lens = htole32(vlan_macip_lens); TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl); TXD->seqnum_seed = htole32(0); TXD->mss_l4len_idx = htole32(mss_l4len_idx); return (olinfo_status); } /* ixgbe_tx_ctx_setup */ /************************************************************************ * ixgbe_isc_txd_encap ************************************************************************/ static int ixgbe_isc_txd_encap(void *arg, if_pkt_info_t pi) { struct adapter *sc = arg; if_softc_ctx_t scctx = sc->shared; struct ix_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx]; struct tx_ring *txr = &que->txr; int nsegs = pi->ipi_nsegs; bus_dma_segment_t *segs = pi->ipi_segs; union ixgbe_adv_tx_desc *txd = NULL; struct ixgbe_adv_tx_context_desc *TXD; int i, j, first, pidx_last; uint32_t olinfo_status, cmd, flags; qidx_t ntxd; cmd = (IXGBE_ADVTXD_DTYP_DATA | IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT); if (pi->ipi_mflags & M_VLANTAG) cmd |= IXGBE_ADVTXD_DCMD_VLE; i = first = pi->ipi_pidx; flags = (pi->ipi_flags & IPI_TX_INTR) ? 
IXGBE_TXD_CMD_RS : 0; ntxd = scctx->isc_ntxd[0]; TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[first]; if ((pi->ipi_csum_flags & CSUM_OFFLOAD) || (sc->feat_en & IXGBE_FEATURE_NEEDS_CTXD) || pi->ipi_vtag) { /********************************************* * Set up the appropriate offload context * this will consume the first descriptor *********************************************/ olinfo_status = ixgbe_tx_ctx_setup(TXD, pi); if (pi->ipi_csum_flags & CSUM_TSO) { cmd |= IXGBE_ADVTXD_DCMD_TSE; ++txr->tso_tx; } if (++i == scctx->isc_ntxd[0]) i = 0; } else { /* Indicate the whole packet as payload when not doing TSO */ olinfo_status = pi->ipi_len << IXGBE_ADVTXD_PAYLEN_SHIFT; } olinfo_status |= IXGBE_ADVTXD_CC; pidx_last = 0; for (j = 0; j < nsegs; j++) { bus_size_t seglen; txd = &txr->tx_base[i]; seglen = segs[j].ds_len; txd->read.buffer_addr = htole64(segs[j].ds_addr); txd->read.cmd_type_len = htole32(cmd | seglen); txd->read.olinfo_status = htole32(olinfo_status); pidx_last = i; if (++i == scctx->isc_ntxd[0]) { i = 0; } } if (flags) { txr->tx_rsq[txr->tx_rs_pidx] = pidx_last; txr->tx_rs_pidx = (txr->tx_rs_pidx + 1) & (ntxd - 1); } txd->read.cmd_type_len |= htole32(IXGBE_TXD_CMD_EOP | flags); txr->bytes += pi->ipi_len; pi->ipi_new_pidx = i; ++txr->total_packets; return (0); } /* ixgbe_isc_txd_encap */ /************************************************************************ * ixgbe_isc_txd_flush ************************************************************************/ static void ixgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx) { struct adapter *sc = arg; struct ix_tx_queue *que = &sc->tx_queues[txqid]; struct tx_ring *txr = &que->txr; IXGBE_WRITE_REG(&sc->hw, txr->tail, pidx); } /* ixgbe_isc_txd_flush */ /************************************************************************ * ixgbe_isc_txd_credits_update ************************************************************************/ static int ixgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear) { struct adapter *sc = arg; if_softc_ctx_t scctx = sc->shared; struct ix_tx_queue *que = &sc->tx_queues[txqid]; struct tx_ring *txr = &que->txr; qidx_t processed = 0; int updated; qidx_t cur, prev, ntxd, rs_cidx; int32_t delta; uint8_t status; rs_cidx = txr->tx_rs_cidx; if (rs_cidx == txr->tx_rs_pidx) return (0); cur = txr->tx_rsq[rs_cidx]; status = txr->tx_base[cur].wb.status; updated = !!(status & IXGBE_TXD_STAT_DD); if (!updated) return (0); /* If clear is false just let caller know that there * are descriptors to reclaim */ if (!clear) return (1); prev = txr->tx_cidx_processed; ntxd = scctx->isc_ntxd[0]; do { MPASS(prev != cur); delta = (int32_t)cur - (int32_t)prev; if (delta < 0) delta += ntxd; MPASS(delta > 0); processed += delta; prev = cur; rs_cidx = (rs_cidx + 1) & (ntxd - 1); if (rs_cidx == txr->tx_rs_pidx) break; cur = txr->tx_rsq[rs_cidx]; status = txr->tx_base[cur].wb.status; } while ((status & IXGBE_TXD_STAT_DD)); txr->tx_rs_cidx = rs_cidx; txr->tx_cidx_processed = prev; return (processed); } /* ixgbe_isc_txd_credits_update */ /************************************************************************ * ixgbe_isc_rxd_refill ************************************************************************/ static void ixgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru) { struct adapter *sc = arg; struct ix_rx_queue *que = &sc->rx_queues[iru->iru_qsidx]; struct rx_ring *rxr = &que->rxr; uint64_t *paddrs; int i; uint32_t next_pidx, pidx; uint16_t count; paddrs = iru->iru_paddrs; pidx = iru->iru_pidx; count = 
iru->iru_count; for (i = 0, next_pidx = pidx; i < count; i++) { rxr->rx_base[next_pidx].read.pkt_addr = htole64(paddrs[i]); if (++next_pidx == sc->shared->isc_nrxd[0]) next_pidx = 0; } } /* ixgbe_isc_rxd_refill */ /************************************************************************ * ixgbe_isc_rxd_flush ************************************************************************/ static void ixgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx __unused, qidx_t pidx) { struct adapter *sc = arg; struct ix_rx_queue *que = &sc->rx_queues[qsidx]; struct rx_ring *rxr = &que->rxr; IXGBE_WRITE_REG(&sc->hw, rxr->tail, pidx); } /* ixgbe_isc_rxd_flush */ /************************************************************************ * ixgbe_isc_rxd_available ************************************************************************/ static int ixgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t pidx, qidx_t budget) { struct adapter *sc = arg; struct ix_rx_queue *que = &sc->rx_queues[qsidx]; struct rx_ring *rxr = &que->rxr; union ixgbe_adv_rx_desc *rxd; uint32_t staterr; int cnt, i, nrxd; nrxd = sc->shared->isc_nrxd[0]; for (cnt = 0, i = pidx; cnt < nrxd && cnt <= budget;) { rxd = &rxr->rx_base[i]; staterr = le32toh(rxd->wb.upper.status_error); if ((staterr & IXGBE_RXD_STAT_DD) == 0) break; if (++i == nrxd) i = 0; if (staterr & IXGBE_RXD_STAT_EOP) cnt++; } return (cnt); } /* ixgbe_isc_rxd_available */ /************************************************************************ * ixgbe_isc_rxd_pkt_get * * Routine sends data which has been dma'ed into host memory * to upper layer. Initialize ri structure. * * Returns 0 upon success, errno on failure ************************************************************************/ static int ixgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri) { struct adapter *adapter = arg; struct ix_rx_queue *que = &adapter->rx_queues[ri->iri_qsidx]; struct rx_ring *rxr = &que->rxr; struct ifnet *ifp = iflib_get_ifp(adapter->ctx); union ixgbe_adv_rx_desc *rxd; uint16_t pkt_info, len, cidx, i; uint16_t vtag = 0; uint32_t ptype; uint32_t staterr = 0; bool eop; i = 0; cidx = ri->iri_cidx; do { rxd = &rxr->rx_base[cidx]; staterr = le32toh(rxd->wb.upper.status_error); pkt_info = le16toh(rxd->wb.lower.lo_dword.hs_rss.pkt_info); /* Error Checking then decrement count */ MPASS ((staterr & IXGBE_RXD_STAT_DD) != 0); len = le16toh(rxd->wb.upper.length); ptype = le32toh(rxd->wb.lower.lo_dword.data) & IXGBE_RXDADV_PKTTYPE_MASK; ri->iri_len += len; rxr->bytes += len; rxd->wb.upper.status_error = 0; eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0); if ( (rxr->vtag_strip) && (staterr & IXGBE_RXD_STAT_VP) ) { vtag = le16toh(rxd->wb.upper.vlan); } else { vtag = 0; } /* Make sure bad packets are discarded */ if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) { if (adapter->feat_en & IXGBE_FEATURE_VF) if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); rxr->rx_discarded++; return (EBADMSG); } ri->iri_frags[i].irf_flid = 0; ri->iri_frags[i].irf_idx = cidx; ri->iri_frags[i].irf_len = len; if (++cidx == adapter->shared->isc_nrxd[0]) cidx = 0; i++; /* even a 16K packet shouldn't consume more than 8 clusters */ MPASS(i < 9); } while (!eop); rxr->rx_packets++; rxr->packets++; rxr->rx_bytes += ri->iri_len; if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) ixgbe_rx_checksum(staterr, ri, ptype); ri->iri_flowid = le32toh(rxd->wb.lower.hi_dword.rss); ri->iri_rsstype = ixgbe_determine_rsstype(pkt_info); if ((adapter->feat_en & IXGBE_FEATURE_RSS) == 0) { if (ri->iri_rsstype == M_HASHTYPE_OPAQUE) ri->iri_rsstype = 
M_HASHTYPE_NONE; else ri->iri_rsstype = M_HASHTYPE_OPAQUE_HASH; } ri->iri_vtag = vtag; ri->iri_nfrags = i; if (vtag) ri->iri_flags |= M_VLANTAG; return (0); } /* ixgbe_isc_rxd_pkt_get */ /************************************************************************ * ixgbe_rx_checksum * * Verify that the hardware indicated that the checksum is valid. * Inform the stack about the status of checksum so that stack * doesn't spend time verifying the checksum. ************************************************************************/ static void ixgbe_rx_checksum(uint32_t staterr, if_rxd_info_t ri, uint32_t ptype) { uint16_t status = (uint16_t)staterr; uint8_t errors = (uint8_t)(staterr >> 24); /* If there is a layer 3 or 4 error we are done */ if (__predict_false(errors & (IXGBE_RXD_ERR_IPE | IXGBE_RXD_ERR_TCPE))) return; /* IP Checksum Good */ if (status & IXGBE_RXD_STAT_IPCS) ri->iri_csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID); /* Valid L4E checksum */ if (__predict_true(status & IXGBE_RXD_STAT_L4CS)) { /* SCTP header present. */ if (__predict_false((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 && (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)) { ri->iri_csum_flags |= CSUM_SCTP_VALID; } else { ri->iri_csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; ri->iri_csum_data = htons(0xffff); } } } /* ixgbe_rx_checksum */ /************************************************************************ * ixgbe_determine_rsstype * * Parse the packet type to determine the appropriate hash ************************************************************************/ static int ixgbe_determine_rsstype(uint16_t pkt_info) { switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) { case IXGBE_RXDADV_RSSTYPE_IPV4_TCP: return M_HASHTYPE_RSS_TCP_IPV4; case IXGBE_RXDADV_RSSTYPE_IPV4: return M_HASHTYPE_RSS_IPV4; case IXGBE_RXDADV_RSSTYPE_IPV6_TCP: return M_HASHTYPE_RSS_TCP_IPV6; case IXGBE_RXDADV_RSSTYPE_IPV6_EX: return M_HASHTYPE_RSS_IPV6_EX; case IXGBE_RXDADV_RSSTYPE_IPV6: return M_HASHTYPE_RSS_IPV6; case IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX: return M_HASHTYPE_RSS_TCP_IPV6_EX; case IXGBE_RXDADV_RSSTYPE_IPV4_UDP: return M_HASHTYPE_RSS_UDP_IPV4; case IXGBE_RXDADV_RSSTYPE_IPV6_UDP: return M_HASHTYPE_RSS_UDP_IPV6; case IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX: return M_HASHTYPE_RSS_UDP_IPV6_EX; default: return M_HASHTYPE_OPAQUE; } } /* ixgbe_determine_rsstype */ diff --git a/sys/dev/ixgbe/ixgbe.h b/sys/dev/ixgbe/ixgbe.h index 31d5cc41c066..93bfff427a27 100644 --- a/sys/dev/ixgbe/ixgbe.h +++ b/sys/dev/ixgbe/ixgbe.h @@ -1,594 +1,594 @@ /****************************************************************************** SPDX-License-Identifier: BSD-3-Clause Copyright (c) 2001-2017, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _IXGBE_H_ #define _IXGBE_H_ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ixgbe_api.h" #include "ixgbe_common.h" #include "ixgbe_phy.h" #include "ixgbe_vf.h" #include "ixgbe_features.h" /* Tunables */ /* * TxDescriptors Valid Range: 64-4096 Default Value: 256 This value is the * number of transmit descriptors allocated by the driver. Increasing this * value allows the driver to queue more transmits. Each descriptor is 16 * bytes. Performance tests have shown the 2K value to be optimal for top * performance. */ #define DEFAULT_TXD 2048 #define PERFORM_TXD 2048 #define MAX_TXD 4096 #define MIN_TXD 64 /* * RxDescriptors Valid Range: 64-4096 Default Value: 256 This value is the * number of receive descriptors allocated for each RX queue. Increasing this * value allows the driver to buffer more incoming packets. Each descriptor * is 16 bytes. A receive buffer is also allocated for each descriptor. * * Note: with 8 rings and a dual port card, it is possible to bump up * against the system mbuf pool limit; you can tune nmbclusters * to adjust for this. */ #define DEFAULT_RXD 2048 #define PERFORM_RXD 2048 #define MAX_RXD 4096 #define MIN_RXD 64 /* Alignment for rings */ #define DBA_ALIGN 128 /* * This is the max watchdog interval, i.e. the time that can * pass between any two TX clean operations, which only happen * when the TX hardware is functioning. */ #define IXGBE_WATCHDOG (10 * hz) /* * These parameters control when the driver calls the routine to reclaim * transmit descriptors. */ #define IXGBE_TX_CLEANUP_THRESHOLD(_a) ((_a)->num_tx_desc / 8) #define IXGBE_TX_OP_THRESHOLD(_a) ((_a)->num_tx_desc / 32) /* These defines are used in MTU calculations */ #define IXGBE_MAX_FRAME_SIZE 9728 #define IXGBE_MTU_HDR (ETHER_HDR_LEN + ETHER_CRC_LEN) #define IXGBE_MTU_HDR_VLAN (ETHER_HDR_LEN + ETHER_CRC_LEN + \ ETHER_VLAN_ENCAP_LEN) #define IXGBE_MAX_MTU (IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) #define IXGBE_MAX_MTU_VLAN (IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR_VLAN) /* Flow control constants */ #define IXGBE_FC_PAUSE 0xFFFF #define IXGBE_FC_HI 0x20000 #define IXGBE_FC_LO 0x10000 /* * Used for optimizing small rx mbufs. Effort is made to keep the copy * small and aligned for the CPU L1 cache. * * MHLEN is typically 168 bytes, giving us 8-byte alignment. Getting * 32 byte alignment needed for the fast bcopy results in 8 bytes being * wasted.
Getting 64 byte alignment, which _should_ be ideal for * modern Intel CPUs, results in 40 bytes wasted and a significant drop * in observed efficiency of the optimization, 97.9% -> 81.8%. */ #if __FreeBSD_version < 1002000 #define MPKTHSIZE (sizeof(struct m_hdr) + sizeof(struct pkthdr)) #endif #define IXGBE_RX_COPY_HDR_PADDED ((((MPKTHSIZE - 1) / 32) + 1) * 32) #define IXGBE_RX_COPY_LEN (MSIZE - IXGBE_RX_COPY_HDR_PADDED) #define IXGBE_RX_COPY_ALIGN (IXGBE_RX_COPY_HDR_PADDED - MPKTHSIZE) /* Keep older OS drivers building... */ #if !defined(SYSCTL_ADD_UQUAD) #define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD #endif /* Defines for printing debug information */ #define DEBUG_INIT 0 #define DEBUG_IOCTL 0 #define DEBUG_HW 0 #define INIT_DEBUGOUT(S) if (DEBUG_INIT) printf(S "\n") #define INIT_DEBUGOUT1(S, A) if (DEBUG_INIT) printf(S "\n", A) #define INIT_DEBUGOUT2(S, A, B) if (DEBUG_INIT) printf(S "\n", A, B) #define IOCTL_DEBUGOUT(S) if (DEBUG_IOCTL) printf(S "\n") #define IOCTL_DEBUGOUT1(S, A) if (DEBUG_IOCTL) printf(S "\n", A) #define IOCTL_DEBUGOUT2(S, A, B) if (DEBUG_IOCTL) printf(S "\n", A, B) #define HW_DEBUGOUT(S) if (DEBUG_HW) printf(S "\n") #define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A) #define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B) #define MAX_NUM_MULTICAST_ADDRESSES 128 #define IXGBE_82598_SCATTER 100 #define IXGBE_82599_SCATTER 32 #define IXGBE_TSO_SIZE 262140 #define IXGBE_RX_HDR 128 #define IXGBE_VFTA_SIZE 128 #define IXGBE_BR_SIZE 4096 #define IXGBE_QUEUE_MIN_FREE 32 #define IXGBE_MAX_TX_BUSY 10 #define IXGBE_QUEUE_HUNG 0x80000000 #define IXGBE_EITR_DEFAULT 128 /* Supported offload bits in mbuf flag */ #if __FreeBSD_version >= 1000000 #define CSUM_OFFLOAD (CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \ CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \ CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP) #elif __FreeBSD_version >= 800000 #define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP) #else #define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP) #endif #define IXGBE_CAPS (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_TSO | \ IFCAP_LRO | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO | \ IFCAP_VLAN_HWCSUM | IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU | \ IFCAP_VLAN_HWFILTER | IFCAP_WOL) /* Backward compatibility items for very old versions */ #ifndef pci_find_cap #define pci_find_cap pci_find_extcap #endif #ifndef DEVMETHOD_END #define DEVMETHOD_END { NULL, NULL } #endif /* * Interrupt Moderation parameters */ #define IXGBE_LOW_LATENCY 128 #define IXGBE_AVE_LATENCY 400 #define IXGBE_BULK_LATENCY 1200 /* Using 1FF (the max value), the interval is ~1.05ms */ #define IXGBE_LINK_ITR_QUANTA 0x1FF #define IXGBE_LINK_ITR ((IXGBE_LINK_ITR_QUANTA << 3) & \ IXGBE_EITR_ITR_INT_MASK) /************************************************************************ * vendor_info_array * * Contains the list of Subvendor/Subdevice IDs on * which the driver should load. 
************************************************************************/ typedef struct _ixgbe_vendor_info_t { unsigned int vendor_id; unsigned int device_id; unsigned int subvendor_id; unsigned int subdevice_id; unsigned int index; } ixgbe_vendor_info_t; struct ixgbe_bp_data { u32 low; u32 high; u32 log; }; /* */ struct ixgbe_dma_alloc { bus_addr_t dma_paddr; caddr_t dma_vaddr; bus_dma_tag_t dma_tag; bus_dmamap_t dma_map; bus_dma_segment_t dma_seg; bus_size_t dma_size; int dma_nseg; }; struct ixgbe_mc_addr { u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; u32 vmdq; }; /* * The transmit ring, one per queue */ struct tx_ring { struct adapter *adapter; union ixgbe_adv_tx_desc *tx_base; uint64_t tx_paddr; u32 tail; qidx_t *tx_rsq; qidx_t tx_rs_cidx; qidx_t tx_rs_pidx; qidx_t tx_cidx_processed; uint8_t me; /* Flow Director */ u16 atr_sample; u16 atr_count; u32 bytes; /* used for AIM */ u32 packets; /* Soft Stats */ u64 tso_tx; u64 total_packets; }; /* * The Receive ring, one per rx queue */ struct rx_ring { struct ix_rx_queue *que; struct adapter *adapter; u32 me; u32 tail; union ixgbe_adv_rx_desc *rx_base; bool hw_rsc; bool vtag_strip; uint64_t rx_paddr; bus_dma_tag_t ptag; u32 bytes; /* Used for AIM calc */ u32 packets; /* Soft stats */ u64 rx_irq; u64 rx_copies; u64 rx_packets; u64 rx_bytes; u64 rx_discarded; u64 rsc_num; /* Flow Director */ u64 flm; }; /* * Driver queue struct: this is the interrupt container * for the associated tx and rx ring. */ struct ix_rx_queue { struct adapter *adapter; u32 msix; /* This queue's MSIX vector */ u32 eitr_setting; struct resource *res; void *tag; int busy; struct rx_ring rxr; struct if_irq que_irq; u64 irqs; }; struct ix_tx_queue { struct adapter *adapter; u32 msix; /* This queue's MSIX vector */ struct tx_ring txr; }; #define IXGBE_MAX_VF_MC 30 /* Max number of multicast entries */ struct ixgbe_vf { u_int pool; u_int rar_index; u_int maximum_frame_size; uint32_t flags; uint8_t ether_addr[ETHER_ADDR_LEN]; uint16_t mc_hash[IXGBE_MAX_VF_MC]; uint16_t num_mc_hashes; uint16_t default_vlan; uint16_t vlan_tag; uint16_t api_ver; }; /* Our adapter structure */ struct adapter { struct ixgbe_hw hw; struct ixgbe_osdep osdep; if_ctx_t ctx; if_softc_ctx_t shared; #define num_tx_queues shared->isc_ntxqsets #define num_rx_queues shared->isc_nrxqsets #define max_frame_size shared->isc_max_frame_size #define intr_type shared->isc_intr device_t dev; struct ifnet *ifp; struct resource *pci_mem; /* * Interrupt resources: this set is * either used for legacy, or for Link * when doing MSI-X */ struct if_irq irq; void *tag; struct resource *res; struct ifmedia *media; int if_flags; int msix; u16 num_vlans; /* * Shadow VFTA table; this is needed because * the real vlan filter table gets cleared during * a soft reset and the driver needs to be able * to repopulate it. */ u32 shadow_vfta[IXGBE_VFTA_SIZE]; /* Info about the interface */ int advertise; /* link speeds */ int enable_aim; /* adaptive interrupt moderation */ bool link_active; u16 num_segs; u32 link_speed; bool link_up; u32 vector; u16 dmac; u32 phy_layer; /* Power management-related */ bool wol_support; u32 wufc; /* Mbuf cluster size */ u32 rx_mbuf_sz; /* Support for pluggable optics */ bool sfp_probe; /* Flow Director */ int fdir_reinit; u32 task_requests; /* * Queues: * This is the irq holder, it has * an RX/TX pair of rings associated * with it.
*/ struct ix_tx_queue *tx_queues; struct ix_rx_queue *rx_queues; /* Multicast array memory */ struct ixgbe_mc_addr *mta; /* SR-IOV */ int iov_mode; int num_vfs; int pool; struct ixgbe_vf *vfs; /* Bypass */ struct ixgbe_bp_data bypass; /* Misc stats maintained by the driver */ unsigned long dropped_pkts; unsigned long mbuf_header_failed; unsigned long mbuf_packet_failed; unsigned long watchdog_events; unsigned long link_irq; union { struct ixgbe_hw_stats pf; struct ixgbevf_hw_stats vf; } stats; #if __FreeBSD_version >= 1100036 /* counter(9) stats */ u64 ipackets; u64 ierrors; u64 opackets; u64 oerrors; u64 ibytes; u64 obytes; u64 imcasts; u64 omcasts; u64 iqdrops; u64 noproto; #endif /* Feature capable/enabled flags. See ixgbe_features.h */ u32 feat_cap; u32 feat_en; }; /* Precision Time Sync (IEEE 1588) defines */ #define ETHERTYPE_IEEE1588 0x88F7 #define PICOSECS_PER_TICK 20833 #define TSYNC_UDP_PORT 319 /* UDP port for the protocol */ #define IXGBE_ADVTXD_TSTAMP 0x00080000 /* For backward compatibility */ #if !defined(PCIER_LINK_STA) #define PCIER_LINK_STA PCIR_EXPRESS_LINK_STA #endif /* Stats macros */ #if __FreeBSD_version >= 1100036 #define IXGBE_SET_IPACKETS(sc, count) (sc)->ipackets = (count) #define IXGBE_SET_IERRORS(sc, count) (sc)->ierrors = (count) #define IXGBE_SET_OPACKETS(sc, count) (sc)->opackets = (count) #define IXGBE_SET_OERRORS(sc, count) (sc)->oerrors = (count) #define IXGBE_SET_COLLISIONS(sc, count) #define IXGBE_SET_IBYTES(sc, count) (sc)->ibytes = (count) #define IXGBE_SET_OBYTES(sc, count) (sc)->obytes = (count) #define IXGBE_SET_IMCASTS(sc, count) (sc)->imcasts = (count) #define IXGBE_SET_OMCASTS(sc, count) (sc)->omcasts = (count) #define IXGBE_SET_IQDROPS(sc, count) (sc)->iqdrops = (count) #else #define IXGBE_SET_IPACKETS(sc, count) (sc)->ifp->if_ipackets = (count) #define IXGBE_SET_IERRORS(sc, count) (sc)->ifp->if_ierrors = (count) #define IXGBE_SET_OPACKETS(sc, count) (sc)->ifp->if_opackets = (count) #define IXGBE_SET_OERRORS(sc, count) (sc)->ifp->if_oerrors = (count) #define IXGBE_SET_COLLISIONS(sc, count) (sc)->ifp->if_collisions = (count) #define IXGBE_SET_IBYTES(sc, count) (sc)->ifp->if_ibytes = (count) #define IXGBE_SET_OBYTES(sc, count) (sc)->ifp->if_obytes = (count) #define IXGBE_SET_IMCASTS(sc, count) (sc)->ifp->if_imcasts = (count) #define IXGBE_SET_OMCASTS(sc, count) (sc)->ifp->if_omcasts = (count) #define IXGBE_SET_IQDROPS(sc, count) (sc)->ifp->if_iqdrops = (count) #endif /* External PHY register addresses */ #define IXGBE_PHY_CURRENT_TEMP 0xC820 #define IXGBE_PHY_OVERTEMP_STATUS 0xC830 /* Sysctl help messages; displayed with sysctl -d */ #define IXGBE_SYSCTL_DESC_ADV_SPEED \ "\nControl advertised link speed using these flags:\n" \ "\t0x1 - advertise 100M\n" \ "\t0x2 - advertise 1G\n" \ "\t0x4 - advertise 10G\n" \ "\t0x8 - advertise 10M\n\n" \ "\t100M and 10M are only supported on certain adapters.\n" #define IXGBE_SYSCTL_DESC_SET_FC \ "\nSet flow control mode using these values:\n" \ "\t0 - off\n" \ "\t1 - rx pause\n" \ "\t2 - tx pause\n" \ "\t3 - tx and rx pause" #define IXGBE_SYSCTL_DESC_RX_ERRS \ "\nSum of the following RX errors counters:\n" \ " * CRC errors,\n" \ " * illegal byte error count,\n" \ " * checksum error count,\n" \ " * missed packet count,\n" \ " * length error count,\n" \ " * undersized packets count,\n" \ " * fragmented packets count,\n" \ " * oversized packets count,\n" \ " * jabber count." 
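(Editor's illustration, not part of the diff: a minimal sketch of how the advertise flags documented in IXGBE_SYSCTL_DESC_ADV_SPEED above compose into the adapter's 'advertise' mask. The helper below is hypothetical and does not exist in the driver.)

/*
 * Hypothetical example only: the advertise value is a plain bit mask,
 * so OR-ing together the per-speed flags listed in
 * IXGBE_SYSCTL_DESC_ADV_SPEED selects several speeds at once.
 */
static inline int
ix_example_advertise_mask(void)
{
	int advertise = 0;

	advertise |= 0x2;	/* advertise 1G */
	advertise |= 0x4;	/* advertise 10G */

	return (advertise);	/* 0x6: advertise both 1G and 10G */
}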
/* Workaround to make 8.0 buildable */ #if __FreeBSD_version >= 800000 && __FreeBSD_version < 800504 static __inline int drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br) { #ifdef ALTQ if (ALTQ_IS_ENABLED(&ifp->if_snd)) return (1); #endif return (!buf_ring_empty(br)); } #endif /* * This checks for a zero mac addr, which is likely * unless the Admin on the Host has created one. */ static inline bool ixv_check_ether_addr(u8 *addr) { - bool status = TRUE; + bool status = true; if ((addr[0] == 0 && addr[1]== 0 && addr[2] == 0 && addr[3] == 0 && addr[4]== 0 && addr[5] == 0)) - status = FALSE; + status = false; return (status); } /* Shared Prototypes */ int ixgbe_allocate_queues(struct adapter *); int ixgbe_setup_transmit_structures(struct adapter *); void ixgbe_free_transmit_structures(struct adapter *); int ixgbe_setup_receive_structures(struct adapter *); void ixgbe_free_receive_structures(struct adapter *); int ixgbe_get_regs(SYSCTL_HANDLER_ARGS); #include "ixgbe_bypass.h" #include "ixgbe_fdir.h" #include "ixgbe_rss.h" #endif /* _IXGBE_H_ */ diff --git a/sys/dev/ixgbe/ixgbe_82598.c b/sys/dev/ixgbe/ixgbe_82598.c index 7c821aea2d03..885dfbf869bc 100644 --- a/sys/dev/ixgbe/ixgbe_82598.c +++ b/sys/dev/ixgbe/ixgbe_82598.c @@ -1,1442 +1,1442 @@ /****************************************************************************** SPDX-License-Identifier: BSD-3-Clause Copyright (c) 2001-2017, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/ /*$FreeBSD$*/ #include "ixgbe_type.h" #include "ixgbe_82598.h" #include "ixgbe_api.h" #include "ixgbe_common.h" #include "ixgbe_phy.h" #define IXGBE_82598_MAX_TX_QUEUES 32 #define IXGBE_82598_MAX_RX_QUEUES 64 #define IXGBE_82598_RAR_ENTRIES 16 #define IXGBE_82598_MC_TBL_SIZE 128 #define IXGBE_82598_VFT_TBL_SIZE 128 #define IXGBE_82598_RX_PB_SIZE 512 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *autoneg); static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw); static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, bool autoneg_wait_to_complete); static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up, bool link_up_wait_to_complete); static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete); static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete); static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw); static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq); static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw); static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, u32 headroom, int strategy); static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset, u8 *sff8472_data); /** * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout * @hw: pointer to the HW structure * * The defaults for 82598 should be in the range of 50us to 50ms, * however the hardware default for these parts is 500us to 1ms which is less * than the 10ms recommended by the pci-e spec. To address this we need to * increase the value to either 10ms to 250ms for capability version 1 config, * or 16ms to 55ms for version 2. **/ void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw) { u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR); u16 pcie_devctl2; /* only take action if timeout value is defaulted to 0 */ if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK) goto out; /* * if capabilities version is type 1 we can write the * timeout of 10ms to 250ms through the GCR register */ if (!(gcr & IXGBE_GCR_CAP_VER2)) { gcr |= IXGBE_GCR_CMPL_TMOUT_10ms; goto out; } /* * for version 2 capabilities we need to write the config space * directly in order to set the completion timeout value for * 16ms to 55ms */ pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2); pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms; IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2); out: /* disable completion timeout resend */ gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND; IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr); } /** * ixgbe_init_ops_82598 - Inits func ptrs and MAC type * @hw: pointer to hardware structure * * Initialize the function pointers and assign the MAC type for 82598. * Does not touch the hardware.
**/ s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; struct ixgbe_phy_info *phy = &hw->phy; s32 ret_val; DEBUGFUNC("ixgbe_init_ops_82598"); ret_val = ixgbe_init_phy_ops_generic(hw); ret_val = ixgbe_init_ops_generic(hw); /* PHY */ phy->ops.init = ixgbe_init_phy_ops_82598; /* MAC */ mac->ops.start_hw = ixgbe_start_hw_82598; mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_82598; mac->ops.reset_hw = ixgbe_reset_hw_82598; mac->ops.get_media_type = ixgbe_get_media_type_82598; mac->ops.get_supported_physical_layer = ixgbe_get_supported_physical_layer_82598; mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82598; mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82598; mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie_82598; mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82598; /* RAR, Multicast, VLAN */ mac->ops.set_vmdq = ixgbe_set_vmdq_82598; mac->ops.clear_vmdq = ixgbe_clear_vmdq_82598; mac->ops.set_vfta = ixgbe_set_vfta_82598; mac->ops.set_vlvf = NULL; mac->ops.clear_vfta = ixgbe_clear_vfta_82598; /* Flow Control */ mac->ops.fc_enable = ixgbe_fc_enable_82598; mac->mcft_size = IXGBE_82598_MC_TBL_SIZE; mac->vft_size = IXGBE_82598_VFT_TBL_SIZE; mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES; mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE; mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES; mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES; mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); /* SFP+ Module */ phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_82598; phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_82598; /* Link */ mac->ops.check_link = ixgbe_check_mac_link_82598; mac->ops.setup_link = ixgbe_setup_mac_link_82598; mac->ops.flap_tx_laser = NULL; mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82598; mac->ops.setup_rxpba = ixgbe_set_rxpba_82598; /* Manageability interface */ mac->ops.set_fw_drv_ver = NULL; mac->ops.get_rtrup2tc = NULL; return ret_val; } /** * ixgbe_init_phy_ops_82598 - PHY/SFP specific init * @hw: pointer to hardware structure * * Initialize any function pointers that were not able to be * set during init_shared_code because the PHY/SFP type was * not known. Perform the SFP init if necessary. 
* **/ s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; struct ixgbe_phy_info *phy = &hw->phy; s32 ret_val = IXGBE_SUCCESS; u16 list_offset, data_offset; DEBUGFUNC("ixgbe_init_phy_ops_82598"); /* Identify the PHY */ phy->ops.identify(hw); /* Overwrite the link function pointers if copper PHY */ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { mac->ops.setup_link = ixgbe_setup_copper_link_82598; mac->ops.get_link_capabilities = ixgbe_get_copper_link_capabilities_generic; } switch (hw->phy.type) { case ixgbe_phy_tn: phy->ops.setup_link = ixgbe_setup_phy_link_tnx; phy->ops.check_link = ixgbe_check_phy_link_tnx; phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_tnx; break; case ixgbe_phy_nl: phy->ops.reset = ixgbe_reset_phy_nl; /* Call SFP+ identify routine to get the SFP+ module type */ ret_val = phy->ops.identify_sfp(hw); if (ret_val != IXGBE_SUCCESS) goto out; else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) { ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED; goto out; } /* Check to see if SFP+ module is supported */ ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, &data_offset); if (ret_val != IXGBE_SUCCESS) { ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED; goto out; } break; default: break; } out: return ret_val; } /** * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx * @hw: pointer to hardware structure * * Starts the hardware using the generic start_hw function. * Disables relaxed ordering, then sets the PCIe completion timeout. * **/ s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) { u32 regval; u32 i; s32 ret_val = IXGBE_SUCCESS; DEBUGFUNC("ixgbe_start_hw_82598"); ret_val = ixgbe_start_hw_generic(hw); if (ret_val) return ret_val; /* Disable relaxed ordering */ for (i = 0; ((i < hw->mac.max_tx_queues) && (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval); } for (i = 0; ((i < hw->mac.max_rx_queues) && (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | IXGBE_DCA_RXCTRL_HEAD_WRO_EN); IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); } /* set the completion timeout for interface */ ixgbe_set_pcie_completion_timeout(hw); return ret_val; } /** * ixgbe_get_link_capabilities_82598 - Determines link capabilities * @hw: pointer to hardware structure * @speed: pointer to link speed * @autoneg: boolean auto-negotiation value * * Determines the link capabilities by reading the AUTOC register. **/ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *autoneg) { s32 status = IXGBE_SUCCESS; u32 autoc = 0; DEBUGFUNC("ixgbe_get_link_capabilities_82598"); /* * Determine link capabilities based on the stored value of AUTOC, * which represents EEPROM defaults. If AUTOC value has not been * stored, use the current register value.
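* * Worked example (an editor's illustration, derived from the switch below): * with LMS = KX4_AN and both IXGBE_AUTOC_KX4_SUPP and IXGBE_AUTOC_KX_SUPP set * in AUTOC, *speed becomes IXGBE_LINK_SPEED_10GB_FULL | * IXGBE_LINK_SPEED_1GB_FULL and *autoneg is set to true.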
*/ if (hw->mac.orig_link_settings_stored) autoc = hw->mac.orig_autoc; else autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); switch (autoc & IXGBE_AUTOC_LMS_MASK) { case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: *speed = IXGBE_LINK_SPEED_1GB_FULL; - *autoneg = FALSE; + *autoneg = false; break; case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: *speed = IXGBE_LINK_SPEED_10GB_FULL; - *autoneg = FALSE; + *autoneg = false; break; case IXGBE_AUTOC_LMS_1G_AN: *speed = IXGBE_LINK_SPEED_1GB_FULL; - *autoneg = TRUE; + *autoneg = true; break; case IXGBE_AUTOC_LMS_KX4_AN: case IXGBE_AUTOC_LMS_KX4_AN_1G_AN: *speed = IXGBE_LINK_SPEED_UNKNOWN; if (autoc & IXGBE_AUTOC_KX4_SUPP) *speed |= IXGBE_LINK_SPEED_10GB_FULL; if (autoc & IXGBE_AUTOC_KX_SUPP) *speed |= IXGBE_LINK_SPEED_1GB_FULL; - *autoneg = TRUE; + *autoneg = true; break; default: status = IXGBE_ERR_LINK_SETUP; break; } return status; } /** * ixgbe_get_media_type_82598 - Determines media type * @hw: pointer to hardware structure * * Returns the media type (fiber, copper, backplane) **/ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) { enum ixgbe_media_type media_type; DEBUGFUNC("ixgbe_get_media_type_82598"); /* Detect if there is a copper PHY attached. */ switch (hw->phy.type) { case ixgbe_phy_cu_unknown: case ixgbe_phy_tn: media_type = ixgbe_media_type_copper; goto out; default: break; } /* Media type for I82598 is based on device ID */ switch (hw->device_id) { case IXGBE_DEV_ID_82598: case IXGBE_DEV_ID_82598_BX: /* Default device ID is mezzanine card KX/KX4 */ media_type = ixgbe_media_type_backplane; break; case IXGBE_DEV_ID_82598AF_DUAL_PORT: case IXGBE_DEV_ID_82598AF_SINGLE_PORT: case IXGBE_DEV_ID_82598_DA_DUAL_PORT: case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: case IXGBE_DEV_ID_82598EB_XF_LR: case IXGBE_DEV_ID_82598EB_SFP_LOM: media_type = ixgbe_media_type_fiber; break; case IXGBE_DEV_ID_82598EB_CX4: case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: media_type = ixgbe_media_type_cx4; break; case IXGBE_DEV_ID_82598AT: case IXGBE_DEV_ID_82598AT2: media_type = ixgbe_media_type_copper; break; default: media_type = ixgbe_media_type_unknown; break; } out: return media_type; } /** * ixgbe_fc_enable_82598 - Enable flow control * @hw: pointer to hardware structure * * Enable flow control according to the current settings. **/ s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) { s32 ret_val = IXGBE_SUCCESS; u32 fctrl_reg; u32 rmcs_reg; u32 reg; u32 fcrtl, fcrth; u32 link_speed = 0; int i; bool link_up; DEBUGFUNC("ixgbe_fc_enable_82598"); /* Validate the water mark configuration */ if (!hw->fc.pause_time) { ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; goto out; } /* Low water mark of zero causes XOFF floods */ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && hw->fc.high_water[i]) { if (!hw->fc.low_water[i] || hw->fc.low_water[i] >= hw->fc.high_water[i]) { DEBUGOUT("Invalid water mark configuration\n"); ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; goto out; } } } /* * On 82598 having Rx FC on causes resets while doing 1G * so if it's on turn it off once we know link_speed. For * more details see 82598 Specification update. 
*/ - hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE); + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) { switch (hw->fc.requested_mode) { case ixgbe_fc_full: hw->fc.requested_mode = ixgbe_fc_tx_pause; break; case ixgbe_fc_rx_pause: hw->fc.requested_mode = ixgbe_fc_none; break; default: /* no change */ break; } } /* Negotiate the fc mode to use */ ixgbe_fc_autoneg(hw); /* Disable any previous flow control settings */ fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE); rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS); rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X); /* * The possible values of fc.current_mode are: * 0: Flow control is completely disabled * 1: Rx flow control is enabled (we can receive pause frames, * but not send pause frames). * 2: Tx flow control is enabled (we can send pause frames but * we do not support receiving pause frames). * 3: Both Rx and Tx flow control (symmetric) are enabled. * other: Invalid. */ switch (hw->fc.current_mode) { case ixgbe_fc_none: /* * Flow control is disabled by software override or autoneg. * The code below will actually disable it in the HW. */ break; case ixgbe_fc_rx_pause: /* * Rx Flow control is enabled and Tx Flow control is * disabled by software override. Since there really * isn't a way to advertise that we are capable of RX * Pause ONLY, we will advertise that we support both * symmetric and asymmetric Rx PAUSE. Later, we will * disable the adapter's ability to send PAUSE frames. */ fctrl_reg |= IXGBE_FCTRL_RFCE; break; case ixgbe_fc_tx_pause: /* * Tx Flow control is enabled, and Rx Flow control is * disabled by software override. */ rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; break; case ixgbe_fc_full: /* Flow control (both Rx and Tx) is enabled by SW override. */ fctrl_reg |= IXGBE_FCTRL_RFCE; rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; break; default: DEBUGOUT("Flow control param set incorrectly\n"); ret_val = IXGBE_ERR_CONFIG; goto out; break; } /* Set 802.3x based flow control settings. */ fctrl_reg |= IXGBE_FCTRL_DPF; IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg); IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); /* Set up and enable Rx high/low water mark thresholds, enable XON. */ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && hw->fc.high_water[i]) { fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl); IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth); } else { IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0); IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0); } } /* Configure pause time (2 TCs per register) */ reg = hw->fc.pause_time * 0x00010001; for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); /* Configure flow control refresh threshold value */ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); out: return ret_val; } /** * ixgbe_start_mac_link_82598 - Configures MAC link settings * @hw: pointer to hardware structure - * @autoneg_wait_to_complete: TRUE when waiting for completion is needed + * @autoneg_wait_to_complete: true when waiting for completion is needed * * Configures link settings based on values in the ixgbe_hw struct. * Restarts the link. Performs autonegotiation if needed. 
**/ static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, bool autoneg_wait_to_complete) { u32 autoc_reg; u32 links_reg; u32 i; s32 status = IXGBE_SUCCESS; DEBUGFUNC("ixgbe_start_mac_link_82598"); /* Restart link */ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); autoc_reg |= IXGBE_AUTOC_AN_RESTART; IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); /* Only poll for autoneg to complete if specified to do so */ if (autoneg_wait_to_complete) { if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == IXGBE_AUTOC_LMS_KX4_AN || (autoc_reg & IXGBE_AUTOC_LMS_MASK) == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) { links_reg = 0; /* Just in case Autoneg time = 0 */ for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); if (links_reg & IXGBE_LINKS_KX_AN_COMP) break; msec_delay(100); } if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; DEBUGOUT("Autonegotiation did not complete.\n"); } } } /* Add delay to filter out noise during initial link setup */ msec_delay(50); return status; } /** * ixgbe_validate_link_ready - Function looks for phy link * @hw: pointer to hardware structure * * Function indicates success when phy link is available. If phy is not ready * within 5 seconds of MAC indicating link, the function returns error. **/ static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw) { u32 timeout; u16 an_reg; if (hw->device_id != IXGBE_DEV_ID_82598AT2) return IXGBE_SUCCESS; for (timeout = 0; timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) { hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg); if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) && (an_reg & IXGBE_MII_AUTONEG_LINK_UP)) break; msec_delay(100); } if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) { DEBUGOUT("Link was indicated but link is down\n"); return IXGBE_ERR_LINK_SETUP; } return IXGBE_SUCCESS; } /** * ixgbe_check_mac_link_82598 - Get link/speed status * @hw: pointer to hardware structure * @speed: pointer to link speed - * @link_up: TRUE is link is up, FALSE otherwise + * @link_up: true if link is up, false otherwise * @link_up_wait_to_complete: bool used to wait for link up or not * * Reads the links register to determine if link is up and the current speed **/ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up, bool link_up_wait_to_complete) { u32 links_reg; u32 i; u16 link_reg, adapt_comp_reg; DEBUGFUNC("ixgbe_check_mac_link_82598"); /* * SERDES PHY requires us to read link status from undocumented * register 0xC79F. Bit 0 set indicates link is up/ready; clear * indicates link down. 0xC00C is read to check that the XAUI lanes * are active. Bit 0 clear indicates active; set indicates inactive.
*/ if (hw->phy.type == ixgbe_phy_nl) { hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg); hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg); hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV, &adapt_comp_reg); if (link_up_wait_to_complete) { for (i = 0; i < hw->mac.max_link_up_time; i++) { if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0)) { - *link_up = TRUE; + *link_up = true; break; } else { - *link_up = FALSE; + *link_up = false; } msec_delay(100); hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg); hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV, &adapt_comp_reg); } } else { if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0)) - *link_up = TRUE; + *link_up = true; else - *link_up = FALSE; + *link_up = false; } - if (*link_up == FALSE) + if (*link_up == false) goto out; } links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); if (link_up_wait_to_complete) { for (i = 0; i < hw->mac.max_link_up_time; i++) { if (links_reg & IXGBE_LINKS_UP) { - *link_up = TRUE; + *link_up = true; break; } else { - *link_up = FALSE; + *link_up = false; } msec_delay(100); links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); } } else { if (links_reg & IXGBE_LINKS_UP) - *link_up = TRUE; + *link_up = true; else - *link_up = FALSE; + *link_up = false; } if (links_reg & IXGBE_LINKS_SPEED) *speed = IXGBE_LINK_SPEED_10GB_FULL; else *speed = IXGBE_LINK_SPEED_1GB_FULL; - if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == TRUE) && + if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == true) && (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS)) - *link_up = FALSE; + *link_up = false; out: return IXGBE_SUCCESS; } /** * ixgbe_setup_mac_link_82598 - Set MAC link speed * @hw: pointer to hardware structure * @speed: new link speed - * @autoneg_wait_to_complete: TRUE when waiting for completion is needed + * @autoneg_wait_to_complete: true when waiting for completion is needed * * Set the link speed in the AUTOC register and restarts link. **/ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { - bool autoneg = FALSE; + bool autoneg = false; s32 status = IXGBE_SUCCESS; ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); u32 autoc = curr_autoc; u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK; DEBUGFUNC("ixgbe_setup_mac_link_82598"); /* Check to see if speed passed in is supported. 
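* * Illustrative example (editor's note): requesting IXGBE_LINK_SPEED_10GB_FULL * on a link whose capabilities only include 1GB leaves speed equal to * IXGBE_LINK_SPEED_UNKNOWN after the mask below, so the function returns * IXGBE_ERR_LINK_SETUP.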
*/ ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg); speed &= link_capabilities; if (speed == IXGBE_LINK_SPEED_UNKNOWN) status = IXGBE_ERR_LINK_SETUP; /* Set KX4/KX support according to speed requested */ else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN || link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) { autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK; if (speed & IXGBE_LINK_SPEED_10GB_FULL) autoc |= IXGBE_AUTOC_KX4_SUPP; if (speed & IXGBE_LINK_SPEED_1GB_FULL) autoc |= IXGBE_AUTOC_KX_SUPP; if (autoc != curr_autoc) IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); } if (status == IXGBE_SUCCESS) { /* * Setup and restart the link based on the new values in * ixgbe_hw. This will write the AUTOC register based on the new * stored values */ status = ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete); } return status; } /** * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field * @hw: pointer to hardware structure * @speed: new link speed - * @autoneg_wait_to_complete: TRUE if waiting is needed to complete + * @autoneg_wait_to_complete: true if waiting is needed to complete * * Sets the link speed in the AUTOC register in the MAC and restarts link. **/ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { s32 status; DEBUGFUNC("ixgbe_setup_copper_link_82598"); /* Setup the PHY according to input speed */ status = hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete); /* Set up MAC */ ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete); return status; } /** * ixgbe_reset_hw_82598 - Performs hardware reset * @hw: pointer to hardware structure * * Resets the hardware by resetting the transmit and receive units, masking and * clearing all interrupts, performing a PHY reset, and performing a link (MAC) * reset. **/ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) { s32 status = IXGBE_SUCCESS; s32 phy_status = IXGBE_SUCCESS; u32 ctrl; u32 gheccr; u32 i; u32 autoc; u8 analog_val; DEBUGFUNC("ixgbe_reset_hw_82598"); /* Call adapter stop to disable tx/rx and clear interrupts */ status = hw->mac.ops.stop_adapter(hw); if (status != IXGBE_SUCCESS) goto reset_hw_out; /* * Power up the Atlas Tx lanes if they are currently powered down. * Atlas Tx lanes are powered down for MAC loopback tests, but * they are not automatically restored on reset.
*/ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) { /* Enable Tx Atlas so packets can be transmitted again */ hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN; hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, analog_val); hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &analog_val); analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL; hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, analog_val); hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &analog_val); analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL; hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, analog_val); hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &analog_val); analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL; hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, analog_val); } /* Reset PHY */ - if (hw->phy.reset_disable == FALSE) { + if (hw->phy.reset_disable == false) { /* PHY ops must be identified and initialized prior to reset */ /* Init PHY and function pointers, perform SFP setup */ phy_status = hw->phy.ops.init(hw); if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED) goto reset_hw_out; if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT) goto mac_reset_top; hw->phy.ops.reset(hw); } mac_reset_top: /* * Issue global reset to the MAC. This needs to be a SW reset. * If link reset is used, it might reset the MAC when mng is using it */ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST; IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); IXGBE_WRITE_FLUSH(hw); /* Poll for reset bit to self-clear indicating reset is complete */ for (i = 0; i < 10; i++) { usec_delay(1); ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); if (!(ctrl & IXGBE_CTRL_RST)) break; } if (ctrl & IXGBE_CTRL_RST) { status = IXGBE_ERR_RESET_FAILED; DEBUGOUT("Reset polling failed to complete.\n"); } msec_delay(50); /* * Double resets are required for recovery from certain error * conditions. Between resets, it is necessary to stall to allow time * for any pending HW events to complete. */ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; goto mac_reset_top; } gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR); gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6)); IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr); /* * Store the original AUTOC value if it has not been * stored off yet. Otherwise restore the stored original * AUTOC value since the reset operation sets back to defaults.
*/ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); - if (hw->mac.orig_link_settings_stored == FALSE) { + if (hw->mac.orig_link_settings_stored == false) { hw->mac.orig_autoc = autoc; - hw->mac.orig_link_settings_stored = TRUE; + hw->mac.orig_link_settings_stored = true; } else if (autoc != hw->mac.orig_autoc) { IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc); } /* Store the permanent mac address */ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); /* * Store MAC address from RAR0, clear receive address registers, and * clear the multicast table */ hw->mac.ops.init_rx_addrs(hw); reset_hw_out: if (phy_status != IXGBE_SUCCESS) status = phy_status; return status; } /** * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address * @hw: pointer to hardware struct * @rar: receive address register index to associate with a VMDq index * @vmdq: VMDq set index **/ s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) { u32 rar_high; u32 rar_entries = hw->mac.num_rar_entries; DEBUGFUNC("ixgbe_set_vmdq_82598"); /* Make sure we are using a valid rar index range */ if (rar >= rar_entries) { DEBUGOUT1("RAR index %d is out of range.\n", rar); return IXGBE_ERR_INVALID_ARGUMENT; } rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); rar_high &= ~IXGBE_RAH_VIND_MASK; rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK); IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); return IXGBE_SUCCESS; } /** * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address * @hw: pointer to hardware struct * @rar: receive address register index to associate with a VMDq index * @vmdq: VMDq clear index (not used in 82598, but elsewhere) **/ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) { u32 rar_high; u32 rar_entries = hw->mac.num_rar_entries; UNREFERENCED_1PARAMETER(vmdq); /* Make sure we are using a valid rar index range */ if (rar >= rar_entries) { DEBUGOUT1("RAR index %d is out of range.\n", rar); return IXGBE_ERR_INVALID_ARGUMENT; } rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); if (rar_high & IXGBE_RAH_VIND_MASK) { rar_high &= ~IXGBE_RAH_VIND_MASK; IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); } return IXGBE_SUCCESS; } /** * ixgbe_set_vfta_82598 - Set VLAN filter table * @hw: pointer to hardware structure * @vlan: VLAN id to write to VLAN filter * @vind: VMDq output index that maps queue to VLAN id in VFTA * @vlan_on: boolean flag to turn on/off VLAN in VFTA * @vlvf_bypass: boolean flag - unused * * Turn on/off specified VLAN in the VLAN filter table. 
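* * Worked example (editor's illustration of the index math below): for * vlan = 100, regindex = (100 >> 5) & 0x7F = 3 and bitindex = 100 & 0x1F = 4, * so enabling the filter sets bit 4 of VFTA[3]; the VMDq nibble for that VLAN * sits at bit offset (100 & 0x7) << 2 = 16 of VFTAVIND(vftabyte, regindex), * with vftabyte = (100 >> 3) & 0x03 = 0.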
**/ s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on, bool vlvf_bypass) { u32 regindex; u32 bitindex; u32 bits; u32 vftabyte; UNREFERENCED_1PARAMETER(vlvf_bypass); DEBUGFUNC("ixgbe_set_vfta_82598"); if (vlan > 4095) return IXGBE_ERR_PARAM; /* Determine 32-bit word position in array */ regindex = (vlan >> 5) & 0x7F; /* upper seven bits */ /* Determine the location of the (VMD) queue index */ vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */ bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */ /* Set the nibble for VMD queue index */ bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex)); bits &= (~(0x0F << bitindex)); bits |= (vind << bitindex); IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits); /* Determine the location of the bit for this VLAN id */ bitindex = vlan & 0x1F; /* lower five bits */ bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); if (vlan_on) /* Turn on this VLAN id */ bits |= (1 << bitindex); else /* Turn off this VLAN id */ bits &= ~(1 << bitindex); IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits); return IXGBE_SUCCESS; } /** * ixgbe_clear_vfta_82598 - Clear VLAN filter table * @hw: pointer to hardware structure * * Clears the VLAN filter table, and the VMDq index associated with the filter **/ static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw) { u32 offset; u32 vlanbyte; DEBUGFUNC("ixgbe_clear_vfta_82598"); for (offset = 0; offset < hw->mac.vft_size; offset++) IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); for (vlanbyte = 0; vlanbyte < 4; vlanbyte++) for (offset = 0; offset < hw->mac.vft_size; offset++) IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset), 0); return IXGBE_SUCCESS; } /** * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register * @hw: pointer to hardware structure * @reg: analog register to read * @val: read value * * Performs read operation to Atlas analog register specified. **/ s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val) { u32 atlas_ctl; DEBUGFUNC("ixgbe_read_analog_reg8_82598"); IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, IXGBE_ATLASCTL_WRITE_CMD | (reg << 8)); IXGBE_WRITE_FLUSH(hw); usec_delay(10); atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); *val = (u8)atlas_ctl; return IXGBE_SUCCESS; } /** * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register * @hw: pointer to hardware structure * @reg: atlas register to write * @val: value to write * * Performs write operation to Atlas analog register specified. **/ s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val) { u32 atlas_ctl; DEBUGFUNC("ixgbe_write_analog_reg8_82598"); atlas_ctl = (reg << 8) | val; IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl); IXGBE_WRITE_FLUSH(hw); usec_delay(10); return IXGBE_SUCCESS; } /** * ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface. * @hw: pointer to hardware structure * @dev_addr: address to read from * @byte_offset: byte offset to read from dev_addr * @eeprom_data: value read * * Performs 8 bit read operation to SFP module's EEPROM over I2C interface.
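* * Illustrative example (editor's note): reading byte_offset 0x60 from * dev_addr 0xA0 (conventionally the SFP EEPROM I2C address) composes * sfp_addr = (0xA0 << 8) + 0x60 = 0xA060, which is then OR'd with * IXGBE_I2C_EEPROM_READ_MASK before being written to the PHY's SDA/SCL * address register.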
**/ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr, u8 byte_offset, u8 *eeprom_data) { s32 status = IXGBE_SUCCESS; u16 sfp_addr = 0; u16 sfp_data = 0; u16 sfp_stat = 0; u16 gssr; u32 i; DEBUGFUNC("ixgbe_read_i2c_phy_82598"); if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) gssr = IXGBE_GSSR_PHY1_SM; else gssr = IXGBE_GSSR_PHY0_SM; if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS) return IXGBE_ERR_SWFW_SYNC; if (hw->phy.type == ixgbe_phy_nl) { /* * NetLogic phy SDA/SCL registers are at addresses 0xC30A to * 0xC30D. These registers are used to talk to the SFP+ * module's EEPROM through the SDA/SCL (I2C) interface. */ sfp_addr = (dev_addr << 8) + byte_offset; sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK); hw->phy.ops.write_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR, IXGBE_MDIO_PMA_PMD_DEV_TYPE, sfp_addr); /* Poll status */ for (i = 0; i < 100; i++) { hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT, IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_stat); sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK; if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS) break; msec_delay(10); } if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) { DEBUGOUT("EEPROM read did not pass.\n"); status = IXGBE_ERR_SFP_NOT_PRESENT; goto out; } /* Read data */ hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA, IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data); *eeprom_data = (u8)(sfp_data >> 8); } else { status = IXGBE_ERR_PHY; } out: hw->mac.ops.release_swfw_sync(hw, gssr); return status; } /** * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface. * @hw: pointer to hardware structure * @byte_offset: EEPROM byte offset to read * @eeprom_data: value read * * Performs 8 byte read operation to SFP module's EEPROM over I2C interface. **/ s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data) { return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR, byte_offset, eeprom_data); } /** * ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface. * @hw: pointer to hardware structure * @byte_offset: byte offset at address 0xA2 * @sff8472_data: value read * * Performs 8 byte read operation to SFP module's SFF-8472 data over I2C **/ static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset, u8 *sff8472_data) { return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2, byte_offset, sff8472_data); } /** * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type * @hw: pointer to hardware structure * * Determines physical layer capabilities of the current configuration. 
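* * Editor's illustration (added example, not in the original source): for a copper PHY whose extended-ability register reports both IXGBE_MDIO_PHY_10GBASET_ABILITY and IXGBE_MDIO_PHY_1000BASET_ABILITY, the function returns IXGBE_PHYSICAL_LAYER_10GBASE_T | IXGBE_PHYSICAL_LAYER_1000BASE_T and never consults the AUTOC link-mode bits.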
**/ u64 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw) { u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK; u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; u16 ext_ability = 0; DEBUGFUNC("ixgbe_get_supported_physical_layer_82598"); hw->phy.ops.identify(hw); /* Copper PHY must be checked before AUTOC LMS to determine correct * physical layer because 10GBase-T PHYs use LMS = KX4/KX */ switch (hw->phy.type) { case ixgbe_phy_tn: case ixgbe_phy_cu_unknown: hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY) physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; goto out; default: break; } switch (autoc & IXGBE_AUTOC_LMS_MASK) { case IXGBE_AUTOC_LMS_1G_AN: case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: if (pma_pmd_1g == IXGBE_AUTOC_1G_KX) physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX; else physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX; break; case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4) physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4) physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4; else /* XAUI */ physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; break; case IXGBE_AUTOC_LMS_KX4_AN: case IXGBE_AUTOC_LMS_KX4_AN_1G_AN: if (autoc & IXGBE_AUTOC_KX_SUPP) physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX; if (autoc & IXGBE_AUTOC_KX4_SUPP) physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4; break; default: break; } if (hw->phy.type == ixgbe_phy_nl) { hw->phy.ops.identify_sfp(hw); switch (hw->phy.sfp_type) { case ixgbe_sfp_type_da_cu: physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; break; case ixgbe_sfp_type_sr: physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; break; case ixgbe_sfp_type_lr: physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; break; default: physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; break; } } switch (hw->device_id) { case IXGBE_DEV_ID_82598_DA_DUAL_PORT: physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; break; case IXGBE_DEV_ID_82598AF_DUAL_PORT: case IXGBE_DEV_ID_82598AF_SINGLE_PORT: case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; break; case IXGBE_DEV_ID_82598EB_XF_LR: physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; break; default: break; } out: return physical_layer; } /** * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple * port devices. * @hw: pointer to the HW structure * * Calls common function and corrects issue with some single port devices * that enable LAN1 but not LAN0. 
**/ void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw) { struct ixgbe_bus_info *bus = &hw->bus; u16 pci_gen = 0; u16 pci_ctrl2 = 0; DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598"); ixgbe_set_lan_id_multi_port_pcie(hw); /* check if LAN0 is disabled */ hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen); if ((pci_gen != 0) && (pci_gen != 0xFFFF)) { hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2); /* if LAN0 is completely disabled force function to 0 */ if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) && !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) && !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) { bus->func = 0; } } } /** * ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering * @hw: pointer to hardware structure * **/ void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw) { u32 regval; u32 i; DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598"); /* Enable relaxed ordering */ for (i = 0; ((i < hw->mac.max_tx_queues) && (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN; IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval); } for (i = 0; ((i < hw->mac.max_rx_queues) && (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN | IXGBE_DCA_RXCTRL_HEAD_WRO_EN; IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); } } /** * ixgbe_set_rxpba_82598 - Initialize RX packet buffer * @hw: pointer to hardware structure * @num_pb: number of packet buffers to allocate * @headroom: reserve n KB of headroom * @strategy: packet buffer allocation strategy **/ static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, u32 headroom, int strategy) { u32 rxpktsize = IXGBE_RXPBSIZE_64KB; u8 i = 0; UNREFERENCED_1PARAMETER(headroom); if (!num_pb) return; /* Setup Rx packet buffer sizes */ switch (strategy) { case PBA_STRATEGY_WEIGHTED: /* Setup the first four at 80KB */ rxpktsize = IXGBE_RXPBSIZE_80KB; for (; i < 4; i++) IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); /* Setup the last four at 48KB...don't re-init i */ rxpktsize = IXGBE_RXPBSIZE_48KB; /* Fall Through */ case PBA_STRATEGY_EQUAL: default: /* Divide the remaining Rx packet buffer evenly among the TCs */ for (; i < IXGBE_MAX_PACKET_BUFFERS; i++) IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); break; } /* Setup Tx packet buffer sizes */ for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB); } /** * ixgbe_enable_rx_dma_82598 - Enable the Rx DMA unit * @hw: pointer to hardware structure * @regval: register value to write to RXCTRL * * Enables the Rx DMA unit **/ s32 ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval) { DEBUGFUNC("ixgbe_enable_rx_dma_82598"); IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); return IXGBE_SUCCESS; } diff --git a/sys/dev/ixgbe/ixgbe_82599.c b/sys/dev/ixgbe/ixgbe_82599.c index 1f81f6109f1b..d682dfbc9ab8 100644 --- a/sys/dev/ixgbe/ixgbe_82599.c +++ b/sys/dev/ixgbe/ixgbe_82599.c @@ -1,2640 +1,2640 @@ /****************************************************************************** SPDX-License-Identifier: BSD-3-Clause Copyright (c) 2001-2017, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #include "ixgbe_type.h" #include "ixgbe_82599.h" #include "ixgbe_api.h" #include "ixgbe_common.h" #include "ixgbe_phy.h" #define IXGBE_82599_MAX_TX_QUEUES 128 #define IXGBE_82599_MAX_RX_QUEUES 128 #define IXGBE_82599_RAR_ENTRIES 128 #define IXGBE_82599_MC_TBL_SIZE 128 #define IXGBE_82599_VFT_TBL_SIZE 128 #define IXGBE_82599_RX_PB_SIZE 512 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete); static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, u16 offset, u16 *data); static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data); static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data); static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data); void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; DEBUGFUNC("ixgbe_init_mac_link_ops_82599"); /* * enable the laser control functions for SFP+ fiber * and MNG not enabled */ if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) && !ixgbe_mng_enabled(hw)) { mac->ops.disable_tx_laser = ixgbe_disable_tx_laser_multispeed_fiber; mac->ops.enable_tx_laser = ixgbe_enable_tx_laser_multispeed_fiber; mac->ops.flap_tx_laser = ixgbe_flap_tx_laser_multispeed_fiber; } else { mac->ops.disable_tx_laser = NULL; mac->ops.enable_tx_laser = NULL; mac->ops.flap_tx_laser = NULL; } if (hw->phy.multispeed_fiber) { /* Set up dual speed SFP+ support */ mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber; mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599; mac->ops.set_rate_select_speed = ixgbe_set_hard_rate_select_speed; if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber_fixed) mac->ops.set_rate_select_speed = ixgbe_set_soft_rate_select_speed; } else { if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) && (hw->phy.smart_speed == ixgbe_smart_speed_auto || hw->phy.smart_speed == ixgbe_smart_speed_on) && !ixgbe_verify_lesm_fw_enabled_82599(hw)) { mac->ops.setup_link = ixgbe_setup_mac_link_smartspeed; } else { mac->ops.setup_link = ixgbe_setup_mac_link_82599; } } } /** * ixgbe_init_phy_ops_82599 - PHY/SFP specific init * @hw: pointer to hardware 
structure * * Initialize any function pointers that were not able to be * set during init_shared_code because the PHY/SFP type was * not known. Perform the SFP init if necessary. * **/ s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; struct ixgbe_phy_info *phy = &hw->phy; s32 ret_val = IXGBE_SUCCESS; u32 esdp; DEBUGFUNC("ixgbe_init_phy_ops_82599"); if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) { /* Store flag indicating I2C bus access control unit. */ - hw->phy.qsfp_shared_i2c_bus = TRUE; + hw->phy.qsfp_shared_i2c_bus = true; /* Initialize access to QSFP+ I2C bus */ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); esdp |= IXGBE_ESDP_SDP0_DIR; esdp &= ~IXGBE_ESDP_SDP1_DIR; esdp &= ~IXGBE_ESDP_SDP0; esdp &= ~IXGBE_ESDP_SDP0_NATIVE; esdp &= ~IXGBE_ESDP_SDP1_NATIVE; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); IXGBE_WRITE_FLUSH(hw); phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_82599; phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_82599; } /* Identify the PHY or SFP module */ ret_val = phy->ops.identify(hw); if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED) goto init_phy_ops_out; /* Setup function pointers based on detected SFP module and speeds */ ixgbe_init_mac_link_ops_82599(hw); if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) hw->phy.ops.reset = NULL; /* If copper media, overwrite with copper function pointers */ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { mac->ops.setup_link = ixgbe_setup_copper_link_82599; mac->ops.get_link_capabilities = ixgbe_get_copper_link_capabilities_generic; } /* Set necessary function pointers based on PHY type */ switch (hw->phy.type) { case ixgbe_phy_tn: phy->ops.setup_link = ixgbe_setup_phy_link_tnx; phy->ops.check_link = ixgbe_check_phy_link_tnx; phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_tnx; break; default: break; } init_phy_ops_out: return ret_val; } s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) { s32 ret_val = IXGBE_SUCCESS; u16 list_offset, data_offset, data_value; DEBUGFUNC("ixgbe_setup_sfp_modules_82599"); if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { ixgbe_init_mac_link_ops_82599(hw); hw->phy.ops.reset = NULL; ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, &data_offset); if (ret_val != IXGBE_SUCCESS) goto setup_sfp_out; /* PHY config will finish before releasing the semaphore */ ret_val = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); if (ret_val != IXGBE_SUCCESS) { ret_val = IXGBE_ERR_SWFW_SYNC; goto setup_sfp_out; } if (hw->eeprom.ops.read(hw, ++data_offset, &data_value)) goto setup_sfp_err; while (data_value != 0xffff) { IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value); IXGBE_WRITE_FLUSH(hw); if (hw->eeprom.ops.read(hw, ++data_offset, &data_value)) goto setup_sfp_err; } /* Release the semaphore */ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); /* Delay obtaining semaphore again to allow FW access * prot_autoc_write uses the semaphore too. 
*/ msec_delay(hw->eeprom.semaphore_delay); /* Restart DSP and set SFI mode */ ret_val = hw->mac.ops.prot_autoc_write(hw, hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL, - FALSE); + false); if (ret_val) { DEBUGOUT("sfp module setup not complete\n"); ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; goto setup_sfp_out; } } setup_sfp_out: return ret_val; setup_sfp_err: /* Release the semaphore */ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); /* Delay obtaining semaphore again to allow FW access */ msec_delay(hw->eeprom.semaphore_delay); ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, "eeprom read at offset %d failed", data_offset); return IXGBE_ERR_PHY; } /** * prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read * @hw: pointer to hardware structure * @locked: Return whether we locked for this read. * @reg_val: Value we read from AUTOC * * For this part (82599) we need to wrap read-modify-writes with a possible * FW/SW lock. It is assumed this lock will be freed with the next * prot_autoc_write_82599(). */ s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val) { s32 ret_val; - *locked = FALSE; + *locked = false; /* If LESM is on then we need to hold the SW/FW semaphore. */ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { ret_val = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); if (ret_val != IXGBE_SUCCESS) return IXGBE_ERR_SWFW_SYNC; - *locked = TRUE; + *locked = true; } *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC); return IXGBE_SUCCESS; } /** * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write * @hw: pointer to hardware structure * @autoc: value to write to AUTOC * @locked: bool to indicate whether the SW/FW lock was already taken by * previous prot_autoc_read_82599. * * This part (82599) may need to hold the SW/FW lock around all writes to * AUTOC. Likewise after a write we need to do a pipeline reset. */ s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked) { s32 ret_val = IXGBE_SUCCESS; /* Blocked by MNG FW so bail */ if (ixgbe_check_reset_blocked(hw)) goto out; /* We only need to get the lock if: * - We didn't do it already (in the read part of a read-modify-write) * - LESM is enabled. */ if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) { ret_val = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); if (ret_val != IXGBE_SUCCESS) return IXGBE_ERR_SWFW_SYNC; - locked = TRUE; + locked = true; } IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); ret_val = ixgbe_reset_pipeline_82599(hw); out: /* Free the SW/FW semaphore as we either grabbed it here or * already had it when this function was called. */ if (locked) hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); return ret_val; } /** * ixgbe_init_ops_82599 - Inits func ptrs and MAC type * @hw: pointer to hardware structure * * Initialize the function pointers and assign the MAC type for 82599. * Does not touch the hardware.
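* * Editor's note on the prot_autoc pair above (illustrative sketch of the documented contract, not code from this driver; new_bits is a placeholder): a read-modify-write of AUTOC threads the lock flag through both calls, e.g. bool locked; u32 autoc; s32 err = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc); if (err == IXGBE_SUCCESS) err = hw->mac.ops.prot_autoc_write(hw, autoc | new_bits, locked); so a semaphore taken by the read is released by the write.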
**/ s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; struct ixgbe_phy_info *phy = &hw->phy; struct ixgbe_eeprom_info *eeprom = &hw->eeprom; s32 ret_val; DEBUGFUNC("ixgbe_init_ops_82599"); ixgbe_init_phy_ops_generic(hw); ret_val = ixgbe_init_ops_generic(hw); /* PHY */ phy->ops.identify = ixgbe_identify_phy_82599; phy->ops.init = ixgbe_init_phy_ops_82599; /* MAC */ mac->ops.reset_hw = ixgbe_reset_hw_82599; mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2; mac->ops.get_media_type = ixgbe_get_media_type_82599; mac->ops.get_supported_physical_layer = ixgbe_get_supported_physical_layer_82599; mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic; mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic; mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82599; mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82599; mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82599; mac->ops.start_hw = ixgbe_start_hw_82599; mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic; mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic; mac->ops.get_device_caps = ixgbe_get_device_caps_generic; mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic; mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic; mac->ops.prot_autoc_read = prot_autoc_read_82599; mac->ops.prot_autoc_write = prot_autoc_write_82599; /* RAR, Multicast, VLAN */ mac->ops.set_vmdq = ixgbe_set_vmdq_generic; mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic; mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic; mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic; mac->rar_highwater = 1; mac->ops.set_vfta = ixgbe_set_vfta_generic; mac->ops.set_vlvf = ixgbe_set_vlvf_generic; mac->ops.clear_vfta = ixgbe_clear_vfta_generic; mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic; mac->ops.setup_sfp = ixgbe_setup_sfp_modules_82599; mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing; mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing; /* Link */ mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82599; mac->ops.check_link = ixgbe_check_mac_link_generic; mac->ops.setup_rxpba = ixgbe_set_rxpba_generic; ixgbe_init_mac_link_ops_82599(hw); mac->mcft_size = IXGBE_82599_MC_TBL_SIZE; mac->vft_size = IXGBE_82599_VFT_TBL_SIZE; mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES; mac->rx_pb_size = IXGBE_82599_RX_PB_SIZE; mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES; mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES; mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)) & IXGBE_FWSM_MODE_MASK); hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf; /* EEPROM */ eeprom->ops.read = ixgbe_read_eeprom_82599; eeprom->ops.read_buffer = ixgbe_read_eeprom_buffer_82599; /* Manageability interface */ mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic; mac->ops.bypass_rw = ixgbe_bypass_rw_generic; mac->ops.bypass_valid_rd = ixgbe_bypass_valid_rd_generic; mac->ops.bypass_set = ixgbe_bypass_set_generic; mac->ops.bypass_rd_eep = ixgbe_bypass_rd_eep_generic; mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic; return ret_val; } /** * ixgbe_get_link_capabilities_82599 - Determines link capabilities * @hw: pointer to hardware structure * @speed: pointer to link speed - * @autoneg: TRUE when autoneg or autotry is enabled + * @autoneg: true when autoneg or autotry is enabled * * Determines the link capabilities by reading the AUTOC register. 
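* * Editor's illustration (added example, not in the original source): if the stored AUTOC has LMS = IXGBE_AUTOC_LMS_KX4_KX_KR with IXGBE_AUTOC_KX4_SUPP and IXGBE_AUTOC_KX_SUPP set, the function reports *speed = IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL and *autoneg = true; with LMS = IXGBE_AUTOC_LMS_10G_SERIAL it reports 10GB_FULL only and *autoneg = false.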
**/ s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *autoneg) { s32 status = IXGBE_SUCCESS; u32 autoc = 0; DEBUGFUNC("ixgbe_get_link_capabilities_82599"); /* Check if 1G SFP module. */ if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) { *speed = IXGBE_LINK_SPEED_1GB_FULL; - *autoneg = TRUE; + *autoneg = true; goto out; } /* * Determine link capabilities based on the stored value of AUTOC, * which represents EEPROM defaults. If AUTOC value has not * been stored, use the current register values. */ if (hw->mac.orig_link_settings_stored) autoc = hw->mac.orig_autoc; else autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); switch (autoc & IXGBE_AUTOC_LMS_MASK) { case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: *speed = IXGBE_LINK_SPEED_1GB_FULL; - *autoneg = FALSE; + *autoneg = false; break; case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: *speed = IXGBE_LINK_SPEED_10GB_FULL; - *autoneg = FALSE; + *autoneg = false; break; case IXGBE_AUTOC_LMS_1G_AN: *speed = IXGBE_LINK_SPEED_1GB_FULL; - *autoneg = TRUE; + *autoneg = true; break; case IXGBE_AUTOC_LMS_10G_SERIAL: *speed = IXGBE_LINK_SPEED_10GB_FULL; - *autoneg = FALSE; + *autoneg = false; break; case IXGBE_AUTOC_LMS_KX4_KX_KR: case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: *speed = IXGBE_LINK_SPEED_UNKNOWN; if (autoc & IXGBE_AUTOC_KR_SUPP) *speed |= IXGBE_LINK_SPEED_10GB_FULL; if (autoc & IXGBE_AUTOC_KX4_SUPP) *speed |= IXGBE_LINK_SPEED_10GB_FULL; if (autoc & IXGBE_AUTOC_KX_SUPP) *speed |= IXGBE_LINK_SPEED_1GB_FULL; - *autoneg = TRUE; + *autoneg = true; break; case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII: *speed = IXGBE_LINK_SPEED_100_FULL; if (autoc & IXGBE_AUTOC_KR_SUPP) *speed |= IXGBE_LINK_SPEED_10GB_FULL; if (autoc & IXGBE_AUTOC_KX4_SUPP) *speed |= IXGBE_LINK_SPEED_10GB_FULL; if (autoc & IXGBE_AUTOC_KX_SUPP) *speed |= IXGBE_LINK_SPEED_1GB_FULL; - *autoneg = TRUE; + *autoneg = true; break; case IXGBE_AUTOC_LMS_SGMII_1G_100M: *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL; - *autoneg = FALSE; + *autoneg = false; break; default: status = IXGBE_ERR_LINK_SETUP; goto out; break; } if (hw->phy.multispeed_fiber) { *speed |= IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL; /* QSFP must not enable full auto-negotiation * Limited autoneg is enabled at 1G */ if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp) - *autoneg = FALSE; + *autoneg = false; else - *autoneg = TRUE; + *autoneg = true; } out: return status; } /** * ixgbe_get_media_type_82599 - Get media type * @hw: pointer to hardware structure * * Returns the media type (fiber, copper, backplane) **/ enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) { enum ixgbe_media_type media_type; DEBUGFUNC("ixgbe_get_media_type_82599"); /* Detect if there is a copper PHY attached. 
*/ switch (hw->phy.type) { case ixgbe_phy_cu_unknown: case ixgbe_phy_tn: media_type = ixgbe_media_type_copper; goto out; default: break; } switch (hw->device_id) { case IXGBE_DEV_ID_82599_KX4: case IXGBE_DEV_ID_82599_KX4_MEZZ: case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: case IXGBE_DEV_ID_82599_KR: case IXGBE_DEV_ID_82599_BACKPLANE_FCOE: case IXGBE_DEV_ID_82599_XAUI_LOM: /* Default device ID is mezzanine card KX/KX4 */ media_type = ixgbe_media_type_backplane; break; case IXGBE_DEV_ID_82599_SFP: case IXGBE_DEV_ID_82599_SFP_FCOE: case IXGBE_DEV_ID_82599_SFP_EM: case IXGBE_DEV_ID_82599_SFP_SF2: case IXGBE_DEV_ID_82599_SFP_SF_QP: case IXGBE_DEV_ID_82599EN_SFP: media_type = ixgbe_media_type_fiber; break; case IXGBE_DEV_ID_82599_CX4: media_type = ixgbe_media_type_cx4; break; case IXGBE_DEV_ID_82599_T3_LOM: media_type = ixgbe_media_type_copper; break; case IXGBE_DEV_ID_82599_QSFP_SF_QP: media_type = ixgbe_media_type_fiber_qsfp; break; case IXGBE_DEV_ID_82599_BYPASS: media_type = ixgbe_media_type_fiber_fixed; - hw->phy.multispeed_fiber = TRUE; + hw->phy.multispeed_fiber = true; break; default: media_type = ixgbe_media_type_unknown; break; } out: return media_type; } /** * ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3 * @hw: pointer to hardware structure * * Disables link during D3 power down sequence. * **/ void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw) { u32 autoc2_reg; u16 ee_ctrl_2 = 0; DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599"); ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2); if (!ixgbe_mng_present(hw) && !hw->wol_enabled && ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) { autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2); autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK; IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg); } } /** * ixgbe_start_mac_link_82599 - Setup MAC link settings * @hw: pointer to hardware structure - * @autoneg_wait_to_complete: TRUE when waiting for completion is needed + * @autoneg_wait_to_complete: true when waiting for completion is needed * * Configures link settings based on values in the ixgbe_hw struct. * Restarts the link. Performs autonegotiation if needed. **/ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, bool autoneg_wait_to_complete) { u32 autoc_reg; u32 links_reg; u32 i; s32 status = IXGBE_SUCCESS; - bool got_lock = FALSE; + bool got_lock = false; DEBUGFUNC("ixgbe_start_mac_link_82599"); /* reset_pipeline requires us to hold this lock as it writes to * AUTOC. 
*/ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); if (status != IXGBE_SUCCESS) goto out; - got_lock = TRUE; + got_lock = true; } /* Restart link */ ixgbe_reset_pipeline_82599(hw); if (got_lock) hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); /* Only poll for autoneg to complete if specified to do so */ if (autoneg_wait_to_complete) { autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == IXGBE_AUTOC_LMS_KX4_KX_KR || (autoc_reg & IXGBE_AUTOC_LMS_MASK) == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || (autoc_reg & IXGBE_AUTOC_LMS_MASK) == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { links_reg = 0; /* Just in case Autoneg time = 0 */ for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); if (links_reg & IXGBE_LINKS_KX_AN_COMP) break; msec_delay(100); } if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; DEBUGOUT("Autoneg did not complete.\n"); } } } /* Add delay to filter out noise during initial link setup */ msec_delay(50); out: return status; } /** * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser * @hw: pointer to hardware structure * * The base drivers may require better control over SFP+ module * PHY states. This includes selectively shutting down the Tx * laser on the PHY, effectively halting physical link. **/ void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) { u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); /* Blocked by MNG FW so bail */ if (ixgbe_check_reset_blocked(hw)) return; /* Disable Tx laser; allow 100us to go dark per spec */ esdp_reg |= IXGBE_ESDP_SDP3; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); IXGBE_WRITE_FLUSH(hw); usec_delay(100); } /** * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser * @hw: pointer to hardware structure * * The base drivers may require better control over SFP+ module * PHY states. This includes selectively turning on the Tx * laser on the PHY, effectively starting physical link. **/ void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) { u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); /* Enable Tx laser; allow 100ms to light up */ esdp_reg &= ~IXGBE_ESDP_SDP3; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); IXGBE_WRITE_FLUSH(hw); msec_delay(100); } /** * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser * @hw: pointer to hardware structure * * When the driver changes the link speeds that it can support, - * it sets autotry_restart to TRUE to indicate that we need to + * it sets autotry_restart to true to indicate that we need to * initiate a new autotry session with the link partner. To do * so, we set the speed then disable and re-enable the Tx laser, to * alert the link partner that it also needs to restart autotry on its - * end. This is consistent with TRUE clause 37 autoneg, which also + * end. This is consistent with true clause 37 autoneg, which also * involves a loss of signal. **/ void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) { DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber"); /* Blocked by MNG FW so bail */ if (ixgbe_check_reset_blocked(hw)) return; if (hw->mac.autotry_restart) { ixgbe_disable_tx_laser_multispeed_fiber(hw); ixgbe_enable_tx_laser_multispeed_fiber(hw); - hw->mac.autotry_restart = FALSE; + hw->mac.autotry_restart = false; } } /** * ixgbe_set_hard_rate_select_speed - Set module link speed * @hw: pointer to hardware structure * @speed: link speed to set * * Set module link speed via RS0/RS1 rate select pins.
*/ void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed) { u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); switch (speed) { case IXGBE_LINK_SPEED_10GB_FULL: esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); break; case IXGBE_LINK_SPEED_1GB_FULL: esdp_reg &= ~IXGBE_ESDP_SDP5; esdp_reg |= IXGBE_ESDP_SDP5_DIR; break; default: DEBUGOUT("Invalid fixed module speed\n"); return; } IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); IXGBE_WRITE_FLUSH(hw); } /** * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed * @hw: pointer to hardware structure * @speed: new link speed - * @autoneg_wait_to_complete: TRUE when waiting for completion is needed + * @autoneg_wait_to_complete: true when waiting for completion is needed * * Implements the Intel SmartSpeed algorithm. **/ s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { s32 status = IXGBE_SUCCESS; ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; s32 i, j; - bool link_up = FALSE; + bool link_up = false; u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); DEBUGFUNC("ixgbe_setup_mac_link_smartspeed"); /* Set autoneg_advertised value based on input link speed */ hw->phy.autoneg_advertised = 0; if (speed & IXGBE_LINK_SPEED_10GB_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; if (speed & IXGBE_LINK_SPEED_1GB_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; if (speed & IXGBE_LINK_SPEED_100_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; /* * Implement Intel SmartSpeed algorithm. SmartSpeed will reduce the * autoneg advertisement if link is unable to be established at the * highest negotiated rate. This can sometimes happen due to integrity * issues with the physical media connection. */ /* First, try to get link with full advertisement */ - hw->phy.smart_speed_active = FALSE; + hw->phy.smart_speed_active = false; for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) { status = ixgbe_setup_mac_link_82599(hw, speed, autoneg_wait_to_complete); if (status != IXGBE_SUCCESS) goto out; /* * Wait for the controller to acquire link. Per IEEE 802.3ap, * Section 73.10.2, we may have to wait up to 500ms if KR is * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per * Table 9 in the AN MAS. */ for (i = 0; i < 5; i++) { msec_delay(100); /* If we have link, just jump out */ status = ixgbe_check_link(hw, &link_speed, &link_up, - FALSE); + false); if (status != IXGBE_SUCCESS) goto out; if (link_up) goto out; } } /* * We didn't get link. If we advertised KR plus one of KX4/KX * (or BX4/BX), then disable KR and try again. */ if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) || ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0)) goto out; /* Turn SmartSpeed on to disable KR support */ - hw->phy.smart_speed_active = TRUE; + hw->phy.smart_speed_active = true; status = ixgbe_setup_mac_link_82599(hw, speed, autoneg_wait_to_complete); if (status != IXGBE_SUCCESS) goto out; /* * Wait for the controller to acquire link. 600ms will allow for * the AN link_fail_inhibit_timer as well for multiple cycles of * parallel detect, both 10g and 1g. This allows for the maximum * connect attempts as defined in the AN MAS table 73-7. */ for (i = 0; i < 6; i++) { msec_delay(100); /* If we have link, just jump out */ - status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE); + status = ixgbe_check_link(hw, &link_speed, &link_up, false); if (status != IXGBE_SUCCESS) goto out; if (link_up) goto out; } /* We didn't get link. Turn SmartSpeed back off. 
*/ - hw->phy.smart_speed_active = FALSE; + hw->phy.smart_speed_active = false; status = ixgbe_setup_mac_link_82599(hw, speed, autoneg_wait_to_complete); out: if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL)) DEBUGOUT("Smartspeed has downgraded the link speed " "from the maximum advertised\n"); return status; } /** * ixgbe_setup_mac_link_82599 - Set MAC link speed * @hw: pointer to hardware structure * @speed: new link speed - * @autoneg_wait_to_complete: TRUE when waiting for completion is needed + * @autoneg_wait_to_complete: true when waiting for completion is needed * * Set the link speed in the AUTOC register and restarts link. **/ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { - bool autoneg = FALSE; + bool autoneg = false; s32 status = IXGBE_SUCCESS; u32 pma_pmd_1g, link_mode; u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); /* holds the value of AUTOC register at this current point in time */ u32 orig_autoc = 0; /* holds the cached value of AUTOC register */ u32 autoc = current_autoc; /* Temporary variable used for comparison purposes */ u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; u32 links_reg; u32 i; ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; DEBUGFUNC("ixgbe_setup_mac_link_82599"); /* Check to see if speed passed in is supported. */ status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg); if (status) goto out; speed &= link_capabilities; if (speed == IXGBE_LINK_SPEED_UNKNOWN) { status = IXGBE_ERR_LINK_SETUP; goto out; } /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/ if (hw->mac.orig_link_settings_stored) orig_autoc = hw->mac.orig_autoc; else orig_autoc = autoc; link_mode = autoc & IXGBE_AUTOC_LMS_MASK; pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { /* Set KX4/KX/KR support according to speed requested */ autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP); if (speed & IXGBE_LINK_SPEED_10GB_FULL) { if (orig_autoc & IXGBE_AUTOC_KX4_SUPP) autoc |= IXGBE_AUTOC_KX4_SUPP; if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) && - (hw->phy.smart_speed_active == FALSE)) + (hw->phy.smart_speed_active == false)) autoc |= IXGBE_AUTOC_KR_SUPP; } if (speed & IXGBE_LINK_SPEED_1GB_FULL) autoc |= IXGBE_AUTOC_KX_SUPP; } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) && (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN || link_mode == IXGBE_AUTOC_LMS_1G_AN)) { /* Switch from 1G SFI to 10G SFI if requested */ if ((speed == IXGBE_LINK_SPEED_10GB_FULL) && (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) { autoc &= ~IXGBE_AUTOC_LMS_MASK; autoc |= IXGBE_AUTOC_LMS_10G_SERIAL; } } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) && (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) { /* Switch from 10G SFI to 1G SFI if requested */ if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) { autoc &= ~IXGBE_AUTOC_LMS_MASK; if (autoneg || hw->phy.type == ixgbe_phy_qsfp_intel) autoc |= IXGBE_AUTOC_LMS_1G_AN; else autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN; } } if (autoc != current_autoc) { /* Restart link */ - status = hw->mac.ops.prot_autoc_write(hw, autoc, FALSE); + status = hw->mac.ops.prot_autoc_write(hw, autoc, false); if (status != IXGBE_SUCCESS) goto out; /* Only poll for autoneg to complete if specified to do so */ if (autoneg_wait_to_complete) { if 
(link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { links_reg = 0; /*Just in case Autoneg time=0*/ for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); if (links_reg & IXGBE_LINKS_KX_AN_COMP) break; msec_delay(100); } if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; DEBUGOUT("Autoneg did not complete.\n"); } } } /* Add delay to filter out noise during initial link setup */ msec_delay(50); } out: return status; } /** * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field * @hw: pointer to hardware structure * @speed: new link speed - * @autoneg_wait_to_complete: TRUE if waiting is needed to complete + * @autoneg_wait_to_complete: true if waiting is needed to complete * * Restarts link on PHY and MAC based on settings passed in. **/ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { s32 status; DEBUGFUNC("ixgbe_setup_copper_link_82599"); /* Setup the PHY according to input speed */ status = hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete); /* Set up MAC */ ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete); return status; } /** * ixgbe_reset_hw_82599 - Perform hardware reset * @hw: pointer to hardware structure * * Resets the hardware by resetting the transmit and receive units, masks * and clears all interrupts, performs a PHY reset, and performs a link (MAC) * reset. **/ s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) { ixgbe_link_speed link_speed; s32 status; u32 ctrl = 0; u32 i, autoc, autoc2; u32 curr_lms; - bool link_up = FALSE; + bool link_up = false; DEBUGFUNC("ixgbe_reset_hw_82599"); /* Call adapter stop to disable tx/rx and clear interrupts */ status = hw->mac.ops.stop_adapter(hw); if (status != IXGBE_SUCCESS) goto reset_hw_out; /* flush pending Tx transactions */ ixgbe_clear_tx_pending(hw); /* PHY ops must be identified and initialized prior to reset */ /* Identify PHY and related function pointers */ status = hw->phy.ops.init(hw); if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) goto reset_hw_out; /* Setup SFP module if there is one present. */ if (hw->phy.sfp_setup_needed) { status = hw->mac.ops.setup_sfp(hw); - hw->phy.sfp_setup_needed = FALSE; + hw->phy.sfp_setup_needed = false; } if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) goto reset_hw_out; /* Reset PHY */ - if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL) + if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL) hw->phy.ops.reset(hw); /* remember AUTOC from before we reset */ curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK; mac_reset_top: /* * Issue global reset to the MAC. Needs to be SW reset if link is up. * If link reset is used when link is up, it might reset the PHY when * mng is using it. If link is down or the flag to force full link * reset is set, then perform link reset.
*/ ctrl = IXGBE_CTRL_LNK_RST; if (!hw->force_full_reset) { - hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE); + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); if (link_up) ctrl = IXGBE_CTRL_RST; } ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); IXGBE_WRITE_FLUSH(hw); /* Poll for reset bit to self-clear meaning reset is complete */ for (i = 0; i < 10; i++) { usec_delay(1); ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); if (!(ctrl & IXGBE_CTRL_RST_MASK)) break; } if (ctrl & IXGBE_CTRL_RST_MASK) { status = IXGBE_ERR_RESET_FAILED; DEBUGOUT("Reset polling failed to complete.\n"); } msec_delay(50); /* * Double resets are required for recovery from certain error * conditions. Between resets, it is necessary to stall to * allow time for any pending HW events to complete. */ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; goto mac_reset_top; } /* * Store the original AUTOC/AUTOC2 values if they have not been * stored off yet. Otherwise restore the stored original * values since the reset operation sets back to defaults. */ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); /* Enable link if disabled in NVM */ if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) { autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK; IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2); IXGBE_WRITE_FLUSH(hw); } - if (hw->mac.orig_link_settings_stored == FALSE) { + if (hw->mac.orig_link_settings_stored == false) { hw->mac.orig_autoc = autoc; hw->mac.orig_autoc2 = autoc2; - hw->mac.orig_link_settings_stored = TRUE; + hw->mac.orig_link_settings_stored = true; } else { /* If MNG FW is running on a multi-speed device that * doesn't autoneg without driver support we need to * leave LMS in the state it was before the MAC reset. * Likewise if we support WoL we don't want to change * the LMS state. */ if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) || hw->wol_enabled) hw->mac.orig_autoc = (hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) | curr_lms; if (autoc != hw->mac.orig_autoc) { status = hw->mac.ops.prot_autoc_write(hw, hw->mac.orig_autoc, - FALSE); + false); if (status != IXGBE_SUCCESS) goto reset_hw_out; } if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) != (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) { autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK; autoc2 |= (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK); IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2); } } /* Store the permanent mac address */ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); /* * Store MAC address from RAR0, clear receive address registers, and * clear the multicast table. Also reset num_rar_entries to 128, * since we modify this value when programming the SAN MAC address.
*/ hw->mac.num_rar_entries = 128; hw->mac.ops.init_rx_addrs(hw); /* Store the permanent SAN mac address */ hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); /* Add the SAN MAC address to the RAR only if it's a valid address */ if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { /* Save the SAN MAC RAR index */ hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index, hw->mac.san_addr, 0, IXGBE_RAH_AV); /* clear VMDq pool/queue selection for this RAR */ hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index, IXGBE_CLEAR_VMDQ_ALL); /* Reserve the last RAR for the SAN MAC address */ hw->mac.num_rar_entries--; } /* Store the alternative WWNN/WWPN prefix */ hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, &hw->mac.wwpn_prefix); reset_hw_out: return status; } /** * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete * @hw: pointer to hardware structure * @fdircmd: current value of FDIRCMD register */ static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd) { int i; for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) { *fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD); if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK)) return IXGBE_SUCCESS; usec_delay(10); } return IXGBE_ERR_FDIR_CMD_INCOMPLETE; } /** * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables. * @hw: pointer to hardware structure **/ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) { s32 err; int i; u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); u32 fdircmd; fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE; DEBUGFUNC("ixgbe_reinit_fdir_tables_82599"); /* * Before starting the reinitialization process, * FDIRCMD.CMD must be zero. */ err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); if (err) { DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n"); return err; } IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0); IXGBE_WRITE_FLUSH(hw); /* * The 82599 adapter's flow director init flow cannot be restarted. * Work around the 82599 silicon errata by performing the following * steps before re-writing the FDIRCTRL control register with the * same value: * - write 1 to bit 8 of the FDIRCMD register, and * - write 0 to bit 8 of the FDIRCMD register */ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) | IXGBE_FDIRCMD_CLEARHT)); IXGBE_WRITE_FLUSH(hw); IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & ~IXGBE_FDIRCMD_CLEARHT)); IXGBE_WRITE_FLUSH(hw); /* * Clear FDIR Hash register to clear any leftover hashes * waiting to be programmed.
*/ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00); IXGBE_WRITE_FLUSH(hw); IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); IXGBE_WRITE_FLUSH(hw); /* Poll init-done after we write FDIRCTRL register */ for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & IXGBE_FDIRCTRL_INIT_DONE) break; msec_delay(1); } if (i >= IXGBE_FDIR_INIT_DONE_POLL) { DEBUGOUT("Flow Director Signature poll time exceeded!\n"); return IXGBE_ERR_FDIR_REINIT_FAILED; } /* Clear FDIR statistics registers (read to clear) */ IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT); IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT); IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); IXGBE_READ_REG(hw, IXGBE_FDIRMISS); IXGBE_READ_REG(hw, IXGBE_FDIRLEN); return IXGBE_SUCCESS; } /** * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers * @hw: pointer to hardware structure * @fdirctrl: value to write to flow director control register **/ static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl) { int i; DEBUGFUNC("ixgbe_fdir_enable_82599"); /* Prime the keys for hashing */ IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY); IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY); /* * Poll init-done after we write the register. Estimated times: * 10G: PBALLOC = 11b, timing is 60us * 1G: PBALLOC = 11b, timing is 600us * 100M: PBALLOC = 11b, timing is 6ms * * Multiply these timings by 4 if under full Rx load * * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for * 1 msec per poll time. If we're at line rate and drop to 100M, then * this might not finish in our poll time, but we can live with that * for now. */ IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); IXGBE_WRITE_FLUSH(hw); for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & IXGBE_FDIRCTRL_INIT_DONE) break; msec_delay(1); } if (i >= IXGBE_FDIR_INIT_DONE_POLL) DEBUGOUT("Flow Director poll time exceeded!\n"); } /** * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters * @hw: pointer to hardware structure * @fdirctrl: value to write to flow director control register, initially * contains just the value of the Rx packet buffer allocation **/ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl) { DEBUGFUNC("ixgbe_init_fdir_signature_82599"); /* * Continue setup of fdirctrl register bits: * Move the flexible bytes to use the ethertype - shift 6 words * Set the maximum length per hash bucket to 0xA filters * Send interrupt when 64 filters are left */ fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) | (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT); /* write hashes and fdirctrl register, poll for completion */ ixgbe_fdir_enable_82599(hw, fdirctrl); return IXGBE_SUCCESS; } /** * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters * @hw: pointer to hardware structure * @fdirctrl: value to write to flow director control register, initially * contains just the value of the Rx packet buffer allocation - * @cloud_mode: TRUE - cloud mode, FALSE - other mode + * @cloud_mode: true - cloud mode, false - other mode **/ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl, bool cloud_mode) { UNREFERENCED_1PARAMETER(cloud_mode); DEBUGFUNC("ixgbe_init_fdir_perfect_82599"); /* * Continue setup of fdirctrl register bits: * Turn perfect match filtering on * Report hash in RSS field of Rx wb descriptor * Initialize the drop queue to queue 127 * Move the flexible bytes to use the ethertype - shift 6 words * Set
the maximum length per hash bucket to 0xA filters * Send interrupt when 64 (0x4 * 16) filters are left */ fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH | IXGBE_FDIRCTRL_REPORT_STATUS | (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) | (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) | (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT); if (cloud_mode) fdirctrl |=(IXGBE_FDIRCTRL_FILTERMODE_CLOUD << IXGBE_FDIRCTRL_FILTERMODE_SHIFT); /* write hashes and fdirctrl register, poll for completion */ ixgbe_fdir_enable_82599(hw, fdirctrl); return IXGBE_SUCCESS; } /** * ixgbe_set_fdir_drop_queue_82599 - Set Flow Director drop queue * @hw: pointer to hardware structure * @dropqueue: Rx queue index used for the dropped packets **/ void ixgbe_set_fdir_drop_queue_82599(struct ixgbe_hw *hw, u8 dropqueue) { u32 fdirctrl; DEBUGFUNC("ixgbe_set_fdir_drop_queue_82599"); /* Clear init done bit and drop queue field */ fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); fdirctrl &= ~(IXGBE_FDIRCTRL_DROP_Q_MASK | IXGBE_FDIRCTRL_INIT_DONE); /* Set drop queue */ fdirctrl |= (dropqueue << IXGBE_FDIRCTRL_DROP_Q_SHIFT); if ((hw->mac.type == ixgbe_mac_X550) || (hw->mac.type == ixgbe_mac_X550EM_x) || (hw->mac.type == ixgbe_mac_X550EM_a)) fdirctrl |= IXGBE_FDIRCTRL_DROP_NO_MATCH; IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) | IXGBE_FDIRCMD_CLEARHT)); IXGBE_WRITE_FLUSH(hw); IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & ~IXGBE_FDIRCMD_CLEARHT)); IXGBE_WRITE_FLUSH(hw); /* write hashes and fdirctrl register, poll for completion */ ixgbe_fdir_enable_82599(hw, fdirctrl); } /* * These defines allow us to quickly generate all of the necessary instructions * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION * for values 0 through 15 */ #define IXGBE_ATR_COMMON_HASH_KEY \ (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY) #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \ do { \ u32 n = (_n); \ if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \ common_hash ^= lo_hash_dword >> n; \ else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ bucket_hash ^= lo_hash_dword >> n; \ else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \ sig_hash ^= lo_hash_dword << (16 - n); \ if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \ common_hash ^= hi_hash_dword >> n; \ else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ bucket_hash ^= hi_hash_dword >> n; \ else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \ sig_hash ^= hi_hash_dword << (16 - n); \ } while (0) /** * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash * @input: input bitstream to compute the hash on * @common: compressed common input dword * * This function is almost identical to the function above but contains * several optimizations such as unwinding all of the loops, letting the * compiler work out all of the conditional ifs since the keys are static * defines, and computing two keys at once since the hashed dword stream * will be the same for both keys. 
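* * Editor's illustration (added example, not in the original source): each IXGBE_COMPUTE_SIG_HASH_ITERATION(n) folds key bit n (against lo_hash_dword) and key bit n + 16 (against hi_hash_dword) into the running hashes; for n = 1, if bit 1 is set in IXGBE_ATR_BUCKET_HASH_KEY but not in the common key, the macro contributes bucket_hash ^= lo_hash_dword >> 1, so the final hash is an XOR of shifted copies of the two hash dwords selected by the key bits.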
**/ u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, union ixgbe_atr_hash_dword common) { u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; u32 sig_hash = 0, bucket_hash = 0, common_hash = 0; /* record the flow_vm_vlan bits as they are a key part to the hash */ flow_vm_vlan = IXGBE_NTOHL(input.dword); /* generate common hash dword */ hi_hash_dword = IXGBE_NTOHL(common.dword); /* low dword is word swapped version of common */ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); /* apply flow ID/VM pool/VLAN ID bits to hash words */ hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); /* Process bits 0 and 16 */ IXGBE_COMPUTE_SIG_HASH_ITERATION(0); /* * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to * delay this because bit 0 of the stream should not be processed * so we do not add the VLAN until after bit 0 was processed */ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); /* Process the remaining 30 bits of the key */ IXGBE_COMPUTE_SIG_HASH_ITERATION(1); IXGBE_COMPUTE_SIG_HASH_ITERATION(2); IXGBE_COMPUTE_SIG_HASH_ITERATION(3); IXGBE_COMPUTE_SIG_HASH_ITERATION(4); IXGBE_COMPUTE_SIG_HASH_ITERATION(5); IXGBE_COMPUTE_SIG_HASH_ITERATION(6); IXGBE_COMPUTE_SIG_HASH_ITERATION(7); IXGBE_COMPUTE_SIG_HASH_ITERATION(8); IXGBE_COMPUTE_SIG_HASH_ITERATION(9); IXGBE_COMPUTE_SIG_HASH_ITERATION(10); IXGBE_COMPUTE_SIG_HASH_ITERATION(11); IXGBE_COMPUTE_SIG_HASH_ITERATION(12); IXGBE_COMPUTE_SIG_HASH_ITERATION(13); IXGBE_COMPUTE_SIG_HASH_ITERATION(14); IXGBE_COMPUTE_SIG_HASH_ITERATION(15); /* combine common_hash result with signature and bucket hashes */ bucket_hash ^= common_hash; bucket_hash &= IXGBE_ATR_HASH_MASK; sig_hash ^= common_hash << 16; sig_hash &= IXGBE_ATR_HASH_MASK << 16; /* return completed signature hash */ return sig_hash ^ bucket_hash; } /** * ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter * @hw: pointer to hardware structure * @input: unique input dword * @common: compressed common input dword * @queue: queue index to direct traffic to * * Note that the tunnel bit in input must not be set when the hardware * tunneling support does not exist. **/ void ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_hash_dword input, union ixgbe_atr_hash_dword common, u8 queue) { u64 fdirhashcmd; u8 flow_type; bool tunnel; u32 fdircmd; DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599"); /* * Get the flow_type in order to program FDIRCMD properly * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 * fifth is FDIRCMD.TUNNEL_FILTER */ tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK); flow_type = input.formatted.flow_type & (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1); switch (flow_type) { case IXGBE_ATR_FLOW_TYPE_TCPV4: case IXGBE_ATR_FLOW_TYPE_UDPV4: case IXGBE_ATR_FLOW_TYPE_SCTPV4: case IXGBE_ATR_FLOW_TYPE_TCPV6: case IXGBE_ATR_FLOW_TYPE_UDPV6: case IXGBE_ATR_FLOW_TYPE_SCTPV6: break; default: DEBUGOUT(" Error on flow type input\n"); return; } /* configure FDIRCMD register */ fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; if (tunnel) fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER; /* * The lower 32 bits of fdirhashcmd are for FDIRHASH, the upper 32 bits * are for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
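* * Editor's note (added, not in the original source): ixgbe_atr_compute_sig_hash_82599() above returns the bucket hash in the low word and the signature hash in the high word of the FDIRHASH half, so the single 64-bit write programs hash and command in one access.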
*/ fdirhashcmd = (u64)fdircmd << 32; fdirhashcmd |= (u64)ixgbe_atr_compute_sig_hash_82599(input, common); IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd); DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd); return; } #define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \ do { \ u32 n = (_n); \ if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ bucket_hash ^= lo_hash_dword >> n; \ if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ bucket_hash ^= hi_hash_dword >> n; \ } while (0) /** * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash * @input: input bitstream to compute the hash on * @input_mask: mask for the input bitstream * * This function serves two main purposes. First it applies the input_mask * to the atr_input resulting in a cleaned up atr_input data stream. * Secondly it computes the hash and stores it in the bkt_hash field at * the end of the input byte stream. This way it will be available for * future use without needing to recompute the hash. **/ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, union ixgbe_atr_input *input_mask) { u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; u32 bucket_hash = 0; u32 hi_dword = 0; u32 i = 0; /* Apply masks to input data */ for (i = 0; i < 14; i++) input->dword_stream[i] &= input_mask->dword_stream[i]; /* record the flow_vm_vlan bits as they are a key part to the hash */ flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]); /* generate common hash dword */ for (i = 1; i <= 13; i++) hi_dword ^= input->dword_stream[i]; hi_hash_dword = IXGBE_NTOHL(hi_dword); /* low dword is word swapped version of common */ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); /* apply flow ID/VM pool/VLAN ID bits to hash words */ hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); /* Process bits 0 and 16 */ IXGBE_COMPUTE_BKT_HASH_ITERATION(0); /* * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to * delay this because bit 0 of the stream should not be processed * so we do not add the VLAN until after bit 0 was processed */ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); /* Process the remaining 30 bits of the key */ for (i = 1; i <= 15; i++) IXGBE_COMPUTE_BKT_HASH_ITERATION(i); /* * Limit hash to 13 bits since max bucket count is 8K. * Store result at the end of the input stream. */ input->formatted.bkt_hash = bucket_hash & 0x1FFF; } /** * ixgbe_get_fdirtcpm_82599 - generate a TCP port mask from atr_input_masks * @input_mask: mask to be bit swapped * * The source and destination port masks for flow director are bit swapped * in that bit 15 affects bit 0, 14 affects bit 1, 13 affects bit 2, and so * on. In order to generate a correctly swapped value we need to bit swap * the mask and that is what is accomplished by this function. **/ static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask) { u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port); mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT; mask |= (u32)IXGBE_NTOHS(input_mask->formatted.src_port); mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1); mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2); mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4); return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8); } /* * These two macros are meant to address the fact that we have registers * that are either all or in part big-endian. As a result on big-endian * systems we will end up byte swapping the value to little-endian before * it is byte swapped again and written to the hardware in the original * big-endian format.
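* * Editor's illustration (added example, not in the original source): IXGBE_STORE_AS_BE32(0x11223344) evaluates to 0x44332211, so on a little-endian host IXGBE_WRITE_REG_BE32() first undoes network order with IXGBE_NTOHL() and then re-swaps with IXGBE_STORE_AS_BE32(), leaving the bytes in their original big-endian order.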
*/ #define IXGBE_STORE_AS_BE32(_value) \ (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \ (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24)) #define IXGBE_WRITE_REG_BE32(a, reg, value) \ IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value))) #define IXGBE_STORE_AS_BE16(_value) \ IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8)) s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, union ixgbe_atr_input *input_mask, bool cloud_mode) { /* mask IPv6 since it is currently not supported */ u32 fdirm = IXGBE_FDIRM_DIPv6; u32 fdirtcpm; u32 fdirip6m; UNREFERENCED_1PARAMETER(cloud_mode); DEBUGFUNC("ixgbe_fdir_set_input_mask_82599"); /* * Program the relevant mask registers. If src/dst_port or src/dst_addr * are zero, then assume a full mask for that field. Also assume that * a VLAN of 0 is unspecified, so mask that out as well. L4type * cannot be masked out in this implementation. * * This also assumes IPv4 only. IPv6 masking isn't supported at this * point in time. */ /* verify bucket hash is cleared on hash generation */ if (input_mask->formatted.bkt_hash) DEBUGOUT(" bucket hash should always be 0 in mask\n"); /* Program FDIRM and verify partial masks */ switch (input_mask->formatted.vm_pool & 0x7F) { case 0x0: fdirm |= IXGBE_FDIRM_POOL; /* fall through */ case 0x7F: break; default: DEBUGOUT(" Error on vm pool mask\n"); return IXGBE_ERR_CONFIG; } switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) { case 0x0: fdirm |= IXGBE_FDIRM_L4P; if (input_mask->formatted.dst_port || input_mask->formatted.src_port) { DEBUGOUT(" Error on src/dst port mask\n"); return IXGBE_ERR_CONFIG; } /* fall through */ case IXGBE_ATR_L4TYPE_MASK: break; default: DEBUGOUT(" Error on flow type mask\n"); return IXGBE_ERR_CONFIG; } switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) { case 0x0000: /* mask VLAN ID */ fdirm |= IXGBE_FDIRM_VLANID; /* FALLTHROUGH */ case 0x0FFF: /* mask VLAN priority */ fdirm |= IXGBE_FDIRM_VLANP; break; case 0xE000: /* mask VLAN ID only */ fdirm |= IXGBE_FDIRM_VLANID; /* fall through */ case 0xEFFF: /* no VLAN fields masked */ break; default: DEBUGOUT(" Error on VLAN mask\n"); return IXGBE_ERR_CONFIG; } switch (input_mask->formatted.flex_bytes & 0xFFFF) { case 0x0000: /* Mask Flex Bytes */ fdirm |= IXGBE_FDIRM_FLEX; /* fall through */ case 0xFFFF: break; default: DEBUGOUT(" Error on flexible byte mask\n"); return IXGBE_ERR_CONFIG; } if (cloud_mode) { fdirm |= IXGBE_FDIRM_L3P; fdirip6m = ((u32) 0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT); fdirip6m |= IXGBE_FDIRIP6M_ALWAYS_MASK; switch (input_mask->formatted.inner_mac[0] & 0xFF) { case 0x00: /* Mask inner MAC, fall through */ fdirip6m |= IXGBE_FDIRIP6M_INNER_MAC; case 0xFF: break; default: DEBUGOUT(" Error on inner_mac byte mask\n"); return IXGBE_ERR_CONFIG; } switch (input_mask->formatted.tni_vni & 0xFFFFFFFF) { case 0x0: /* Mask vxlan id */ fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI; break; case 0x00FFFFFF: fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI_24; break; case 0xFFFFFFFF: break; default: DEBUGOUT(" Error on TNI/VNI byte mask\n"); return IXGBE_ERR_CONFIG; } switch (input_mask->formatted.tunnel_type & 0xFFFF) { case 0x0: /* Mask tunnel type, fall through */ fdirip6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE; case 0xFFFF: break; default: DEBUGOUT(" Error on tunnel type byte mask\n"); return IXGBE_ERR_CONFIG; } IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, fdirip6m); /* Set all bits in FDIRTCPM, FDIRUDPM, FDIRSCTPM, * FDIRSIP4M and FDIRDIP4M in cloud mode to allow * L3/L3 packets to tunnel.
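 *
 * (Editor's note: in these FDIR mask registers a set bit means "do not
 * compare this bit" - the non-cloud path below writes the complement of
 * the computed port mask for the same reason - so writing all ones here
 * excludes the L4 ports and inner IP addresses from the match entirely.)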
*/ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF); IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF); IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF); IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF); switch (hw->mac.type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_X550EM_a: IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF); break; default: break; } } /* Now mask VM pool and destination IPv6 - bits 5 and 2 */ IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); if (!cloud_mode) { /* store the TCP/UDP port masks, bit reversed from port * layout */ fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask); /* write both the same so that UDP and TCP use the same mask */ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm); IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm); /* also use it for SCTP */ switch (hw->mac.type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_X550EM_a: IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm); break; default: break; } /* store source and destination IP masks (big-endian) */ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, ~input_mask->formatted.src_ip[0]); IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, ~input_mask->formatted.dst_ip[0]); IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, 0xFFFFFFFF); } return IXGBE_SUCCESS; } s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_input *input, u16 soft_id, u8 queue, bool cloud_mode) { u32 fdirport, fdirvlan, fdirhash, fdircmd; u32 addr_low, addr_high; u32 cloud_type = 0; s32 err; UNREFERENCED_1PARAMETER(cloud_mode); DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599"); if (!cloud_mode) { /* currently IPv6 is not supported, must be programmed with 0 */ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), input->formatted.src_ip[0]); IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), input->formatted.src_ip[1]); IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), input->formatted.src_ip[2]); /* record the source address (big-endian) */ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]); /* record the first 32 bits of the destination address * (big-endian) */ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]); /* record source and destination port (little-endian) */ fdirport = IXGBE_NTOHS(input->formatted.dst_port); fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT; fdirport |= (u32)IXGBE_NTOHS(input->formatted.src_port); IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport); } /* record VLAN (little-endian) and flex_bytes (big-endian) */ fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes); fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT; fdirvlan |= (u32)IXGBE_NTOHS(input->formatted.vlan_id); IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan); if (cloud_mode) { if (input->formatted.tunnel_type != 0) cloud_type = 0x80000000; addr_low = ((u32)input->formatted.inner_mac[0] | ((u32)input->formatted.inner_mac[1] << 8) | ((u32)input->formatted.inner_mac[2] << 16) | ((u32)input->formatted.inner_mac[3] << 24)); addr_high = ((u32)input->formatted.inner_mac[4] | ((u32)input->formatted.inner_mac[5] << 8)); cloud_type |= addr_high; IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), addr_low); IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), cloud_type); IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), input->formatted.tni_vni); } /* configure FDIRHASH register */ fdirhash = input->formatted.bkt_hash; fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); /* * flush all previous writes to make certain that the registers are * programmed prior to issuing the command */ IXGBE_WRITE_FLUSH(hw);
/* configure FDIRCMD register */ fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; if (queue == IXGBE_FDIR_DROP_QUEUE) fdircmd |= IXGBE_FDIRCMD_DROP; if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK) fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER; fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT; IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd); err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); if (err) { DEBUGOUT("Flow Director command did not complete!\n"); return err; } return IXGBE_SUCCESS; } s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_input *input, u16 soft_id) { u32 fdirhash; u32 fdircmd; s32 err; /* configure FDIRHASH register */ fdirhash = input->formatted.bkt_hash; fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); /* flush hash to HW */ IXGBE_WRITE_FLUSH(hw); /* Query if filter is present */ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT); err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); if (err) { DEBUGOUT("Flow Director command did not complete!\n"); return err; } /* if filter exists in hardware then remove it */ if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) { IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); IXGBE_WRITE_FLUSH(hw); IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_REMOVE_FLOW); } return IXGBE_SUCCESS; } /** * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter * @hw: pointer to hardware structure * @input: input bitstream * @input_mask: mask for the input bitstream * @soft_id: software index for the filters * @queue: queue index to direct traffic to * @cloud_mode: unused * * Note that the caller of this function must hold a lock before calling, * since the hardware writes must be protected from one another.
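 *
 * A minimal usage sketch (editor's illustration; the mutex and variable
 * names are hypothetical, not part of this API):
 *
 *	mtx_lock(&sc->fdir_mtx);
 *	err = ixgbe_fdir_add_perfect_filter_82599(hw, &input, &mask,
 *	    soft_id, queue, false);
 *	mtx_unlock(&sc->fdir_mtx);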
**/ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_input *input, union ixgbe_atr_input *input_mask, u16 soft_id, u8 queue, bool cloud_mode) { s32 err = IXGBE_ERR_CONFIG; UNREFERENCED_1PARAMETER(cloud_mode); DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599"); /* * Check flow_type formatting, and bail out before we touch the hardware * if there's a configuration issue */ switch (input->formatted.flow_type) { case IXGBE_ATR_FLOW_TYPE_IPV4: case IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4: input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK; if (input->formatted.dst_port || input->formatted.src_port) { DEBUGOUT(" Error on src/dst port\n"); return IXGBE_ERR_CONFIG; } break; case IXGBE_ATR_FLOW_TYPE_SCTPV4: case IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4: if (input->formatted.dst_port || input->formatted.src_port) { DEBUGOUT(" Error on src/dst port\n"); return IXGBE_ERR_CONFIG; } /* FALLTHROUGH */ case IXGBE_ATR_FLOW_TYPE_TCPV4: case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4: case IXGBE_ATR_FLOW_TYPE_UDPV4: case IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4: input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | IXGBE_ATR_L4TYPE_MASK; break; default: DEBUGOUT(" Error on flow type input\n"); return err; } /* program input mask into the HW */ err = ixgbe_fdir_set_input_mask_82599(hw, input_mask, cloud_mode); if (err) return err; /* apply mask and compute/store hash */ ixgbe_atr_compute_perfect_hash_82599(input, input_mask); /* program filters to filter memory */ return ixgbe_fdir_write_perfect_filter_82599(hw, input, soft_id, queue, cloud_mode); } /** * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register * @hw: pointer to hardware structure * @reg: analog register to read * @val: read value * * Performs read operation to Omer analog register specified. **/ s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val) { u32 core_ctl; DEBUGFUNC("ixgbe_read_analog_reg8_82599"); IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD | (reg << 8)); IXGBE_WRITE_FLUSH(hw); usec_delay(10); core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL); *val = (u8)core_ctl; return IXGBE_SUCCESS; } /** * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register * @hw: pointer to hardware structure * @reg: analog register to write * @val: value to write * * Performs write operation to Omer analog register specified. **/ s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val) { u32 core_ctl; DEBUGFUNC("ixgbe_write_analog_reg8_82599"); core_ctl = (reg << 8) | val; IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl); IXGBE_WRITE_FLUSH(hw); usec_delay(10); return IXGBE_SUCCESS; } /** * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx * @hw: pointer to hardware structure * * Starts the hardware using the generic start_hw function * and the generation-2 (gen2) start_hw function. * Then performs revision-specific operations, if any. **/ s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw) { s32 ret_val = IXGBE_SUCCESS; DEBUGFUNC("ixgbe_start_hw_82599"); ret_val = ixgbe_start_hw_generic(hw); if (ret_val != IXGBE_SUCCESS) goto out; ret_val = ixgbe_start_hw_gen2(hw); if (ret_val != IXGBE_SUCCESS) goto out; /* We need to run link autotry after the driver loads */ - hw->mac.autotry_restart = TRUE; + hw->mac.autotry_restart = true; if (ret_val == IXGBE_SUCCESS) ret_val = ixgbe_verify_fw_version_82599(hw); out: return ret_val; } /** * ixgbe_identify_phy_82599 - Get physical layer module * @hw: pointer to hardware structure * * Determines the physical layer module found on the current adapter.
* If PHY already detected, maintains current PHY type in hw struct, * otherwise executes the PHY detection routine. **/ s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) { s32 status; DEBUGFUNC("ixgbe_identify_phy_82599"); /* Detect PHY if not unknown - returns success if already detected. */ status = ixgbe_identify_phy_generic(hw); if (status != IXGBE_SUCCESS) { /* 82599 10GBASE-T requires an external PHY */ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) return status; else status = ixgbe_identify_module_generic(hw); } /* Set PHY type none if no PHY detected */ if (hw->phy.type == ixgbe_phy_unknown) { hw->phy.type = ixgbe_phy_none; return IXGBE_SUCCESS; } /* Return error if SFP module has been detected but is not supported */ if (hw->phy.type == ixgbe_phy_sfp_unsupported) return IXGBE_ERR_SFP_NOT_SUPPORTED; return status; } /** * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type * @hw: pointer to hardware structure * * Determines physical layer capabilities of the current configuration. **/ u64 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw) { u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK; u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; u16 ext_ability = 0; DEBUGFUNC("ixgbe_get_supported_physical_layer_82599"); hw->phy.ops.identify(hw); switch (hw->phy.type) { case ixgbe_phy_tn: case ixgbe_phy_cu_unknown: hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY) physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; goto out; default: break; } switch (autoc & IXGBE_AUTOC_LMS_MASK) { case IXGBE_AUTOC_LMS_1G_AN: case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) { physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX | IXGBE_PHYSICAL_LAYER_1000BASE_BX; goto out; } else /* SFI mode so read SFP module */ goto sfp_check; break; case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4) physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4) physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4; else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI) physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI; goto out; break; case IXGBE_AUTOC_LMS_10G_SERIAL: if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) { physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR; goto out; } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) goto sfp_check; break; case IXGBE_AUTOC_LMS_KX4_KX_KR: case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: if (autoc & IXGBE_AUTOC_KX_SUPP) physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX; if (autoc & IXGBE_AUTOC_KX4_SUPP) physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4; if (autoc & IXGBE_AUTOC_KR_SUPP) physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR; goto out; break; default: goto out; break; } sfp_check: /* SFP check must be done last since DA modules are sometimes used to * test KR mode - we need to identify KR mode correctly before the SFP * module check.
* Call identify_sfp because the pluggable module may have changed */ physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw); out: return physical_layer; } /** * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599 * @hw: pointer to hardware structure * @regval: register value to write to RXCTRL * * Enables the Rx DMA unit for 82599 **/ s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) { DEBUGFUNC("ixgbe_enable_rx_dma_82599"); /* * Workaround for 82599 silicon errata when enabling the Rx datapath. * If traffic is incoming before we enable the Rx unit, it could hang * the Rx DMA unit. Therefore, make sure the security engine is * completely disabled prior to enabling the Rx unit. */ hw->mac.ops.disable_sec_rx_path(hw); if (regval & IXGBE_RXCTRL_RXEN) ixgbe_enable_rx(hw); else ixgbe_disable_rx(hw); hw->mac.ops.enable_sec_rx_path(hw); return IXGBE_SUCCESS; } /** * ixgbe_verify_fw_version_82599 - verify FW version for 82599 * @hw: pointer to hardware structure * * Verifies that the installed firmware version is 0.6 or higher * for SFI devices. All 82599 SFI devices should have version 0.6 or higher. * * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or * if the FW version is not supported. **/ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) { s32 status = IXGBE_ERR_EEPROM_VERSION; u16 fw_offset, fw_ptp_cfg_offset; u16 fw_version; DEBUGFUNC("ixgbe_verify_fw_version_82599"); /* firmware check is only necessary for SFI devices */ if (hw->phy.media_type != ixgbe_media_type_fiber) { status = IXGBE_SUCCESS; goto fw_version_out; } /* get the offset to the Firmware Module block */ if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset)) { ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, "eeprom read at offset %d failed", IXGBE_FW_PTR); return IXGBE_ERR_EEPROM_VERSION; } if ((fw_offset == 0) || (fw_offset == 0xFFFF)) goto fw_version_out; /* get the offset to the Pass Through Patch Configuration block */ if (hw->eeprom.ops.read(hw, (fw_offset + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR), &fw_ptp_cfg_offset)) { ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, "eeprom read at offset %d failed", fw_offset + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR); return IXGBE_ERR_EEPROM_VERSION; } if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF)) goto fw_version_out; /* get the firmware version */ if (hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4), &fw_version)) { ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, "eeprom read at offset %d failed", fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4); return IXGBE_ERR_EEPROM_VERSION; } if (fw_version > 0x5) status = IXGBE_SUCCESS; fw_version_out: return status; } /** * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state. * @hw: pointer to hardware structure * - * Returns TRUE if the LESM FW module is present and enabled. Otherwise - * returns FALSE. Smart Speed must be disabled if LESM FW module is enabled. + * Returns true if the LESM FW module is present and enabled. Otherwise + * returns false. Smart Speed must be disabled if LESM FW module is enabled.
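 *
 * (Editor's sketch of the EEPROM pointer chain walked below: word
 * IXGBE_FW_PTR locates the firmware module block; that block plus
 * IXGBE_FW_LESM_PARAMETERS_PTR locates the LESM parameter block; its
 * IXGBE_FW_LESM_STATE_1 word carries the IXGBE_FW_LESM_STATE_ENABLED
 * bit that decides the result.)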
**/ bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) { - bool lesm_enabled = FALSE; + bool lesm_enabled = false; u16 fw_offset, fw_lesm_param_offset, fw_lesm_state; s32 status; DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599"); /* get the offset to the Firmware Module block */ status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); if ((status != IXGBE_SUCCESS) || (fw_offset == 0) || (fw_offset == 0xFFFF)) goto out; /* get the offset to the LESM Parameters block */ status = hw->eeprom.ops.read(hw, (fw_offset + IXGBE_FW_LESM_PARAMETERS_PTR), &fw_lesm_param_offset); if ((status != IXGBE_SUCCESS) || (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF)) goto out; /* get the LESM state word */ status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset + IXGBE_FW_LESM_STATE_1), &fw_lesm_state); if ((status == IXGBE_SUCCESS) && (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED)) - lesm_enabled = TRUE; + lesm_enabled = true; out: return lesm_enabled; } /** * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using * fastest available method * * @hw: pointer to hardware structure * @offset: offset of word in EEPROM to read * @words: number of words * @data: word(s) read from the EEPROM * * Retrieves 16 bit word(s) from the EEPROM **/ static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { struct ixgbe_eeprom_info *eeprom = &hw->eeprom; s32 ret_val = IXGBE_ERR_CONFIG; DEBUGFUNC("ixgbe_read_eeprom_buffer_82599"); /* * If EEPROM is detected and can be addressed using 14 bits, * use EERD; otherwise use bit bang */ if ((eeprom->type == ixgbe_eeprom_spi) && (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR)) ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words, data); else ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset, words, data); return ret_val; } /** * ixgbe_read_eeprom_82599 - Read EEPROM word using * fastest available method * * @hw: pointer to hardware structure * @offset: offset of word in the EEPROM to read * @data: word read from the EEPROM * * Reads a 16 bit word from the EEPROM **/ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, u16 offset, u16 *data) { struct ixgbe_eeprom_info *eeprom = &hw->eeprom; s32 ret_val = IXGBE_ERR_CONFIG; DEBUGFUNC("ixgbe_read_eeprom_82599"); /* * If EEPROM is detected and can be addressed using 14 bits, * use EERD; otherwise use bit bang */ if ((eeprom->type == ixgbe_eeprom_spi) && (offset <= IXGBE_EERD_MAX_ADDR)) ret_val = ixgbe_read_eerd_generic(hw, offset, data); else ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data); return ret_val; } /** * ixgbe_reset_pipeline_82599 - perform pipeline reset * * @hw: pointer to hardware structure * * Reset pipeline by asserting Restart_AN together with LMS change to ensure * full pipeline reset. This function assumes the SW/FW lock is held.
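 *
 * (Editor's summary of the sequence implemented below: clear any NVM
 * link-disable bits in AUTOC2, write AUTOC with Restart_AN set and
 * LMS[2] toggled, poll ANLP1 until the AN state machine leaves state 0,
 * then restore the original AUTOC value to put LMS back.)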
**/ s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) { s32 ret_val; u32 anlp1_reg = 0; u32 i, autoc_reg, autoc2_reg; /* Enable link if disabled in NVM */ autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2); if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) { autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK; IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg); IXGBE_WRITE_FLUSH(hw); } autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); autoc_reg |= IXGBE_AUTOC_AN_RESTART; /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT)); /* Wait for AN to leave state 0 */ for (i = 0; i < 10; i++) { msec_delay(4); anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK) break; } if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) { DEBUGOUT("auto negotiation not completed\n"); ret_val = IXGBE_ERR_RESET_FAILED; goto reset_pipeline_out; } ret_val = IXGBE_SUCCESS; reset_pipeline_out: /* Write AUTOC register with original LMS field and Restart_AN */ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); IXGBE_WRITE_FLUSH(hw); return ret_val; } /** * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to read * @dev_addr: address to read from * @data: value read * * Performs byte read operation to SFP module's EEPROM over I2C interface at * a specified device address. **/ static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data) { u32 esdp; s32 status; s32 timeout = 200; DEBUGFUNC("ixgbe_read_i2c_byte_82599"); - if (hw->phy.qsfp_shared_i2c_bus == TRUE) { + if (hw->phy.qsfp_shared_i2c_bus == true) { /* Acquire I2C bus ownership. */ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); esdp |= IXGBE_ESDP_SDP0; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); IXGBE_WRITE_FLUSH(hw); while (timeout) { esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); if (esdp & IXGBE_ESDP_SDP1) break; msec_delay(5); timeout--; } if (!timeout) { DEBUGOUT("Driver can't access resource," " acquiring I2C bus timeout.\n"); status = IXGBE_ERR_I2C; goto release_i2c_access; } } status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data); release_i2c_access: - if (hw->phy.qsfp_shared_i2c_bus == TRUE) { + if (hw->phy.qsfp_shared_i2c_bus == true) { /* Release I2C bus ownership. */ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); esdp &= ~IXGBE_ESDP_SDP0; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); IXGBE_WRITE_FLUSH(hw); } return status; } /** * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to write * @dev_addr: address to write to * @data: value to write * * Performs byte write operation to SFP module's EEPROM over I2C interface at * a specified device address. **/ static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data) { u32 esdp; s32 status; s32 timeout = 200; DEBUGFUNC("ixgbe_write_i2c_byte_82599"); - if (hw->phy.qsfp_shared_i2c_bus == TRUE) { + if (hw->phy.qsfp_shared_i2c_bus == true) { /* Acquire I2C bus ownership.
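 * (Editor's note on the handshake used here and in the read path above:
 * SDP0 is driven high to request the shared QSFP I2C bus, and SDP1 is
 * polled as the grant indication for up to 200 iterations of 5 ms
 * before giving up.)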
*/ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); esdp |= IXGBE_ESDP_SDP0; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); IXGBE_WRITE_FLUSH(hw); while (timeout) { esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); if (esdp & IXGBE_ESDP_SDP1) break; msec_delay(5); timeout--; } if (!timeout) { DEBUGOUT("Driver can't access resource," " acquiring I2C bus timeout.\n"); status = IXGBE_ERR_I2C; goto release_i2c_access; } } status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data); release_i2c_access: - if (hw->phy.qsfp_shared_i2c_bus == TRUE) { + if (hw->phy.qsfp_shared_i2c_bus == true) { /* Release I2C bus ownership. */ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); esdp &= ~IXGBE_ESDP_SDP0; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); IXGBE_WRITE_FLUSH(hw); } return status; } diff --git a/sys/dev/ixgbe/ixgbe_api.c b/sys/dev/ixgbe/ixgbe_api.c index 5e662dac93f9..2ae2db78d0ed 100644 --- a/sys/dev/ixgbe/ixgbe_api.c +++ b/sys/dev/ixgbe/ixgbe_api.c @@ -1,1730 +1,1730 @@ /****************************************************************************** SPDX-License-Identifier: BSD-3-Clause Copyright (c) 2001-2017, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/ /*$FreeBSD$*/ #include "ixgbe_api.h" #include "ixgbe_common.h" #define IXGBE_EMPTY_PARAM static const u32 ixgbe_mvals_base[IXGBE_MVALS_IDX_LIMIT] = { IXGBE_MVALS_INIT(IXGBE_EMPTY_PARAM) }; static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = { IXGBE_MVALS_INIT(_X540) }; static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = { IXGBE_MVALS_INIT(_X550) }; static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = { IXGBE_MVALS_INIT(_X550EM_x) }; static const u32 ixgbe_mvals_X550EM_a[IXGBE_MVALS_IDX_LIMIT] = { IXGBE_MVALS_INIT(_X550EM_a) }; /** * ixgbe_dcb_get_rtrup2tc - read rtrup2tc reg * @hw: pointer to hardware structure * @map: pointer to u8 arr for returning map * * Read the rtrup2tc HW register and resolve its content into map **/ void ixgbe_dcb_get_rtrup2tc(struct ixgbe_hw *hw, u8 *map) { if (hw->mac.ops.get_rtrup2tc) hw->mac.ops.get_rtrup2tc(hw, map); } /** * ixgbe_init_shared_code - Initialize the shared code * @hw: pointer to hardware structure * * This will assign function pointers and assign the MAC type and PHY code. * Does not touch the hardware. This function must be called prior to any * other function in the shared code. The ixgbe_hw structure should be * memset to 0 prior to calling this function. The following fields in * hw structure should be filled in prior to calling this function: * hw_addr, back, device_id, vendor_id, subsystem_device_id, * subsystem_vendor_id, and revision_id **/ s32 ixgbe_init_shared_code(struct ixgbe_hw *hw) { s32 status; DEBUGFUNC("ixgbe_init_shared_code"); /* * Set the mac type */ ixgbe_set_mac_type(hw); switch (hw->mac.type) { case ixgbe_mac_82598EB: status = ixgbe_init_ops_82598(hw); break; case ixgbe_mac_82599EB: status = ixgbe_init_ops_82599(hw); break; case ixgbe_mac_X540: status = ixgbe_init_ops_X540(hw); break; case ixgbe_mac_X550: status = ixgbe_init_ops_X550(hw); break; case ixgbe_mac_X550EM_x: status = ixgbe_init_ops_X550EM_x(hw); break; case ixgbe_mac_X550EM_a: status = ixgbe_init_ops_X550EM_a(hw); break; default: status = IXGBE_ERR_DEVICE_NOT_SUPPORTED; break; } hw->mac.max_link_up_time = IXGBE_LINK_UP_TIME; return status; } /** * ixgbe_set_mac_type - Sets MAC type * @hw: pointer to the HW structure * * This function sets the mac type of the adapter based on the * vendor ID and device ID stored in the hw structure. 
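 *
 * A minimal caller sketch (editor's illustration; the device id picked
 * is just one entry from the table below):
 *
 *	struct ixgbe_hw hw;
 *	memset(&hw, 0, sizeof(hw));
 *	hw.vendor_id = IXGBE_INTEL_VENDOR_ID;
 *	hw.device_id = IXGBE_DEV_ID_X550EM_A_SFP;
 *	ixgbe_set_mac_type(&hw);
 *	hw.mac.type is now ixgbe_mac_X550EM_a and hw.mvals points at
 *	ixgbe_mvals_X550EM_a.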
**/ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw) { s32 ret_val = IXGBE_SUCCESS; DEBUGFUNC("ixgbe_set_mac_type\n"); if (hw->vendor_id != IXGBE_INTEL_VENDOR_ID) { ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED, "Unsupported vendor id: %x", hw->vendor_id); return IXGBE_ERR_DEVICE_NOT_SUPPORTED; } hw->mvals = ixgbe_mvals_base; switch (hw->device_id) { case IXGBE_DEV_ID_82598: case IXGBE_DEV_ID_82598_BX: case IXGBE_DEV_ID_82598AF_SINGLE_PORT: case IXGBE_DEV_ID_82598AF_DUAL_PORT: case IXGBE_DEV_ID_82598AT: case IXGBE_DEV_ID_82598AT2: case IXGBE_DEV_ID_82598EB_CX4: case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: case IXGBE_DEV_ID_82598_DA_DUAL_PORT: case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: case IXGBE_DEV_ID_82598EB_XF_LR: case IXGBE_DEV_ID_82598EB_SFP_LOM: hw->mac.type = ixgbe_mac_82598EB; break; case IXGBE_DEV_ID_82599_KX4: case IXGBE_DEV_ID_82599_KX4_MEZZ: case IXGBE_DEV_ID_82599_XAUI_LOM: case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: case IXGBE_DEV_ID_82599_KR: case IXGBE_DEV_ID_82599_SFP: case IXGBE_DEV_ID_82599_BACKPLANE_FCOE: case IXGBE_DEV_ID_82599_SFP_FCOE: case IXGBE_DEV_ID_82599_SFP_EM: case IXGBE_DEV_ID_82599_SFP_SF2: case IXGBE_DEV_ID_82599_SFP_SF_QP: case IXGBE_DEV_ID_82599_QSFP_SF_QP: case IXGBE_DEV_ID_82599EN_SFP: case IXGBE_DEV_ID_82599_CX4: case IXGBE_DEV_ID_82599_BYPASS: case IXGBE_DEV_ID_82599_T3_LOM: hw->mac.type = ixgbe_mac_82599EB; break; case IXGBE_DEV_ID_X540T: case IXGBE_DEV_ID_X540T1: case IXGBE_DEV_ID_X540_BYPASS: hw->mac.type = ixgbe_mac_X540; hw->mvals = ixgbe_mvals_X540; break; case IXGBE_DEV_ID_X550T: case IXGBE_DEV_ID_X550T1: hw->mac.type = ixgbe_mac_X550; hw->mvals = ixgbe_mvals_X550; break; case IXGBE_DEV_ID_X550EM_X_KX4: case IXGBE_DEV_ID_X550EM_X_KR: case IXGBE_DEV_ID_X550EM_X_10G_T: case IXGBE_DEV_ID_X550EM_X_1G_T: case IXGBE_DEV_ID_X550EM_X_SFP: case IXGBE_DEV_ID_X550EM_X_XFI: hw->mac.type = ixgbe_mac_X550EM_x; hw->mvals = ixgbe_mvals_X550EM_x; break; case IXGBE_DEV_ID_X550EM_A_KR: case IXGBE_DEV_ID_X550EM_A_KR_L: case IXGBE_DEV_ID_X550EM_A_SFP_N: case IXGBE_DEV_ID_X550EM_A_SGMII: case IXGBE_DEV_ID_X550EM_A_SGMII_L: case IXGBE_DEV_ID_X550EM_A_1G_T: case IXGBE_DEV_ID_X550EM_A_1G_T_L: case IXGBE_DEV_ID_X550EM_A_10G_T: case IXGBE_DEV_ID_X550EM_A_QSFP: case IXGBE_DEV_ID_X550EM_A_QSFP_N: case IXGBE_DEV_ID_X550EM_A_SFP: hw->mac.type = ixgbe_mac_X550EM_a; hw->mvals = ixgbe_mvals_X550EM_a; break; default: ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED; ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED, "Unsupported device id: %x", hw->device_id); break; } DEBUGOUT2("ixgbe_set_mac_type found mac: %d, returns: %d\n", hw->mac.type, ret_val); return ret_val; } /** * ixgbe_init_hw - Initialize the hardware * @hw: pointer to hardware structure * * Initialize the hardware by resetting and then starting the hardware **/ s32 ixgbe_init_hw(struct ixgbe_hw *hw) { return ixgbe_call_func(hw, hw->mac.ops.init_hw, (hw), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_reset_hw - Performs a hardware reset * @hw: pointer to hardware structure * * Resets the hardware by resetting the transmit and receive units, masks and * clears all interrupts, performs a PHY reset, and performs a MAC reset **/ s32 ixgbe_reset_hw(struct ixgbe_hw *hw) { return ixgbe_call_func(hw, hw->mac.ops.reset_hw, (hw), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_start_hw - Prepares hardware for Rx/Tx * @hw: pointer to hardware structure * * Starts the hardware by filling the bus info structure and media type, * clears all on chip counters, initializes receive address registers, * multicast table, VLAN filter table, calls routine to setup link and * flow control settings, 
and leaves transmit and receive units disabled * and uninitialized. **/ s32 ixgbe_start_hw(struct ixgbe_hw *hw) { return ixgbe_call_func(hw, hw->mac.ops.start_hw, (hw), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_enable_relaxed_ordering - Enables tx relaxed ordering, * which is disabled by default in ixgbe_start_hw(). * * @hw: pointer to hardware structure * * Enable relaxed ordering. **/ void ixgbe_enable_relaxed_ordering(struct ixgbe_hw *hw) { if (hw->mac.ops.enable_relaxed_ordering) hw->mac.ops.enable_relaxed_ordering(hw); } /** * ixgbe_clear_hw_cntrs - Clear hardware counters * @hw: pointer to hardware structure * * Clears all hardware statistics counters by reading them from the hardware * Statistics counters are clear on read. **/ s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw) { return ixgbe_call_func(hw, hw->mac.ops.clear_hw_cntrs, (hw), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_get_media_type - Get media type * @hw: pointer to hardware structure * * Returns the media type (fiber, copper, backplane) **/ enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw) { return ixgbe_call_func(hw, hw->mac.ops.get_media_type, (hw), ixgbe_media_type_unknown); } /** * ixgbe_get_mac_addr - Get MAC address * @hw: pointer to hardware structure * @mac_addr: Adapter MAC address * * Reads the adapter's MAC address from the first Receive Address Register * (RAR0). A reset of the adapter must have been performed prior to calling * this function in order for the MAC address to have been loaded from the * EEPROM into RAR0 **/ s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr) { return ixgbe_call_func(hw, hw->mac.ops.get_mac_addr, (hw, mac_addr), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_get_san_mac_addr - Get SAN MAC address * @hw: pointer to hardware structure * @san_mac_addr: SAN MAC address * * Reads the SAN MAC address from the EEPROM, if it's available. This is * per-port, so set_lan_id() must be called before reading the addresses. **/ s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr) { return ixgbe_call_func(hw, hw->mac.ops.get_san_mac_addr, (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_set_san_mac_addr - Write a SAN MAC address * @hw: pointer to hardware structure * @san_mac_addr: SAN MAC address * * Writes a SAN MAC address to the EEPROM. **/ s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr) { return ixgbe_call_func(hw, hw->mac.ops.set_san_mac_addr, (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_get_device_caps - Get additional device capabilities * @hw: pointer to hardware structure * @device_caps: the EEPROM word for device capabilities * * Reads the extra device capabilities from the EEPROM **/ s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps) { return ixgbe_call_func(hw, hw->mac.ops.get_device_caps, (hw, device_caps), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_get_wwn_prefix - Get alternative WWNN/WWPN prefix from the EEPROM * @hw: pointer to hardware structure * @wwnn_prefix: the alternative WWNN prefix * @wwpn_prefix: the alternative WWPN prefix * * This function will read the EEPROM from the alternative SAN MAC address * block to check the support for the alternative WWNN/WWPN prefix.
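 *
 * (Editor's note on the dispatch pattern used by every wrapper in this
 * file: the ixgbe_call_func() macro expands, roughly, to
 *
 *	(func != NULL) ? func params : error
 *
 * so a MAC-specific init that leaves an op NULL makes the wrapper
 * return IXGBE_NOT_IMPLEMENTED instead of dereferencing a NULL
 * pointer.)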
**/ s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix, u16 *wwpn_prefix) { return ixgbe_call_func(hw, hw->mac.ops.get_wwn_prefix, (hw, wwnn_prefix, wwpn_prefix), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_get_fcoe_boot_status - Get FCOE boot status from EEPROM * @hw: pointer to hardware structure * @bs: the fcoe boot status * * This function will read the FCOE boot status from the iSCSI FCOE block **/ s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs) { return ixgbe_call_func(hw, hw->mac.ops.get_fcoe_boot_status, (hw, bs), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_get_bus_info - Set PCI bus info * @hw: pointer to hardware structure * * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure **/ s32 ixgbe_get_bus_info(struct ixgbe_hw *hw) { return ixgbe_call_func(hw, hw->mac.ops.get_bus_info, (hw), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_get_num_of_tx_queues - Get Tx queues * @hw: pointer to hardware structure * * Returns the number of transmit queues for the given adapter. **/ u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw) { return hw->mac.max_tx_queues; } /** * ixgbe_get_num_of_rx_queues - Get Rx queues * @hw: pointer to hardware structure * * Returns the number of receive queues for the given adapter. **/ u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw) { return hw->mac.max_rx_queues; } /** * ixgbe_stop_adapter - Disable Rx/Tx units * @hw: pointer to hardware structure * * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, * disables transmit and receive units. The adapter_stopped flag is used by * the shared code and drivers to determine if the adapter is in a stopped * state and should not touch the hardware. **/ s32 ixgbe_stop_adapter(struct ixgbe_hw *hw) { return ixgbe_call_func(hw, hw->mac.ops.stop_adapter, (hw), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_read_pba_string - Reads part number string from EEPROM * @hw: pointer to hardware structure * @pba_num: stores the part number string from the EEPROM * @pba_num_size: part number string buffer length * * Reads the part number string from the EEPROM. **/ s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size) { return ixgbe_read_pba_string_generic(hw, pba_num, pba_num_size); } /** * ixgbe_read_pba_num - Reads part number from EEPROM * @hw: pointer to hardware structure * @pba_num: stores the part number from the EEPROM * * Reads the part number from the EEPROM. **/ s32 ixgbe_read_pba_num(struct ixgbe_hw *hw, u32 *pba_num) { return ixgbe_read_pba_num_generic(hw, pba_num); } /** * ixgbe_identify_phy - Get PHY type * @hw: pointer to hardware structure * * Determines the physical layer module found on the current adapter. 
**/ s32 ixgbe_identify_phy(struct ixgbe_hw *hw) { s32 status = IXGBE_SUCCESS; if (hw->phy.type == ixgbe_phy_unknown) { status = ixgbe_call_func(hw, hw->phy.ops.identify, (hw), IXGBE_NOT_IMPLEMENTED); } return status; } /** * ixgbe_reset_phy - Perform a PHY reset * @hw: pointer to hardware structure **/ s32 ixgbe_reset_phy(struct ixgbe_hw *hw) { s32 status = IXGBE_SUCCESS; if (hw->phy.type == ixgbe_phy_unknown) { if (ixgbe_identify_phy(hw) != IXGBE_SUCCESS) status = IXGBE_ERR_PHY; } if (status == IXGBE_SUCCESS) { status = ixgbe_call_func(hw, hw->phy.ops.reset, (hw), IXGBE_NOT_IMPLEMENTED); } return status; } /** * ixgbe_get_phy_firmware_version - Get PHY firmware version * @hw: pointer to hardware structure * @firmware_version: pointer to firmware version **/ s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw, u16 *firmware_version) { s32 status = IXGBE_SUCCESS; status = ixgbe_call_func(hw, hw->phy.ops.get_firmware_version, (hw, firmware_version), IXGBE_NOT_IMPLEMENTED); return status; } /** * ixgbe_read_phy_reg - Read PHY register * @hw: pointer to hardware structure * @reg_addr: 32 bit address of PHY register to read * @device_type: type of device you want to communicate with * @phy_data: Pointer to read data from PHY register * * Reads a value from a specified PHY register **/ s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 *phy_data) { if (hw->phy.id == 0) ixgbe_identify_phy(hw); return ixgbe_call_func(hw, hw->phy.ops.read_reg, (hw, reg_addr, device_type, phy_data), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_write_phy_reg - Write PHY register * @hw: pointer to hardware structure * @reg_addr: 32 bit PHY register to write * @device_type: type of device you want to communicate with * @phy_data: Data to write to the PHY register * * Writes a value to specified PHY register **/ s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 phy_data) { if (hw->phy.id == 0) ixgbe_identify_phy(hw); return ixgbe_call_func(hw, hw->phy.ops.write_reg, (hw, reg_addr, device_type, phy_data), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_setup_phy_link - Restart PHY autoneg * @hw: pointer to hardware structure * * Restarts autonegotiation on the PHY and waits for completion. **/ s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw) { return ixgbe_call_func(hw, hw->phy.ops.setup_link, (hw), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_setup_internal_phy - Configure integrated PHY * @hw: pointer to hardware structure * * Reconfigure the integrated PHY in order to enable communication with the * external PHY. * Returns success if not implemented, since nothing needs to be done in this * case. */ s32 ixgbe_setup_internal_phy(struct ixgbe_hw *hw) { return ixgbe_call_func(hw, hw->phy.ops.setup_internal_link, (hw), IXGBE_SUCCESS); } /** * ixgbe_check_phy_link - Determine link and speed status * @hw: pointer to hardware structure * @speed: link speed - * @link_up: TRUE when link is up + * @link_up: true when link is up * * Reads a PHY register to determine if link is up and the current speed for * the PHY.
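 *
 * A minimal polling sketch (editor's illustration):
 *
 *	ixgbe_link_speed speed;
 *	bool link_up;
 *
 *	if (ixgbe_check_phy_link(hw, &speed, &link_up) == IXGBE_SUCCESS &&
 *	    link_up)
 *		;	link is up at 'speed'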
**/ s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up) { return ixgbe_call_func(hw, hw->phy.ops.check_link, (hw, speed, link_up), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_setup_phy_link_speed - Set auto advertise * @hw: pointer to hardware structure * @speed: new link speed - * @autoneg_wait_to_complete: TRUE when waiting for completion is needed + * @autoneg_wait_to_complete: true when waiting for completion is needed * * Sets the auto advertised capabilities **/ s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { return ixgbe_call_func(hw, hw->phy.ops.setup_link_speed, (hw, speed, autoneg_wait_to_complete), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_set_phy_power - Control the phy power state * @hw: pointer to hardware structure - * @on: TRUE for on, FALSE for off + * @on: true for on, false for off */ s32 ixgbe_set_phy_power(struct ixgbe_hw *hw, bool on) { return ixgbe_call_func(hw, hw->phy.ops.set_phy_power, (hw, on), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_check_link - Get link and speed status * @hw: pointer to hardware structure * @speed: pointer to link speed - * @link_up: TRUE when link is up + * @link_up: true when link is up * @link_up_wait_to_complete: bool used to wait for link up or not * * Reads the links register to determine if link is up and the current speed **/ s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up, bool link_up_wait_to_complete) { return ixgbe_call_func(hw, hw->mac.ops.check_link, (hw, speed, link_up, link_up_wait_to_complete), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_disable_tx_laser - Disable Tx laser * @hw: pointer to hardware structure * * If the driver needs to disable the laser on SFI optics. **/ void ixgbe_disable_tx_laser(struct ixgbe_hw *hw) { if (hw->mac.ops.disable_tx_laser) hw->mac.ops.disable_tx_laser(hw); } /** * ixgbe_enable_tx_laser - Enable Tx laser * @hw: pointer to hardware structure * * If the driver needs to enable the laser on SFI optics. **/ void ixgbe_enable_tx_laser(struct ixgbe_hw *hw) { if (hw->mac.ops.enable_tx_laser) hw->mac.ops.enable_tx_laser(hw); } /** * ixgbe_flap_tx_laser - flap Tx laser to start autotry process * @hw: pointer to hardware structure * * When the driver changes the link speeds that it can support then * flap the tx laser to alert the link partner to start autotry * process on its end. **/ void ixgbe_flap_tx_laser(struct ixgbe_hw *hw) { if (hw->mac.ops.flap_tx_laser) hw->mac.ops.flap_tx_laser(hw); } /** * ixgbe_setup_link - Set link speed * @hw: pointer to hardware structure * @speed: new link speed - * @autoneg_wait_to_complete: TRUE when waiting for completion is needed + * @autoneg_wait_to_complete: true when waiting for completion is needed * * Configures link settings. Restarts the link. * Performs autonegotiation if needed. **/ s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { return ixgbe_call_func(hw, hw->mac.ops.setup_link, (hw, speed, autoneg_wait_to_complete), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_setup_mac_link - Set link speed * @hw: pointer to hardware structure * @speed: new link speed - * @autoneg_wait_to_complete: TRUE when waiting for completion is needed + * @autoneg_wait_to_complete: true when waiting for completion is needed * * Configures link settings. Restarts the link. * Performs autonegotiation if needed. 
**/ s32 ixgbe_setup_mac_link(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { return ixgbe_call_func(hw, hw->mac.ops.setup_mac_link, (hw, speed, autoneg_wait_to_complete), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_get_link_capabilities - Returns link capabilities * @hw: pointer to hardware structure * @speed: link speed capabilities - * @autoneg: TRUE when autoneg or autotry is enabled + * @autoneg: true when autoneg or autotry is enabled * * Determines the link capabilities of the current configuration. **/ s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *autoneg) { return ixgbe_call_func(hw, hw->mac.ops.get_link_capabilities, (hw, speed, autoneg), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_led_on - Turn on LEDs * @hw: pointer to hardware structure * @index: led number to turn on * * Turns on the software controllable LEDs. **/ s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index) { return ixgbe_call_func(hw, hw->mac.ops.led_on, (hw, index), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_led_off - Turn off LEDs * @hw: pointer to hardware structure * @index: led number to turn off * * Turns off the software controllable LEDs. **/ s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index) { return ixgbe_call_func(hw, hw->mac.ops.led_off, (hw, index), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_blink_led_start - Blink LEDs * @hw: pointer to hardware structure * @index: led number to blink * * Blink LED based on index. **/ s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index) { return ixgbe_call_func(hw, hw->mac.ops.blink_led_start, (hw, index), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_blink_led_stop - Stop blinking LEDs * @hw: pointer to hardware structure * @index: led number to stop * * Stop blinking LED based on index. **/ s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index) { return ixgbe_call_func(hw, hw->mac.ops.blink_led_stop, (hw, index), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_init_eeprom_params - Initialize EEPROM parameters * @hw: pointer to hardware structure * * Initializes the EEPROM parameters ixgbe_eeprom_info within the * ixgbe_hw struct in order to set up EEPROM access. **/ s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw) { return ixgbe_call_func(hw, hw->eeprom.ops.init_params, (hw), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_write_eeprom - Write word to EEPROM * @hw: pointer to hardware structure * @offset: offset within the EEPROM to be written to * @data: 16 bit word to be written to the EEPROM * * Writes 16 bit value to EEPROM. If ixgbe_eeprom_update_checksum is not * called after this function, the EEPROM will most likely contain an * invalid checksum. **/ s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data) { return ixgbe_call_func(hw, hw->eeprom.ops.write, (hw, offset, data), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_write_eeprom_buffer - Write word(s) to EEPROM * @hw: pointer to hardware structure * @offset: offset within the EEPROM to be written to * @data: 16 bit word(s) to be written to the EEPROM * @words: number of words * * Writes 16 bit word(s) to EEPROM. If ixgbe_eeprom_update_checksum is not * called after this function, the EEPROM will most likely contain an * invalid checksum. 
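 *
 * A typical sequence (editor's sketch): write the word(s), then refresh
 * the checksum so a later ixgbe_validate_eeprom_checksum() call does not
 * fail:
 *
 *	ixgbe_write_eeprom_buffer(hw, offset, nwords, data);
 *	ixgbe_update_eeprom_checksum(hw);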
**/ s32 ixgbe_write_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { return ixgbe_call_func(hw, hw->eeprom.ops.write_buffer, (hw, offset, words, data), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_read_eeprom - Read word from EEPROM * @hw: pointer to hardware structure * @offset: offset within the EEPROM to be read * @data: read 16 bit value from EEPROM * * Reads 16 bit value from EEPROM **/ s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data) { return ixgbe_call_func(hw, hw->eeprom.ops.read, (hw, offset, data), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_read_eeprom_buffer - Read word(s) from EEPROM * @hw: pointer to hardware structure * @offset: offset within the EEPROM to be read * @data: read 16 bit word(s) from EEPROM * @words: number of words * * Reads 16 bit word(s) from EEPROM **/ s32 ixgbe_read_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { return ixgbe_call_func(hw, hw->eeprom.ops.read_buffer, (hw, offset, words, data), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_validate_eeprom_checksum - Validate EEPROM checksum * @hw: pointer to hardware structure * @checksum_val: calculated checksum * * Performs checksum calculation and validates the EEPROM checksum **/ s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val) { return ixgbe_call_func(hw, hw->eeprom.ops.validate_checksum, (hw, checksum_val), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_update_eeprom_checksum - Updates the EEPROM checksum * @hw: pointer to hardware structure **/ s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw) { return ixgbe_call_func(hw, hw->eeprom.ops.update_checksum, (hw), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_insert_mac_addr - Find a RAR for this mac address * @hw: pointer to hardware structure * @addr: Address to put into receive address register * @vmdq: VMDq pool to assign * * Puts an ethernet address into a receive address register, or * finds the rar that it is already in; adds to the pool list **/ s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) { return ixgbe_call_func(hw, hw->mac.ops.insert_mac_addr, (hw, addr, vmdq), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_set_rar - Set Rx address register * @hw: pointer to hardware structure * @index: Receive address register to write * @addr: Address to put into receive address register * @vmdq: VMDq "set" * @enable_addr: set flag that address is active * * Puts an ethernet address into a receive address register. **/ s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, u32 enable_addr) { return ixgbe_call_func(hw, hw->mac.ops.set_rar, (hw, index, addr, vmdq, enable_addr), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_clear_rar - Clear Rx address register * @hw: pointer to hardware structure * @index: Receive address register to clear * * Clears the ethernet address in a receive address register.
**/ s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index) { return ixgbe_call_func(hw, hw->mac.ops.clear_rar, (hw, index), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_set_vmdq - Associate a VMDq index with a receive address * @hw: pointer to hardware structure * @rar: receive address register index to associate with VMDq index * @vmdq: VMDq set or pool index **/ s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq) { return ixgbe_call_func(hw, hw->mac.ops.set_vmdq, (hw, rar, vmdq), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_set_vmdq_san_mac - Associate VMDq index 127 with a receive address * @hw: pointer to hardware structure * @vmdq: VMDq default pool index **/ s32 ixgbe_set_vmdq_san_mac(struct ixgbe_hw *hw, u32 vmdq) { return ixgbe_call_func(hw, hw->mac.ops.set_vmdq_san_mac, (hw, vmdq), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_clear_vmdq - Disassociate a VMDq index from a receive address * @hw: pointer to hardware structure * @rar: receive address register index to disassociate from VMDq index * @vmdq: VMDq set or pool index **/ s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq) { return ixgbe_call_func(hw, hw->mac.ops.clear_vmdq, (hw, rar, vmdq), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_init_rx_addrs - Initializes receive address filters. * @hw: pointer to hardware structure * * Places the MAC address in receive address register 0 and clears the rest * of the receive address registers. Clears the multicast table. Assumes * the receiver is in reset when the routine is called. **/ s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw) { return ixgbe_call_func(hw, hw->mac.ops.init_rx_addrs, (hw), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_get_num_rx_addrs - Returns the number of RAR entries. * @hw: pointer to hardware structure **/ u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw) { return hw->mac.num_rar_entries; } /** * ixgbe_update_uc_addr_list - Updates the MAC's list of secondary addresses * @hw: pointer to hardware structure * @addr_list: the list of new unicast (secondary) addresses * @addr_count: number of addresses * @func: iterator function to walk the address list * * The given list replaces any existing list. Clears the secondary addrs from * receive address registers. Uses unused receive address registers for the * first secondary addresses, and falls back to promiscuous mode as needed. **/ s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list, u32 addr_count, ixgbe_mc_addr_itr func) { return ixgbe_call_func(hw, hw->mac.ops.update_uc_addr_list, (hw, addr_list, addr_count, func), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_update_mc_addr_list - Updates the MAC's list of multicast addresses * @hw: pointer to hardware structure * @mc_addr_list: the list of new multicast addresses * @mc_addr_count: number of addresses * @func: iterator function to walk the multicast address list * @clear: flag, when set clears the table beforehand * * The given list replaces any existing list. Clears the MC addrs from receive * address registers and the multicast table. Uses unused receive address * registers for the first multicast addresses, and hashes the rest into the * multicast table.
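 *
 * (Editor's sketch of an iterator, assuming the ixgbe_mc_addr_itr
 * typedef in ixgbe_type.h takes (hw, addr_ptr, vmdq) and returns the
 * next address while advancing the caller's cursor over 6-byte
 * entries:
 *
 *	static u8 *next_mc_addr(struct ixgbe_hw *hw, u8 **mc_addr_ptr,
 *	    u32 *vmdq)
 *	{
 *		u8 *addr = *mc_addr_ptr;
 *		*mc_addr_ptr += 6;	advance by one MAC address
 *		return addr;
 *	}
 * )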
**/ s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, u32 mc_addr_count, ixgbe_mc_addr_itr func, bool clear) { return ixgbe_call_func(hw, hw->mac.ops.update_mc_addr_list, (hw, mc_addr_list, mc_addr_count, func, clear), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_enable_mc - Enable multicast address in RAR * @hw: pointer to hardware structure * * Enables multicast address in RAR and the use of the multicast hash table. **/ s32 ixgbe_enable_mc(struct ixgbe_hw *hw) { return ixgbe_call_func(hw, hw->mac.ops.enable_mc, (hw), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_disable_mc - Disable multicast address in RAR * @hw: pointer to hardware structure * * Disables multicast address in RAR and the use of the multicast hash table. **/ s32 ixgbe_disable_mc(struct ixgbe_hw *hw) { return ixgbe_call_func(hw, hw->mac.ops.disable_mc, (hw), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_clear_vfta - Clear VLAN filter table * @hw: pointer to hardware structure * * Clears the VLAN filter table, and the VMDq index associated with the filter **/ s32 ixgbe_clear_vfta(struct ixgbe_hw *hw) { return ixgbe_call_func(hw, hw->mac.ops.clear_vfta, (hw), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_set_vfta - Set VLAN filter table * @hw: pointer to hardware structure * @vlan: VLAN id to write to VLAN filter * @vind: VMDq output index that maps queue to VLAN id in VLVFB * @vlan_on: boolean flag to turn on/off VLAN * @vlvf_bypass: boolean flag indicating that updating the default pool is okay * * Turn on/off specified VLAN in the VLAN filter table. **/ s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on, bool vlvf_bypass) { return ixgbe_call_func(hw, hw->mac.ops.set_vfta, (hw, vlan, vind, vlan_on, vlvf_bypass), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_set_vlvf - Set VLAN Pool Filter * @hw: pointer to hardware structure * @vlan: VLAN id to write to VLAN filter * @vind: VMDq output index that maps queue to VLAN id in VLVFB * @vlan_on: boolean flag to turn on/off VLAN in VLVF * @vfta_delta: pointer to the difference between the current value of VFTA * and the desired value * @vfta: the desired value of the VFTA * @vlvf_bypass: boolean flag indicating that updating the default pool is okay * * Turn on/off specified bit in VLVF table. **/ s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on, u32 *vfta_delta, u32 vfta, bool vlvf_bypass) { return ixgbe_call_func(hw, hw->mac.ops.set_vlvf, (hw, vlan, vind, vlan_on, vfta_delta, vfta, vlvf_bypass), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_fc_enable - Enable flow control * @hw: pointer to hardware structure * * Configures the flow control settings based on SW configuration. **/ s32 ixgbe_fc_enable(struct ixgbe_hw *hw) { return ixgbe_call_func(hw, hw->mac.ops.fc_enable, (hw), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_setup_fc - Set up flow control * @hw: pointer to hardware structure * * Called at init time to set up flow control.
**/ s32 ixgbe_setup_fc(struct ixgbe_hw *hw) { return ixgbe_call_func(hw, hw->mac.ops.setup_fc, (hw), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_set_fw_drv_ver - Try to send the driver version number to the FW * @hw: pointer to hardware structure * @maj: driver major number to be sent to firmware * @min: driver minor number to be sent to firmware * @build: driver build number to be sent to firmware * @ver: driver version number to be sent to firmware * @len: length of driver_ver string * @driver_ver: driver string **/ s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build, u8 ver, u16 len, char *driver_ver) { return ixgbe_call_func(hw, hw->mac.ops.set_fw_drv_ver, (hw, maj, min, build, ver, len, driver_ver), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_dmac_config - Configure DMA Coalescing registers. * @hw: pointer to hardware structure * * Configure DMA coalescing. If enabling dmac, dmac is activated. * When disabling dmac, the dmac enable bit is cleared. **/ s32 ixgbe_dmac_config(struct ixgbe_hw *hw) { return ixgbe_call_func(hw, hw->mac.ops.dmac_config, (hw), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_dmac_update_tcs - Configure DMA Coalescing registers. * @hw: pointer to hardware structure * * Disables dmac, updates per TC settings, and then enables dmac. **/ s32 ixgbe_dmac_update_tcs(struct ixgbe_hw *hw) { return ixgbe_call_func(hw, hw->mac.ops.dmac_update_tcs, (hw), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_dmac_config_tcs - Configure DMA Coalescing registers. * @hw: pointer to hardware structure * * Configure DMA coalescing threshold per TC and set high priority bit for * FCOE TC. The dmac enable bit must be cleared before configuring. **/ s32 ixgbe_dmac_config_tcs(struct ixgbe_hw *hw) { return ixgbe_call_func(hw, hw->mac.ops.dmac_config_tcs, (hw), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_setup_eee - Enable/disable EEE support * @hw: pointer to the HW structure * @enable_eee: boolean flag to enable EEE * * Enable/disable EEE based on the enable_eee flag. * Auto-negotiation must be started after BASE-T EEE bits in PHY register 7.3C * are modified.
* **/ s32 ixgbe_setup_eee(struct ixgbe_hw *hw, bool enable_eee) { return ixgbe_call_func(hw, hw->mac.ops.setup_eee, (hw, enable_eee), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_set_source_address_pruning - Enable/Disable source address pruning * @hw: pointer to hardware structure * @enable: enable or disable source address pruning * @pool: Rx pool - Rx pool to toggle source address pruning **/ void ixgbe_set_source_address_pruning(struct ixgbe_hw *hw, bool enable, unsigned int pool) { if (hw->mac.ops.set_source_address_pruning) hw->mac.ops.set_source_address_pruning(hw, enable, pool); } /** * ixgbe_set_ethertype_anti_spoofing - Enable/Disable Ethertype anti-spoofing * @hw: pointer to hardware structure * @enable: enable or disable switch for Ethertype anti-spoofing * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing * **/ void ixgbe_set_ethertype_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) { if (hw->mac.ops.set_ethertype_anti_spoofing) hw->mac.ops.set_ethertype_anti_spoofing(hw, enable, vf); } /** * ixgbe_read_iosf_sb_reg - Read 32 bit PHY register * @hw: pointer to hardware structure * @reg_addr: 32 bit address of PHY register to read * @device_type: type of device you want to communicate with * @phy_data: Pointer to read data from PHY register * * Reads a value from a specified PHY register **/ s32 ixgbe_read_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u32 *phy_data) { return ixgbe_call_func(hw, hw->mac.ops.read_iosf_sb_reg, (hw, reg_addr, device_type, phy_data), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_write_iosf_sb_reg - Write 32 bit register through IOSF Sideband * @hw: pointer to hardware structure * @reg_addr: 32 bit PHY register to write * @device_type: type of device you want to communicate with * @phy_data: Data to write to the PHY register * * Writes a value to specified PHY register **/ s32 ixgbe_write_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u32 phy_data) { return ixgbe_call_func(hw, hw->mac.ops.write_iosf_sb_reg, (hw, reg_addr, device_type, phy_data), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_disable_mdd - Disable malicious driver detection * @hw: pointer to hardware structure * **/ void ixgbe_disable_mdd(struct ixgbe_hw *hw) { if (hw->mac.ops.disable_mdd) hw->mac.ops.disable_mdd(hw); } /** * ixgbe_enable_mdd - Enable malicious driver detection * @hw: pointer to hardware structure * **/ void ixgbe_enable_mdd(struct ixgbe_hw *hw) { if (hw->mac.ops.enable_mdd) hw->mac.ops.enable_mdd(hw); } /** * ixgbe_mdd_event - Handle malicious driver detection event * @hw: pointer to hardware structure * @vf_bitmap: vf bitmap of malicious vfs * **/ void ixgbe_mdd_event(struct ixgbe_hw *hw, u32 *vf_bitmap) { if (hw->mac.ops.mdd_event) hw->mac.ops.mdd_event(hw, vf_bitmap); } /** * ixgbe_restore_mdd_vf - Restore VF that was disabled during malicious driver * detection event * @hw: pointer to hardware structure * @vf: vf index * **/ void ixgbe_restore_mdd_vf(struct ixgbe_hw *hw, u32 vf) { if (hw->mac.ops.restore_mdd_vf) hw->mac.ops.restore_mdd_vf(hw, vf); } /** * ixgbe_fw_recovery_mode - Check if in FW NVM recovery mode * @hw: pointer to hardware structure * **/ bool ixgbe_fw_recovery_mode(struct ixgbe_hw *hw) { if (hw->mac.ops.fw_recovery_mode) return hw->mac.ops.fw_recovery_mode(hw); return false; } /** * ixgbe_enter_lplu - Transition to low power states * @hw: pointer to hardware structure * * Configures Low Power Link Up on transition to low power states * (from D0 to non-D0). 
**/ s32 ixgbe_enter_lplu(struct ixgbe_hw *hw) { return ixgbe_call_func(hw, hw->phy.ops.enter_lplu, (hw), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_handle_lasi - Handle external Base T PHY interrupt * @hw: pointer to hardware structure * * Handle external Base T PHY interrupt. If high temperature * failure alarm then return error, else if link status changed * then set up internal/external PHY link * * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature * failure alarm, else return PHY access status. */ s32 ixgbe_handle_lasi(struct ixgbe_hw *hw) { return ixgbe_call_func(hw, hw->phy.ops.handle_lasi, (hw), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_bypass_rw - Bit bang data into by_pass FW * @hw: pointer to hardware structure * @cmd: Command we send to the FW * @status: The reply from the FW * * Bit-bangs the cmd to the by_pass FW; status points to what is returned. **/ s32 ixgbe_bypass_rw(struct ixgbe_hw *hw, u32 cmd, u32 *status) { return ixgbe_call_func(hw, hw->mac.ops.bypass_rw, (hw, cmd, status), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_bypass_valid_rd - Verify valid return from bit-bang. * * If we send a write we can't be sure it took until we can read back * that same register. It can be a problem as some of the fields may * for valid reasons change between the time we wrote the register and * the time we read it again to verify. So this function checks everything * we can check and then assumes it worked. * * @hw: pointer to hardware structure * @in_reg: The register cmd for the bit-bang read. * @out_reg: The register returned from a bit-bang read. **/ bool ixgbe_bypass_valid_rd(struct ixgbe_hw *hw, u32 in_reg, u32 out_reg) { return ixgbe_call_func(hw, hw->mac.ops.bypass_valid_rd, (in_reg, out_reg), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_bypass_set - Set a bypass field in the FW CTRL Register. * @hw: pointer to hardware structure * @cmd: The control word we are setting. * @event: The event we are setting in the FW. This also happens to * be the mask for the event we are setting (handy) * @action: The action we set the event to in the FW. This is in a * bit field that happens to be what we want to put in * the event spot (also handy) * * Writes to the cmd control the bits in actions. **/ s32 ixgbe_bypass_set(struct ixgbe_hw *hw, u32 cmd, u32 event, u32 action) { return ixgbe_call_func(hw, hw->mac.ops.bypass_set, (hw, cmd, event, action), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_bypass_rd_eep - Read the bypass FW eeprom address * @hw: pointer to hardware structure * @addr: The bypass eeprom address to read. * @value: The 8b of data at the address above. **/ s32 ixgbe_bypass_rd_eep(struct ixgbe_hw *hw, u32 addr, u8 *value) { return ixgbe_call_func(hw, hw->mac.ops.bypass_rd_eep, (hw, addr, value), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_read_analog_reg8 - Reads 8 bit analog register * @hw: pointer to hardware structure * @reg: analog register to read * @val: read value * * Performs read operation on the analog register specified. **/ s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val) { return ixgbe_call_func(hw, hw->mac.ops.read_analog_reg8, (hw, reg, val), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_write_analog_reg8 - Writes 8 bit analog register * @hw: pointer to hardware structure * @reg: analog register to write * @val: value to write * * Performs write operation to Atlas analog register specified. **/ s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val) { return ixgbe_call_func(hw, hw->mac.ops.write_analog_reg8, (hw, reg, val), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_init_uta_tables - Initializes Unicast Table Arrays.
* @hw: pointer to hardware structure * * Initializes the Unicast Table Arrays to zero on device load. This * is part of the Rx init addr execution path. **/ s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw) { return ixgbe_call_func(hw, hw->mac.ops.init_uta_tables, (hw), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_read_i2c_byte - Reads 8 bit word over I2C at specified device address * @hw: pointer to hardware structure * @byte_offset: byte offset to read * @dev_addr: I2C bus address to read from * @data: value read * * Performs byte read operation to SFP module's EEPROM over I2C interface. **/ s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data) { return ixgbe_call_func(hw, hw->phy.ops.read_i2c_byte, (hw, byte_offset, dev_addr, data), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_read_i2c_byte_unlocked - Reads 8 bit word via I2C from device address * @hw: pointer to hardware structure * @byte_offset: byte offset to read * @dev_addr: I2C bus address to read from * @data: value read * * Performs byte read operation to SFP module's EEPROM over I2C interface. **/ s32 ixgbe_read_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data) { return ixgbe_call_func(hw, hw->phy.ops.read_i2c_byte_unlocked, (hw, byte_offset, dev_addr, data), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_read_link - Perform read operation on link device * @hw: pointer to the hardware structure * @addr: bus address to read from * @reg: device register to read from * @val: pointer to location to receive read value * * Returns an error code on error. */ s32 ixgbe_read_link(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val) { return ixgbe_call_func(hw, hw->link.ops.read_link, (hw, addr, reg, val), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_read_link_unlocked - Perform read operation on link device * @hw: pointer to the hardware structure * @addr: bus address to read from * @reg: device register to read from * @val: pointer to location to receive read value * * Returns an error code on error. **/ s32 ixgbe_read_link_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val) { return ixgbe_call_func(hw, hw->link.ops.read_link_unlocked, (hw, addr, reg, val), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_write_i2c_byte - Writes 8 bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to write * @dev_addr: I2C bus address to write to * @data: value to write * * Performs byte write operation to SFP module's EEPROM over I2C interface * at a specified device address. **/ s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data) { return ixgbe_call_func(hw, hw->phy.ops.write_i2c_byte, (hw, byte_offset, dev_addr, data), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_write_i2c_byte_unlocked - Writes 8 bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to write * @dev_addr: I2C bus address to write to * @data: value to write * * Performs byte write operation to SFP module's EEPROM over I2C interface * at a specified device address. **/ s32 ixgbe_write_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data) { return ixgbe_call_func(hw, hw->phy.ops.write_i2c_byte_unlocked, (hw, byte_offset, dev_addr, data), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_write_link - Perform write operation on link device * @hw: pointer to the hardware structure * @addr: bus address to write to * @reg: device register to write to * @val: value to write * * Returns an error code on error. 
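 *
 * Sketch of a read-modify-write through the link ops (addr, reg and
 * the bit being set are placeholders, not values from this file):
 *
 *	ret_val = ixgbe_read_link(hw, addr, reg, &val);
 *	if (ret_val == IXGBE_SUCCESS)
 *		ret_val = ixgbe_write_link(hw, addr, reg, val | 0x1);
 *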
*/ s32 ixgbe_write_link(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val) { return ixgbe_call_func(hw, hw->link.ops.write_link, (hw, addr, reg, val), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_write_link_unlocked - Perform write operation on link device * @hw: pointer to the hardware structure * @addr: bus address to write to * @reg: device register to write to * @val: value to write * * Returns an error code on error. **/ s32 ixgbe_write_link_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val) { return ixgbe_call_func(hw, hw->link.ops.write_link_unlocked, (hw, addr, reg, val), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_write_i2c_eeprom - Writes 8 bit EEPROM word over I2C interface * @hw: pointer to hardware structure * @byte_offset: EEPROM byte offset to write * @eeprom_data: value to write * * Performs byte write operation to SFP module's EEPROM over I2C interface. **/ s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data) { return ixgbe_call_func(hw, hw->phy.ops.write_i2c_eeprom, (hw, byte_offset, eeprom_data), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_read_i2c_eeprom - Reads 8 bit EEPROM word over I2C interface * @hw: pointer to hardware structure * @byte_offset: EEPROM byte offset to read * @eeprom_data: value read * * Performs byte read operation to SFP module's EEPROM over I2C interface. **/ s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data) { return ixgbe_call_func(hw, hw->phy.ops.read_i2c_eeprom, (hw, byte_offset, eeprom_data), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_get_supported_physical_layer - Returns physical layer type * @hw: pointer to hardware structure * * Determines physical layer capabilities of the current configuration. **/ u64 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw) { return ixgbe_call_func(hw, hw->mac.ops.get_supported_physical_layer, (hw), IXGBE_PHYSICAL_LAYER_UNKNOWN); } /** * ixgbe_enable_rx_dma - Enables Rx DMA unit, dependent on device specifics * @hw: pointer to hardware structure * @regval: bitfield to write to the Rx DMA register * * Enables the Rx DMA unit of the device. **/ s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval) { return ixgbe_call_func(hw, hw->mac.ops.enable_rx_dma, (hw, regval), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_disable_sec_rx_path - Stops the receive data path * @hw: pointer to hardware structure * * Stops the receive data path. **/ s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw) { return ixgbe_call_func(hw, hw->mac.ops.disable_sec_rx_path, (hw), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_enable_sec_rx_path - Enables the receive data path * @hw: pointer to hardware structure * * Enables the receive data path. 
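 *
 * The disable/enable pair is used bracket-style so that no receive
 * traffic is in flight through the security block while its registers
 * are reprogrammed (a sketch of the assumed pattern):
 *
 *	ret_val = ixgbe_disable_sec_rx_path(hw);
 *	... reprogram security block registers ...
 *	ret_val = ixgbe_enable_sec_rx_path(hw);
 *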
**/ s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw) { return ixgbe_call_func(hw, hw->mac.ops.enable_sec_rx_path, (hw), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_acquire_swfw_semaphore - Acquire SWFW semaphore * @hw: pointer to hardware structure * @mask: Mask to specify which semaphore to acquire * * Acquires the SWFW semaphore through SW_FW_SYNC register for the specified * function (CSR, PHY0, PHY1, EEPROM, Flash) **/ s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u32 mask) { return ixgbe_call_func(hw, hw->mac.ops.acquire_swfw_sync, (hw, mask), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_release_swfw_semaphore - Release SWFW semaphore * @hw: pointer to hardware structure * @mask: Mask to specify which semaphore to release * * Releases the SWFW semaphore through SW_FW_SYNC register for the specified * function (CSR, PHY0, PHY1, EEPROM, Flash) **/ void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u32 mask) { if (hw->mac.ops.release_swfw_sync) hw->mac.ops.release_swfw_sync(hw, mask); } /** * ixgbe_init_swfw_semaphore - Clean up SWFW semaphore * @hw: pointer to hardware structure * * Attempts to acquire the SWFW semaphore through SW_FW_SYNC register. * Regardless of whether it succeeds or not, it then releases the semaphore. * This function is called to recover from catastrophic failures that * may have left the semaphore locked. **/ void ixgbe_init_swfw_semaphore(struct ixgbe_hw *hw) { if (hw->mac.ops.init_swfw_sync) hw->mac.ops.init_swfw_sync(hw); } void ixgbe_disable_rx(struct ixgbe_hw *hw) { if (hw->mac.ops.disable_rx) hw->mac.ops.disable_rx(hw); } void ixgbe_enable_rx(struct ixgbe_hw *hw) { if (hw->mac.ops.enable_rx) hw->mac.ops.enable_rx(hw); } /** * ixgbe_set_rate_select_speed - Set module link speed * @hw: pointer to hardware structure * @speed: link speed to set * * Set module link speed via the rate select. */ void ixgbe_set_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed) { if (hw->mac.ops.set_rate_select_speed) hw->mac.ops.set_rate_select_speed(hw, speed); } diff --git a/sys/dev/ixgbe/ixgbe_common.c b/sys/dev/ixgbe/ixgbe_common.c index 573f7684e959..be631552babe 100644 --- a/sys/dev/ixgbe/ixgbe_common.c +++ b/sys/dev/ixgbe/ixgbe_common.c @@ -1,5529 +1,5529 @@ /****************************************************************************** SPDX-License-Identifier: BSD-3-Clause Copyright (c) 2001-2017, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #include "ixgbe_common.h" #include "ixgbe_phy.h" #include "ixgbe_dcb.h" #include "ixgbe_dcb_82599.h" #include "ixgbe_api.h" static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw); static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw); static void ixgbe_standby_eeprom(struct ixgbe_hw *hw); static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, u16 count); static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count); static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); static void ixgbe_release_eeprom(struct ixgbe_hw *hw); static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, u16 *san_mac_offset); static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data); static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data); static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, u16 offset); /** * ixgbe_init_ops_generic - Inits function ptrs * @hw: pointer to the hardware structure * * Initialize the function pointers. 
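 *
 * MAC-family init routines are expected to call this first and then
 * override the slots left NULL. A sketch of that layering (it mirrors
 * the shape of the 82599 init in ixgbe_82599.c, but is not a verbatim
 * copy):
 *
 *	ret_val = ixgbe_init_ops_generic(hw);
 *	mac->ops.reset_hw = ixgbe_reset_hw_82599;
 *	mac->ops.get_media_type = ixgbe_get_media_type_82599;
 *	mac->ops.setup_link = ixgbe_setup_mac_link_82599;
 *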
**/ s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw) { struct ixgbe_eeprom_info *eeprom = &hw->eeprom; struct ixgbe_mac_info *mac = &hw->mac; u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); DEBUGFUNC("ixgbe_init_ops_generic"); /* EEPROM */ eeprom->ops.init_params = ixgbe_init_eeprom_params_generic; /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */ if (eec & IXGBE_EEC_PRES) { eeprom->ops.read = ixgbe_read_eerd_generic; eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_generic; } else { eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic; eeprom->ops.read_buffer = ixgbe_read_eeprom_buffer_bit_bang_generic; } eeprom->ops.write = ixgbe_write_eeprom_generic; eeprom->ops.write_buffer = ixgbe_write_eeprom_buffer_bit_bang_generic; eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_generic; eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic; eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic; /* MAC */ mac->ops.init_hw = ixgbe_init_hw_generic; mac->ops.reset_hw = NULL; mac->ops.start_hw = ixgbe_start_hw_generic; mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic; mac->ops.get_media_type = NULL; mac->ops.get_supported_physical_layer = NULL; mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic; mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic; mac->ops.stop_adapter = ixgbe_stop_adapter_generic; mac->ops.get_bus_info = ixgbe_get_bus_info_generic; mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie; mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync; mac->ops.release_swfw_sync = ixgbe_release_swfw_sync; mac->ops.prot_autoc_read = prot_autoc_read_generic; mac->ops.prot_autoc_write = prot_autoc_write_generic; /* LEDs */ mac->ops.led_on = ixgbe_led_on_generic; mac->ops.led_off = ixgbe_led_off_generic; mac->ops.blink_led_start = ixgbe_blink_led_start_generic; mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic; mac->ops.init_led_link_act = ixgbe_init_led_link_act_generic; /* RAR, Multicast, VLAN */ mac->ops.set_rar = ixgbe_set_rar_generic; mac->ops.clear_rar = ixgbe_clear_rar_generic; mac->ops.insert_mac_addr = NULL; mac->ops.set_vmdq = NULL; mac->ops.clear_vmdq = NULL; mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic; mac->ops.update_uc_addr_list = ixgbe_update_uc_addr_list_generic; mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic; mac->ops.enable_mc = ixgbe_enable_mc_generic; mac->ops.disable_mc = ixgbe_disable_mc_generic; mac->ops.clear_vfta = NULL; mac->ops.set_vfta = NULL; mac->ops.set_vlvf = NULL; mac->ops.init_uta_tables = NULL; mac->ops.enable_rx = ixgbe_enable_rx_generic; mac->ops.disable_rx = ixgbe_disable_rx_generic; /* Flow Control */ mac->ops.fc_enable = ixgbe_fc_enable_generic; mac->ops.setup_fc = ixgbe_setup_fc_generic; mac->ops.fc_autoneg = ixgbe_fc_autoneg; /* Link */ mac->ops.get_link_capabilities = NULL; mac->ops.setup_link = NULL; mac->ops.check_link = NULL; mac->ops.dmac_config = NULL; mac->ops.dmac_update_tcs = NULL; mac->ops.dmac_config_tcs = NULL; return IXGBE_SUCCESS; } /** * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation * of flow control * @hw: pointer to hardware structure * - * This function returns TRUE if the device supports flow control - * autonegotiation, and FALSE if it does not. + * This function returns true if the device supports flow control + * autonegotiation, and false if it does not. 
* **/ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) { - bool supported = FALSE; + bool supported = false; ixgbe_link_speed speed; bool link_up; DEBUGFUNC("ixgbe_device_supports_autoneg_fc"); switch (hw->phy.media_type) { case ixgbe_media_type_fiber_fixed: case ixgbe_media_type_fiber_qsfp: case ixgbe_media_type_fiber: /* flow control autoneg black list */ switch (hw->device_id) { case IXGBE_DEV_ID_X550EM_A_SFP: case IXGBE_DEV_ID_X550EM_A_SFP_N: case IXGBE_DEV_ID_X550EM_A_QSFP: case IXGBE_DEV_ID_X550EM_A_QSFP_N: - supported = FALSE; + supported = false; break; default: - hw->mac.ops.check_link(hw, &speed, &link_up, FALSE); + hw->mac.ops.check_link(hw, &speed, &link_up, false); /* if link is down, assume supported */ if (link_up) supported = speed == IXGBE_LINK_SPEED_1GB_FULL ? - TRUE : FALSE; + true : false; else - supported = TRUE; + supported = true; } break; case ixgbe_media_type_backplane: if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI) - supported = FALSE; + supported = false; else - supported = TRUE; + supported = true; break; case ixgbe_media_type_copper: /* only some copper devices support flow control autoneg */ switch (hw->device_id) { case IXGBE_DEV_ID_82599_T3_LOM: case IXGBE_DEV_ID_X540T: case IXGBE_DEV_ID_X540T1: case IXGBE_DEV_ID_X540_BYPASS: case IXGBE_DEV_ID_X550T: case IXGBE_DEV_ID_X550T1: case IXGBE_DEV_ID_X550EM_X_10G_T: case IXGBE_DEV_ID_X550EM_A_10G_T: case IXGBE_DEV_ID_X550EM_A_1G_T: case IXGBE_DEV_ID_X550EM_A_1G_T_L: - supported = TRUE; + supported = true; break; default: - supported = FALSE; + supported = false; } default: break; } if (!supported) ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED, "Device %x does not support flow control autoneg", hw->device_id); return supported; } /** * ixgbe_setup_fc_generic - Set up flow control * @hw: pointer to hardware structure * * Called at init time to set up flow control. **/ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw) { s32 ret_val = IXGBE_SUCCESS; u32 reg = 0, reg_bp = 0; u16 reg_cu = 0; - bool locked = FALSE; + bool locked = false; DEBUGFUNC("ixgbe_setup_fc_generic"); /* Validate the requested mode */ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; goto out; } /* * 10gig parts do not have a word in the EEPROM to determine the * default flow control setting, so we explicitly set it to full. */ if (hw->fc.requested_mode == ixgbe_fc_default) hw->fc.requested_mode = ixgbe_fc_full; /* * Set up the 1G and 10G flow control advertisement registers so the * HW will be able to do fc autoneg once the cable is plugged in. If * we link at 10G, the 1G advertisement is harmless and vice versa. */ switch (hw->phy.media_type) { case ixgbe_media_type_backplane: /* some MAC's need RMW protection on AUTOC */ ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, ®_bp); if (ret_val != IXGBE_SUCCESS) goto out; /* only backplane uses autoc */ /* FALLTHROUGH */ case ixgbe_media_type_fiber_fixed: case ixgbe_media_type_fiber_qsfp: case ixgbe_media_type_fiber: reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); break; case ixgbe_media_type_copper: hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®_cu); break; default: break; } /* * The possible values of fc.requested_mode are: * 0: Flow control is completely disabled * 1: Rx flow control is enabled (we can receive pause frames, * but not send pause frames). 
* 2: Tx flow control is enabled (we can send pause frames but * we do not support receiving pause frames). * 3: Both Rx and Tx flow control (symmetric) are enabled. * other: Invalid. */ switch (hw->fc.requested_mode) { case ixgbe_fc_none: /* Flow control completely disabled by software override. */ reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); if (hw->phy.media_type == ixgbe_media_type_backplane) reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE); else if (hw->phy.media_type == ixgbe_media_type_copper) reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); break; case ixgbe_fc_tx_pause: /* * Tx Flow control is enabled, and Rx Flow control is * disabled by software override. */ reg |= IXGBE_PCS1GANA_ASM_PAUSE; reg &= ~IXGBE_PCS1GANA_SYM_PAUSE; if (hw->phy.media_type == ixgbe_media_type_backplane) { reg_bp |= IXGBE_AUTOC_ASM_PAUSE; reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE; } else if (hw->phy.media_type == ixgbe_media_type_copper) { reg_cu |= IXGBE_TAF_ASM_PAUSE; reg_cu &= ~IXGBE_TAF_SYM_PAUSE; } break; case ixgbe_fc_rx_pause: /* * Rx Flow control is enabled and Tx Flow control is * disabled by software override. Since there really * isn't a way to advertise that we are capable of RX * Pause ONLY, we will advertise that we support both * symmetric and asymmetric Rx PAUSE, as such we fall * through to the fc_full statement. Later, we will * disable the adapter's ability to send PAUSE frames. */ case ixgbe_fc_full: /* Flow control (both Rx and Tx) is enabled by SW override. */ reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE; if (hw->phy.media_type == ixgbe_media_type_backplane) reg_bp |= IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE; else if (hw->phy.media_type == ixgbe_media_type_copper) reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE; break; default: ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Flow control param set incorrectly\n"); ret_val = IXGBE_ERR_CONFIG; goto out; break; } if (hw->mac.type < ixgbe_mac_X540) { /* * Enable auto-negotiation between the MAC & PHY; * the MAC will advertise clause 37 flow control. */ IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg); reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); /* Disable AN timeout */ if (hw->fc.strict_ieee) reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg); DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg); } /* * AUTOC restart handles negotiation of 1G and 10G on backplane * and copper. There is no need to set the PCS1GCTL register. 
* */ if (hw->phy.media_type == ixgbe_media_type_backplane) { reg_bp |= IXGBE_AUTOC_AN_RESTART; ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked); if (ret_val) goto out; } else if ((hw->phy.media_type == ixgbe_media_type_copper) && (ixgbe_device_supports_autoneg_fc(hw))) { hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu); } DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg); out: return ret_val; } /** * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx * @hw: pointer to hardware structure * * Starts the hardware by filling the bus info structure and media type, clears * all on chip counters, initializes receive address registers, multicast * table, VLAN filter table, calls routine to set up link and flow control * settings, and leaves transmit and receive units disabled and uninitialized **/ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) { s32 ret_val; u32 ctrl_ext; u16 device_caps; DEBUGFUNC("ixgbe_start_hw_generic"); /* Set the media type */ hw->phy.media_type = hw->mac.ops.get_media_type(hw); /* PHY ops initialization must be done in reset_hw() */ /* Clear the VLAN filter table */ hw->mac.ops.clear_vfta(hw); /* Clear statistics registers */ hw->mac.ops.clear_hw_cntrs(hw); /* Set No Snoop Disable */ ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS; IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); IXGBE_WRITE_FLUSH(hw); /* Setup flow control */ ret_val = ixgbe_setup_fc(hw); if (ret_val != IXGBE_SUCCESS && ret_val != IXGBE_NOT_IMPLEMENTED) { DEBUGOUT1("Flow control setup failed, returning %d\n", ret_val); return ret_val; } /* Cache bit indicating need for crosstalk fix */ switch (hw->mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X550EM_x: case ixgbe_mac_X550EM_a: hw->mac.ops.get_device_caps(hw, &device_caps); if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR) - hw->need_crosstalk_fix = FALSE; + hw->need_crosstalk_fix = false; else - hw->need_crosstalk_fix = TRUE; + hw->need_crosstalk_fix = true; break; default: - hw->need_crosstalk_fix = FALSE; + hw->need_crosstalk_fix = false; break; } /* Clear adapter stopped flag */ - hw->adapter_stopped = FALSE; + hw->adapter_stopped = false; return IXGBE_SUCCESS; } /** * ixgbe_start_hw_gen2 - Init sequence for common device family * @hw: pointer to hw structure * * Performs the init sequence common to the second generation * of 10 GbE devices. 
* Devices in the second generation: * 82599 * X540 **/ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) { u32 i; u32 regval; /* Clear the rate limiters */ for (i = 0; i < hw->mac.max_tx_queues; i++) { IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i); IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0); } IXGBE_WRITE_FLUSH(hw); /* Disable relaxed ordering */ for (i = 0; i < hw->mac.max_tx_queues; i++) { regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); } for (i = 0; i < hw->mac.max_rx_queues; i++) { regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | IXGBE_DCA_RXCTRL_HEAD_WRO_EN); IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); } return IXGBE_SUCCESS; } /** * ixgbe_init_hw_generic - Generic hardware initialization * @hw: pointer to hardware structure * * Initialize the hardware by resetting the hardware, filling the bus info * structure and media type, clears all on chip counters, initializes receive * address registers, multicast table, VLAN filter table, calls routine to set * up link and flow control settings, and leaves transmit and receive units * disabled and uninitialized **/ s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw) { s32 status; DEBUGFUNC("ixgbe_init_hw_generic"); /* Reset the hardware */ status = hw->mac.ops.reset_hw(hw); if (status == IXGBE_SUCCESS || status == IXGBE_ERR_SFP_NOT_PRESENT) { /* Start the HW */ status = hw->mac.ops.start_hw(hw); } /* Initialize the LED link active for LED blink support */ if (hw->mac.ops.init_led_link_act) hw->mac.ops.init_led_link_act(hw); if (status != IXGBE_SUCCESS) DEBUGOUT1("Failed to initialize HW, STATUS = %d\n", status); return status; } /** * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters * @hw: pointer to hardware structure * * Clears all hardware statistics counters by reading them from the hardware * Statistics counters are clear on read. 
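 *
 * Because the counters are clear-on-read, each read returns the count
 * accumulated since the previous read. For example:
 *
 *	crcerrs = IXGBE_READ_REG(hw, IXGBE_CRCERRS);
 *	(an immediate second read returns 0 unless new CRC errors arrived)
 *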
**/ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) { u16 i = 0; DEBUGFUNC("ixgbe_clear_hw_cntrs_generic"); IXGBE_READ_REG(hw, IXGBE_CRCERRS); IXGBE_READ_REG(hw, IXGBE_ILLERRC); IXGBE_READ_REG(hw, IXGBE_ERRBC); IXGBE_READ_REG(hw, IXGBE_MSPDC); for (i = 0; i < 8; i++) IXGBE_READ_REG(hw, IXGBE_MPC(i)); IXGBE_READ_REG(hw, IXGBE_MLFC); IXGBE_READ_REG(hw, IXGBE_MRFC); IXGBE_READ_REG(hw, IXGBE_RLEC); IXGBE_READ_REG(hw, IXGBE_LXONTXC); IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); if (hw->mac.type >= ixgbe_mac_82599EB) { IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); } else { IXGBE_READ_REG(hw, IXGBE_LXONRXC); IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); } for (i = 0; i < 8; i++) { IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); if (hw->mac.type >= ixgbe_mac_82599EB) { IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); } else { IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); } } if (hw->mac.type >= ixgbe_mac_82599EB) for (i = 0; i < 8; i++) IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); IXGBE_READ_REG(hw, IXGBE_PRC64); IXGBE_READ_REG(hw, IXGBE_PRC127); IXGBE_READ_REG(hw, IXGBE_PRC255); IXGBE_READ_REG(hw, IXGBE_PRC511); IXGBE_READ_REG(hw, IXGBE_PRC1023); IXGBE_READ_REG(hw, IXGBE_PRC1522); IXGBE_READ_REG(hw, IXGBE_GPRC); IXGBE_READ_REG(hw, IXGBE_BPRC); IXGBE_READ_REG(hw, IXGBE_MPRC); IXGBE_READ_REG(hw, IXGBE_GPTC); IXGBE_READ_REG(hw, IXGBE_GORCL); IXGBE_READ_REG(hw, IXGBE_GORCH); IXGBE_READ_REG(hw, IXGBE_GOTCL); IXGBE_READ_REG(hw, IXGBE_GOTCH); if (hw->mac.type == ixgbe_mac_82598EB) for (i = 0; i < 8; i++) IXGBE_READ_REG(hw, IXGBE_RNBC(i)); IXGBE_READ_REG(hw, IXGBE_RUC); IXGBE_READ_REG(hw, IXGBE_RFC); IXGBE_READ_REG(hw, IXGBE_ROC); IXGBE_READ_REG(hw, IXGBE_RJC); IXGBE_READ_REG(hw, IXGBE_MNGPRC); IXGBE_READ_REG(hw, IXGBE_MNGPDC); IXGBE_READ_REG(hw, IXGBE_MNGPTC); IXGBE_READ_REG(hw, IXGBE_TORL); IXGBE_READ_REG(hw, IXGBE_TORH); IXGBE_READ_REG(hw, IXGBE_TPR); IXGBE_READ_REG(hw, IXGBE_TPT); IXGBE_READ_REG(hw, IXGBE_PTC64); IXGBE_READ_REG(hw, IXGBE_PTC127); IXGBE_READ_REG(hw, IXGBE_PTC255); IXGBE_READ_REG(hw, IXGBE_PTC511); IXGBE_READ_REG(hw, IXGBE_PTC1023); IXGBE_READ_REG(hw, IXGBE_PTC1522); IXGBE_READ_REG(hw, IXGBE_MPTC); IXGBE_READ_REG(hw, IXGBE_BPTC); for (i = 0; i < 16; i++) { IXGBE_READ_REG(hw, IXGBE_QPRC(i)); IXGBE_READ_REG(hw, IXGBE_QPTC(i)); if (hw->mac.type >= ixgbe_mac_82599EB) { IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); } else { IXGBE_READ_REG(hw, IXGBE_QBRC(i)); IXGBE_READ_REG(hw, IXGBE_QBTC(i)); } } if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) { if (hw->phy.id == 0) ixgbe_identify_phy(hw); hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, IXGBE_MDIO_PCS_DEV_TYPE, &i); hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, IXGBE_MDIO_PCS_DEV_TYPE, &i); hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, IXGBE_MDIO_PCS_DEV_TYPE, &i); hw->phy.ops.read_reg(hw, IXGBE_LDPCECH, IXGBE_MDIO_PCS_DEV_TYPE, &i); } return IXGBE_SUCCESS; } /** * ixgbe_read_pba_string_generic - Reads part number string from EEPROM * @hw: pointer to hardware structure * @pba_num: stores the part number string from the EEPROM * @pba_num_size: part number string buffer length * * Reads the part number string from the EEPROM. 
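 *
 * Worked example for the legacy (non-string) format handled below:
 * with data = 0x1234 and pba_ptr = 0x5678, the nibbles expand to
 * {1, 2, 3, 4, 5, 6, '-', 0, 7, 8} and, after the hex-to-ASCII pass,
 * the resulting string is "123456-078".
 *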
**/ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size) { s32 ret_val; u16 data; u16 pba_ptr; u16 offset; u16 length; DEBUGFUNC("ixgbe_read_pba_string_generic"); if (pba_num == NULL) { DEBUGOUT("PBA string buffer was null\n"); return IXGBE_ERR_INVALID_ARGUMENT; } ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); if (ret_val) { DEBUGOUT("NVM Read Error\n"); return ret_val; } ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr); if (ret_val) { DEBUGOUT("NVM Read Error\n"); return ret_val; } /* * if data is not ptr guard the PBA must be in legacy format which * means pba_ptr is actually our second data word for the PBA number * and we can decode it into an ascii string */ if (data != IXGBE_PBANUM_PTR_GUARD) { DEBUGOUT("NVM PBA number is not stored as string\n"); /* we will need 11 characters to store the PBA */ if (pba_num_size < 11) { DEBUGOUT("PBA string buffer too small\n"); return IXGBE_ERR_NO_SPACE; } /* extract hex string from data and pba_ptr */ pba_num[0] = (data >> 12) & 0xF; pba_num[1] = (data >> 8) & 0xF; pba_num[2] = (data >> 4) & 0xF; pba_num[3] = data & 0xF; pba_num[4] = (pba_ptr >> 12) & 0xF; pba_num[5] = (pba_ptr >> 8) & 0xF; pba_num[6] = '-'; pba_num[7] = 0; pba_num[8] = (pba_ptr >> 4) & 0xF; pba_num[9] = pba_ptr & 0xF; /* put a null character on the end of our string */ pba_num[10] = '\0'; /* switch all the data but the '-' to hex char */ for (offset = 0; offset < 10; offset++) { if (pba_num[offset] < 0xA) pba_num[offset] += '0'; else if (pba_num[offset] < 0x10) pba_num[offset] += 'A' - 0xA; } return IXGBE_SUCCESS; } ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length); if (ret_val) { DEBUGOUT("NVM Read Error\n"); return ret_val; } if (length == 0xFFFF || length == 0) { DEBUGOUT("NVM PBA number section invalid length\n"); return IXGBE_ERR_PBA_SECTION; } /* check if pba_num buffer is big enough */ if (pba_num_size < (((u32)length * 2) - 1)) { DEBUGOUT("PBA string buffer too small\n"); return IXGBE_ERR_NO_SPACE; } /* trim pba length from start of string */ pba_ptr++; length--; for (offset = 0; offset < length; offset++) { ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data); if (ret_val) { DEBUGOUT("NVM Read Error\n"); return ret_val; } pba_num[offset * 2] = (u8)(data >> 8); pba_num[(offset * 2) + 1] = (u8)(data & 0xFF); } pba_num[offset * 2] = '\0'; return IXGBE_SUCCESS; } /** * ixgbe_read_pba_num_generic - Reads part number from EEPROM * @hw: pointer to hardware structure * @pba_num: stores the part number from the EEPROM * * Reads the part number from the EEPROM. **/ s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num) { s32 ret_val; u16 data; DEBUGFUNC("ixgbe_read_pba_num_generic"); ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); if (ret_val) { DEBUGOUT("NVM Read Error\n"); return ret_val; } else if (data == IXGBE_PBANUM_PTR_GUARD) { DEBUGOUT("NVM Not supported\n"); return IXGBE_NOT_IMPLEMENTED; } *pba_num = (u32)(data << 16); ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data); if (ret_val) { DEBUGOUT("NVM Read Error\n"); return ret_val; } *pba_num |= (u32)data; return IXGBE_SUCCESS; } /** * ixgbe_read_pba_raw * @hw: pointer to the HW structure * @eeprom_buf: optional pointer to EEPROM image * @eeprom_buf_size: size of EEPROM image in words * @max_pba_block_size: PBA block size limit * @pba: pointer to output PBA structure * * Reads PBA from EEPROM image when eeprom_buf is not NULL. * Reads PBA from physical EEPROM device when eeprom_buf is NULL. 
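 *
 * Usage sketch for the physical-device path (EXAMPLE_PBA_WORDS is a
 * hypothetical caller-chosen limit, not a macro from this driver):
 *
 *	u16 block[EXAMPLE_PBA_WORDS];
 *	struct ixgbe_pba pba;
 *
 *	pba.pba_block = block;
 *	ret_val = ixgbe_read_pba_raw(hw, NULL, 0, EXAMPLE_PBA_WORDS, &pba);
 *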
* **/ s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf, u32 eeprom_buf_size, u16 max_pba_block_size, struct ixgbe_pba *pba) { s32 ret_val; u16 pba_block_size; if (pba == NULL) return IXGBE_ERR_PARAM; if (eeprom_buf == NULL) { ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2, &pba->word[0]); if (ret_val) return ret_val; } else { if (eeprom_buf_size > IXGBE_PBANUM1_PTR) { pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR]; pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR]; } else { return IXGBE_ERR_PARAM; } } if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) { if (pba->pba_block == NULL) return IXGBE_ERR_PARAM; ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf, eeprom_buf_size, &pba_block_size); if (ret_val) return ret_val; if (pba_block_size > max_pba_block_size) return IXGBE_ERR_PARAM; if (eeprom_buf == NULL) { ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1], pba_block_size, pba->pba_block); if (ret_val) return ret_val; } else { if (eeprom_buf_size > (u32)(pba->word[1] + pba_block_size)) { memcpy(pba->pba_block, &eeprom_buf[pba->word[1]], pba_block_size * sizeof(u16)); } else { return IXGBE_ERR_PARAM; } } } return IXGBE_SUCCESS; } /** * ixgbe_write_pba_raw * @hw: pointer to the HW structure * @eeprom_buf: optional pointer to EEPROM image * @eeprom_buf_size: size of EEPROM image in words * @pba: pointer to PBA structure * * Writes PBA to EEPROM image when eeprom_buf is not NULL. * Writes PBA to physical EEPROM device when eeprom_buf is NULL. * **/ s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf, u32 eeprom_buf_size, struct ixgbe_pba *pba) { s32 ret_val; if (pba == NULL) return IXGBE_ERR_PARAM; if (eeprom_buf == NULL) { ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2, &pba->word[0]); if (ret_val) return ret_val; } else { if (eeprom_buf_size > IXGBE_PBANUM1_PTR) { eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0]; eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1]; } else { return IXGBE_ERR_PARAM; } } if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) { if (pba->pba_block == NULL) return IXGBE_ERR_PARAM; if (eeprom_buf == NULL) { ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1], pba->pba_block[0], pba->pba_block); if (ret_val) return ret_val; } else { if (eeprom_buf_size > (u32)(pba->word[1] + pba->pba_block[0])) { memcpy(&eeprom_buf[pba->word[1]], pba->pba_block, pba->pba_block[0] * sizeof(u16)); } else { return IXGBE_ERR_PARAM; } } } return IXGBE_SUCCESS; } /** * ixgbe_get_pba_block_size * @hw: pointer to the HW structure * @eeprom_buf: optional pointer to EEPROM image * @eeprom_buf_size: size of EEPROM image in words * @pba_data_size: pointer to output variable * * Returns the size of the PBA block in words. Function operates on EEPROM * image if the eeprom_buf pointer is not NULL otherwise it accesses physical * EEPROM device. 
* **/ s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf, u32 eeprom_buf_size, u16 *pba_block_size) { s32 ret_val; u16 pba_word[2]; u16 length; DEBUGFUNC("ixgbe_get_pba_block_size"); if (eeprom_buf == NULL) { ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2, &pba_word[0]); if (ret_val) return ret_val; } else { if (eeprom_buf_size > IXGBE_PBANUM1_PTR) { pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR]; pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR]; } else { return IXGBE_ERR_PARAM; } } if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) { if (eeprom_buf == NULL) { ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0, &length); if (ret_val) return ret_val; } else { if (eeprom_buf_size > pba_word[1]) length = eeprom_buf[pba_word[1] + 0]; else return IXGBE_ERR_PARAM; } if (length == 0xFFFF || length == 0) return IXGBE_ERR_PBA_SECTION; } else { /* PBA number in legacy format, there is no PBA Block. */ length = 0; } if (pba_block_size != NULL) *pba_block_size = length; return IXGBE_SUCCESS; } /** * ixgbe_get_mac_addr_generic - Generic get MAC address * @hw: pointer to hardware structure * @mac_addr: Adapter MAC address * * Reads the adapter's MAC address from first Receive Address Register (RAR0) * A reset of the adapter must be performed prior to calling this function * in order for the MAC address to have been loaded from the EEPROM into RAR0 **/ s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr) { u32 rar_high; u32 rar_low; u16 i; DEBUGFUNC("ixgbe_get_mac_addr_generic"); rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0)); rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0)); for (i = 0; i < 4; i++) mac_addr[i] = (u8)(rar_low >> (i*8)); for (i = 0; i < 2; i++) mac_addr[i+4] = (u8)(rar_high >> (i*8)); return IXGBE_SUCCESS; } /** * ixgbe_set_pci_config_data_generic - Generic store PCI bus info * @hw: pointer to hardware structure * @link_status: the link status returned by the PCI config space * * Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure **/ void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status) { struct ixgbe_mac_info *mac = &hw->mac; if (hw->bus.type == ixgbe_bus_type_unknown) hw->bus.type = ixgbe_bus_type_pci_express; switch (link_status & IXGBE_PCI_LINK_WIDTH) { case IXGBE_PCI_LINK_WIDTH_1: hw->bus.width = ixgbe_bus_width_pcie_x1; break; case IXGBE_PCI_LINK_WIDTH_2: hw->bus.width = ixgbe_bus_width_pcie_x2; break; case IXGBE_PCI_LINK_WIDTH_4: hw->bus.width = ixgbe_bus_width_pcie_x4; break; case IXGBE_PCI_LINK_WIDTH_8: hw->bus.width = ixgbe_bus_width_pcie_x8; break; default: hw->bus.width = ixgbe_bus_width_unknown; break; } switch (link_status & IXGBE_PCI_LINK_SPEED) { case IXGBE_PCI_LINK_SPEED_2500: hw->bus.speed = ixgbe_bus_speed_2500; break; case IXGBE_PCI_LINK_SPEED_5000: hw->bus.speed = ixgbe_bus_speed_5000; break; case IXGBE_PCI_LINK_SPEED_8000: hw->bus.speed = ixgbe_bus_speed_8000; break; default: hw->bus.speed = ixgbe_bus_speed_unknown; break; } mac->ops.set_lan_id(hw); } /** * ixgbe_get_bus_info_generic - Generic set PCI bus info * @hw: pointer to hardware structure * * Gets the PCI bus info (speed, width, type) then calls helper function to * store this data within the ixgbe_hw structure. 
**/ s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw) { u16 link_status; DEBUGFUNC("ixgbe_get_bus_info_generic"); /* Get the negotiated link width and speed from PCI config space */ link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS); ixgbe_set_pci_config_data_generic(hw, link_status); return IXGBE_SUCCESS; } /** * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices * @hw: pointer to the HW structure * * Determines the LAN function id by reading memory-mapped registers and swaps * the port value if requested, and sets the MAC instance for devices that * share CS4227. **/ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw) { struct ixgbe_bus_info *bus = &hw->bus; u32 reg; u16 ee_ctrl_4; DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie"); reg = IXGBE_READ_REG(hw, IXGBE_STATUS); bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT; bus->lan_id = (u8)bus->func; /* check for a port swap */ reg = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw)); if (reg & IXGBE_FACTPS_LFS) bus->func ^= 0x1; /* Get MAC instance from EEPROM for configuring CS4227 */ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) { hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4); bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >> IXGBE_EE_CTRL_4_INST_ID_SHIFT; } } /** * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units * @hw: pointer to hardware structure * * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, * disables transmit and receive units. The adapter_stopped flag is used by * the shared code and drivers to determine if the adapter is in a stopped * state and should not touch the hardware. **/ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) { u32 reg_val; u16 i; DEBUGFUNC("ixgbe_stop_adapter_generic"); /* * Set the adapter_stopped flag so other driver functions stop touching * the hardware */ - hw->adapter_stopped = TRUE; + hw->adapter_stopped = true; /* Disable the receive unit */ ixgbe_disable_rx(hw); /* Clear interrupt mask to stop interrupts from being generated */ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); /* Clear any pending interrupts, flush previous writes */ IXGBE_READ_REG(hw, IXGBE_EICR); /* Disable the transmit unit. Each queue must be disabled. */ for (i = 0; i < hw->mac.max_tx_queues; i++) IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH); /* Disable the receive unit by stopping each queue */ for (i = 0; i < hw->mac.max_rx_queues; i++) { reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); reg_val &= ~IXGBE_RXDCTL_ENABLE; reg_val |= IXGBE_RXDCTL_SWFLSH; IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val); } /* flush all queue disables */ IXGBE_WRITE_FLUSH(hw); msec_delay(2); /* * Prevent the PCI-E bus from hanging by disabling PCI-E master * access and verify no pending requests */ return ixgbe_disable_pcie_master(hw); } /** * ixgbe_init_led_link_act_generic - Store the LED index link/activity. * @hw: pointer to hardware structure * * Store the index for the link active LED. This will be used to support * blinking the LED.
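 *
 * The stored index is what later LED-identify requests are assumed to
 * drive, e.g. (sketch):
 *
 *	ixgbe_blink_led_start(hw, hw->mac.led_link_act);
 *	... port identified by the operator ...
 *	ixgbe_blink_led_stop(hw, hw->mac.led_link_act);
 *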
**/ s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; u32 led_reg, led_mode; u8 i; led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); /* Get LED link active from the LEDCTL register */ for (i = 0; i < 4; i++) { led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i); if ((led_mode & IXGBE_LED_MODE_MASK_BASE) == IXGBE_LED_LINK_ACTIVE) { mac->led_link_act = i; return IXGBE_SUCCESS; } } /* * If LEDCTL register does not have the LED link active set, then use * known MAC defaults. */ switch (hw->mac.type) { case ixgbe_mac_X550EM_a: case ixgbe_mac_X550EM_x: mac->led_link_act = 1; break; default: mac->led_link_act = 2; } return IXGBE_SUCCESS; } /** * ixgbe_led_on_generic - Turns on the software controllable LEDs. * @hw: pointer to hardware structure * @index: led number to turn on **/ s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) { u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); DEBUGFUNC("ixgbe_led_on_generic"); if (index > 3) return IXGBE_ERR_PARAM; /* To turn on the LED, set mode to ON. */ led_reg &= ~IXGBE_LED_MODE_MASK(index); led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index); IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); IXGBE_WRITE_FLUSH(hw); return IXGBE_SUCCESS; } /** * ixgbe_led_off_generic - Turns off the software controllable LEDs. * @hw: pointer to hardware structure * @index: led number to turn off **/ s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) { u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); DEBUGFUNC("ixgbe_led_off_generic"); if (index > 3) return IXGBE_ERR_PARAM; /* To turn off the LED, set mode to OFF. */ led_reg &= ~IXGBE_LED_MODE_MASK(index); led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index); IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); IXGBE_WRITE_FLUSH(hw); return IXGBE_SUCCESS; } /** * ixgbe_init_eeprom_params_generic - Initialize EEPROM params * @hw: pointer to hardware structure * * Initializes the EEPROM parameters ixgbe_eeprom_info within the * ixgbe_hw struct in order to set up EEPROM access. **/ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) { struct ixgbe_eeprom_info *eeprom = &hw->eeprom; u32 eec; u16 eeprom_size; DEBUGFUNC("ixgbe_init_eeprom_params_generic"); if (eeprom->type == ixgbe_eeprom_uninitialized) { eeprom->type = ixgbe_eeprom_none; /* Set default semaphore delay to 10ms which is a well * tested value */ eeprom->semaphore_delay = 10; /* Clear EEPROM page size, it will be initialized as needed */ eeprom->word_page_size = 0; /* * Check for EEPROM present first. * If not present leave as none */ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); if (eec & IXGBE_EEC_PRES) { eeprom->type = ixgbe_eeprom_spi; /* * SPI EEPROM is assumed here. This code would need to * change if a future EEPROM is not SPI. 
*/ eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> IXGBE_EEC_SIZE_SHIFT); eeprom->word_size = 1 << (eeprom_size + IXGBE_EEPROM_WORD_SIZE_SHIFT); } if (eec & IXGBE_EEC_ADDR_SIZE) eeprom->address_bits = 16; else eeprom->address_bits = 8; DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: " "%d\n", eeprom->type, eeprom->word_size, eeprom->address_bits); } return IXGBE_SUCCESS; } /** * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang * @hw: pointer to hardware structure * @offset: offset within the EEPROM to write * @words: number of word(s) * @data: 16 bit word(s) to write to EEPROM * * Writes 16 bit word(s) to EEPROM through bit-bang method **/ s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { s32 status = IXGBE_SUCCESS; u16 i, count; DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic"); hw->eeprom.ops.init_params(hw); if (words == 0) { status = IXGBE_ERR_INVALID_ARGUMENT; goto out; } if (offset + words > hw->eeprom.word_size) { status = IXGBE_ERR_EEPROM; goto out; } /* * The EEPROM page size cannot be queried from the chip. We do lazy * initialization. It is worth doing when we write a large buffer. */ if ((hw->eeprom.word_page_size == 0) && (words > IXGBE_EEPROM_PAGE_SIZE_MAX)) ixgbe_detect_eeprom_page_size_generic(hw, offset); /* * We cannot hold synchronization semaphores for too long * to avoid other entity starvation. However it is more efficient * to write in bursts than synchronizing access for each word. */ for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ? IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i, count, &data[i]); if (status != IXGBE_SUCCESS) break; } out: return status; } /** * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM * @hw: pointer to hardware structure * @offset: offset within the EEPROM to be written to * @words: number of word(s) * @data: 16 bit word(s) to be written to the EEPROM * * If ixgbe_eeprom_update_checksum is not called after this function, the * EEPROM will most likely contain an invalid checksum.
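 *
 * Callers are therefore expected to pair writes with a checksum
 * update (sketch of the assumed pairing):
 *
 *	ret_val = ixgbe_write_eeprom_generic(hw, offset, data);
 *	if (ret_val == IXGBE_SUCCESS)
 *		ret_val = hw->eeprom.ops.update_checksum(hw);
 *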
**/ static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { s32 status; u16 word; u16 page_size; u16 i; u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI; DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang"); /* Prepare the EEPROM for writing */ status = ixgbe_acquire_eeprom(hw); if (status == IXGBE_SUCCESS) { if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) { ixgbe_release_eeprom(hw); status = IXGBE_ERR_EEPROM; } } if (status == IXGBE_SUCCESS) { for (i = 0; i < words; i++) { ixgbe_standby_eeprom(hw); /* Send the WRITE ENABLE command (8 bit opcode ) */ ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_WREN_OPCODE_SPI, IXGBE_EEPROM_OPCODE_BITS); ixgbe_standby_eeprom(hw); /* * Some SPI eeproms use the 8th address bit embedded * in the opcode */ if ((hw->eeprom.address_bits == 8) && ((offset + i) >= 128)) write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; /* Send the Write command (8-bit opcode + addr) */ ixgbe_shift_out_eeprom_bits(hw, write_opcode, IXGBE_EEPROM_OPCODE_BITS); ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), hw->eeprom.address_bits); page_size = hw->eeprom.word_page_size; /* Send the data in burst via SPI*/ do { word = data[i]; word = (word >> 8) | (word << 8); ixgbe_shift_out_eeprom_bits(hw, word, 16); if (page_size == 0) break; /* do not wrap around page */ if (((offset + i) & (page_size - 1)) == (page_size - 1)) break; } while (++i < words); ixgbe_standby_eeprom(hw); msec_delay(10); } /* Done with writing - release the EEPROM */ ixgbe_release_eeprom(hw); } return status; } /** * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM * @hw: pointer to hardware structure * @offset: offset within the EEPROM to be written to * @data: 16 bit word to be written to the EEPROM * * If ixgbe_eeprom_update_checksum is not called after this function, the * EEPROM will most likely contain an invalid checksum. **/ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data) { s32 status; DEBUGFUNC("ixgbe_write_eeprom_generic"); hw->eeprom.ops.init_params(hw); if (offset >= hw->eeprom.word_size) { status = IXGBE_ERR_EEPROM; goto out; } status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data); out: return status; } /** * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang * @hw: pointer to hardware structure * @offset: offset within the EEPROM to be read * @data: read 16 bit words(s) from EEPROM * @words: number of word(s) * * Reads 16 bit word(s) from EEPROM through bit-bang method **/ s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { s32 status = IXGBE_SUCCESS; u16 i, count; DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic"); hw->eeprom.ops.init_params(hw); if (words == 0) { status = IXGBE_ERR_INVALID_ARGUMENT; goto out; } if (offset + words > hw->eeprom.word_size) { status = IXGBE_ERR_EEPROM; goto out; } /* * We cannot hold synchronization semaphores for too long * to avoid other entity starvation. However it is more efficient * to read in bursts than synchronizing access for each word. */ for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ? 
IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i, count, &data[i]); if (status != IXGBE_SUCCESS) break; } out: return status; } /** * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang * @hw: pointer to hardware structure * @offset: offset within the EEPROM to be read * @words: number of word(s) * @data: read 16 bit word(s) from EEPROM * * Reads 16 bit word(s) from EEPROM through bit-bang method **/ static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { s32 status; u16 word_in; u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI; u16 i; DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang"); /* Prepare the EEPROM for reading */ status = ixgbe_acquire_eeprom(hw); if (status == IXGBE_SUCCESS) { if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) { ixgbe_release_eeprom(hw); status = IXGBE_ERR_EEPROM; } } if (status == IXGBE_SUCCESS) { for (i = 0; i < words; i++) { ixgbe_standby_eeprom(hw); /* * Some SPI eeproms use the 8th address bit embedded * in the opcode */ if ((hw->eeprom.address_bits == 8) && ((offset + i) >= 128)) read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; /* Send the READ command (opcode + addr) */ ixgbe_shift_out_eeprom_bits(hw, read_opcode, IXGBE_EEPROM_OPCODE_BITS); ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), hw->eeprom.address_bits); /* Read the data. */ word_in = ixgbe_shift_in_eeprom_bits(hw, 16); data[i] = (word_in >> 8) | (word_in << 8); } /* End this read operation */ ixgbe_release_eeprom(hw); } return status; } /** * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang * @hw: pointer to hardware structure * @offset: offset within the EEPROM to be read * @data: read 16 bit value from EEPROM * * Reads 16 bit value from EEPROM through bit-bang method **/ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) { s32 status; DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic"); hw->eeprom.ops.init_params(hw); if (offset >= hw->eeprom.word_size) { status = IXGBE_ERR_EEPROM; goto out; } status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); out: return status; } /** * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD * @hw: pointer to hardware structure * @offset: offset of word in the EEPROM to read * @words: number of word(s) * @data: 16 bit word(s) from the EEPROM * * Reads a 16 bit word(s) from the EEPROM using the EERD register. 
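 *
 * Usage sketch (the offset and word count are illustrative, not taken
 * from the NVM map):
 *
 *	u16 words[8];
 *	if (ixgbe_read_eerd_buffer_generic(hw, 0x2A, 8, words) !=
 *	    IXGBE_SUCCESS)
 *		DEBUGOUT("EERD burst read failed\n");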
**/ s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { u32 eerd; s32 status = IXGBE_SUCCESS; u32 i; DEBUGFUNC("ixgbe_read_eerd_buffer_generic"); hw->eeprom.ops.init_params(hw); if (words == 0) { status = IXGBE_ERR_INVALID_ARGUMENT; ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words"); goto out; } if (offset >= hw->eeprom.word_size) { status = IXGBE_ERR_EEPROM; ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset"); goto out; } for (i = 0; i < words; i++) { eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) | IXGBE_EEPROM_RW_REG_START; IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd); status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ); if (status == IXGBE_SUCCESS) { data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >> IXGBE_EEPROM_RW_REG_DATA); } else { DEBUGOUT("Eeprom read timed out\n"); goto out; } } out: return status; } /** * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size * @hw: pointer to hardware structure * @offset: offset within the EEPROM to be used as a scratch pad * * Discover EEPROM page size by writing marching data at given offset. * This function is called only when we are writing a new large buffer * at given offset so the data would be overwritten anyway. **/ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, u16 offset) { u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX]; s32 status = IXGBE_SUCCESS; u16 i; DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic"); for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++) data[i] = i; hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX; status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, IXGBE_EEPROM_PAGE_SIZE_MAX, data); hw->eeprom.word_page_size = 0; if (status != IXGBE_SUCCESS) goto out; status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); if (status != IXGBE_SUCCESS) goto out; /* * When writing in burst more than the actual page size * EEPROM address wraps around current page. */ hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0]; DEBUGOUT1("Detected EEPROM page size = %d words.", hw->eeprom.word_page_size); out: return status; } /** * ixgbe_read_eerd_generic - Read EEPROM word using EERD * @hw: pointer to hardware structure * @offset: offset of word in the EEPROM to read * @data: word read from the EEPROM * * Reads a 16 bit word from the EEPROM using the EERD register. **/ s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) { return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data); } /** * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR * @hw: pointer to hardware structure * @offset: offset of word in the EEPROM to write * @words: number of word(s) * @data: word(s) write to the EEPROM * * Write a 16 bit word(s) to the EEPROM using the EEWR register. 
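 *
 * Usage sketch (illustrative offset and payload; as with the bit-bang
 * write path, the EEPROM checksum should be updated afterwards):
 *
 *	u16 cfg[2] = { 0x0001, 0x0002 };
 *	if (ixgbe_write_eewr_buffer_generic(hw, 0x20, 2, cfg) ==
 *	    IXGBE_SUCCESS)
 *		(void)ixgbe_update_eeprom_checksum_generic(hw);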
**/ s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { u32 eewr; s32 status = IXGBE_SUCCESS; u16 i; DEBUGFUNC("ixgbe_write_eewr_buffer_generic"); hw->eeprom.ops.init_params(hw); if (words == 0) { status = IXGBE_ERR_INVALID_ARGUMENT; ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words"); goto out; } if (offset >= hw->eeprom.word_size) { status = IXGBE_ERR_EEPROM; ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset"); goto out; } for (i = 0; i < words; i++) { eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) | (data[i] << IXGBE_EEPROM_RW_REG_DATA) | IXGBE_EEPROM_RW_REG_START; status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); if (status != IXGBE_SUCCESS) { DEBUGOUT("Eeprom write EEWR timed out\n"); goto out; } IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr); status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); if (status != IXGBE_SUCCESS) { DEBUGOUT("Eeprom write EEWR timed out\n"); goto out; } } out: return status; } /** * ixgbe_write_eewr_generic - Write EEPROM word using EEWR * @hw: pointer to hardware structure * @offset: offset of word in the EEPROM to write * @data: word to write to the EEPROM * * Write a 16 bit word to the EEPROM using the EEWR register. **/ s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data) { return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data); } /** * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status * @hw: pointer to hardware structure * @ee_reg: EEPROM flag for polling * * Polls the status bit (bit 1) of the EERD or EEWR to determine when the * read or write is done respectively. **/ s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) { u32 i; u32 reg; s32 status = IXGBE_ERR_EEPROM; DEBUGFUNC("ixgbe_poll_eerd_eewr_done"); for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) { if (ee_reg == IXGBE_NVM_POLL_READ) reg = IXGBE_READ_REG(hw, IXGBE_EERD); else reg = IXGBE_READ_REG(hw, IXGBE_EEWR); if (reg & IXGBE_EEPROM_RW_REG_DONE) { status = IXGBE_SUCCESS; break; } usec_delay(5); } if (i == IXGBE_EERD_EEWR_ATTEMPTS) ERROR_REPORT1(IXGBE_ERROR_POLLING, "EEPROM read/write done polling timed out"); return status; } /** * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang * @hw: pointer to hardware structure * * Prepares EEPROM for access using bit-bang method. This function should * be called before issuing a command to the EEPROM.
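 *
 * The acquire/ready/release pairing used by the bit-bang read and write
 * paths in this file looks like this (sketch; the opcode shift stands
 * in for a full read or write sequence):
 *
 *	if (ixgbe_acquire_eeprom(hw) == IXGBE_SUCCESS) {
 *		if (ixgbe_ready_eeprom(hw) == IXGBE_SUCCESS)
 *			ixgbe_shift_out_eeprom_bits(hw,
 *			    IXGBE_EEPROM_READ_OPCODE_SPI,
 *			    IXGBE_EEPROM_OPCODE_BITS);
 *		ixgbe_release_eeprom(hw);
 *	}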
**/ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) { s32 status = IXGBE_SUCCESS; u32 eec; u32 i; DEBUGFUNC("ixgbe_acquire_eeprom"); if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != IXGBE_SUCCESS) status = IXGBE_ERR_SWFW_SYNC; if (status == IXGBE_SUCCESS) { eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); /* Request EEPROM Access */ eec |= IXGBE_EEC_REQ; IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) { eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); if (eec & IXGBE_EEC_GNT) break; usec_delay(5); } /* Release if grant not acquired */ if (!(eec & IXGBE_EEC_GNT)) { eec &= ~IXGBE_EEC_REQ; IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); DEBUGOUT("Could not acquire EEPROM grant\n"); hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); status = IXGBE_ERR_EEPROM; } /* Setup EEPROM for Read/Write */ if (status == IXGBE_SUCCESS) { /* Clear CS and SK */ eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK); IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); IXGBE_WRITE_FLUSH(hw); usec_delay(1); } } return status; } /** * ixgbe_get_eeprom_semaphore - Get hardware semaphore * @hw: pointer to hardware structure * * Sets the hardware semaphores so EEPROM access can occur for bit-bang method **/ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) { s32 status = IXGBE_ERR_EEPROM; u32 timeout = 2000; u32 i; u32 swsm; DEBUGFUNC("ixgbe_get_eeprom_semaphore"); /* Get SMBI software semaphore between device drivers first */ for (i = 0; i < timeout; i++) { /* * If the SMBI bit is 0 when we read it, then the bit will be * set and we have the semaphore */ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); if (!(swsm & IXGBE_SWSM_SMBI)) { status = IXGBE_SUCCESS; break; } usec_delay(50); } if (i == timeout) { DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore " "not granted.\n"); /* * this release is particularly important because our attempts * above to get the semaphore may have succeeded, and if there * was a timeout, we should unconditionally clear the semaphore * bits to free the driver to make progress */ ixgbe_release_eeprom_semaphore(hw); usec_delay(50); /* * one last try * If the SMBI bit is 0 when we read it, then the bit will be * set and we have the semaphore */ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); if (!(swsm & IXGBE_SWSM_SMBI)) status = IXGBE_SUCCESS; } /* Now get the semaphore between SW/FW through the SWESMBI bit */ if (status == IXGBE_SUCCESS) { for (i = 0; i < timeout; i++) { swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); /* Set the SW EEPROM semaphore bit to request access */ swsm |= IXGBE_SWSM_SWESMBI; IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm); /* * If we set the bit successfully then we got the * semaphore. */ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); if (swsm & IXGBE_SWSM_SWESMBI) break; usec_delay(50); } /* * Release semaphores and return error if SW EEPROM semaphore * was not granted because we don't have access to the EEPROM */ if (i >= timeout) { ERROR_REPORT1(IXGBE_ERROR_POLLING, "SWESMBI Software EEPROM semaphore not granted.\n"); ixgbe_release_eeprom_semaphore(hw); status = IXGBE_ERR_EEPROM; } } else { ERROR_REPORT1(IXGBE_ERROR_POLLING, "Software semaphore SMBI between device drivers " "not granted.\n"); } return status; } /** * ixgbe_release_eeprom_semaphore - Release hardware semaphore * @hw: pointer to hardware structure * * This function clears hardware semaphore bits. 
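 *
 * Paired with ixgbe_get_eeprom_semaphore(), as in
 * ixgbe_acquire_swfw_sync() below (sketch):
 *
 *	if (ixgbe_get_eeprom_semaphore(hw) == IXGBE_SUCCESS) {
 *		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
 *		... test and update the SW_FW_SYNC bits ...
 *		ixgbe_release_eeprom_semaphore(hw);
 *	}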
**/ static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw) { u32 swsm; DEBUGFUNC("ixgbe_release_eeprom_semaphore"); swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */ swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI); IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); IXGBE_WRITE_FLUSH(hw); } /** * ixgbe_ready_eeprom - Polls for EEPROM ready * @hw: pointer to hardware structure **/ static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw) { s32 status = IXGBE_SUCCESS; u16 i; u8 spi_stat_reg; DEBUGFUNC("ixgbe_ready_eeprom"); /* * Read "Status Register" repeatedly until the LSB is cleared. The * EEPROM will signal that the command has been completed by clearing * bit 0 of the internal status register. If it's not cleared within * 5 milliseconds, then error out. */ for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) { ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI, IXGBE_EEPROM_OPCODE_BITS); spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8); if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI)) break; usec_delay(5); ixgbe_standby_eeprom(hw); } /* * On some parts, SPI write time could vary from 0-20mSec on 3.3V * devices (and only 0-5mSec on 5V devices) */ if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) { DEBUGOUT("SPI EEPROM Status error\n"); status = IXGBE_ERR_EEPROM; } return status; } /** * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state * @hw: pointer to hardware structure **/ static void ixgbe_standby_eeprom(struct ixgbe_hw *hw) { u32 eec; DEBUGFUNC("ixgbe_standby_eeprom"); eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); /* Toggle CS to flush commands */ eec |= IXGBE_EEC_CS; IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); IXGBE_WRITE_FLUSH(hw); usec_delay(1); eec &= ~IXGBE_EEC_CS; IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); IXGBE_WRITE_FLUSH(hw); usec_delay(1); } /** * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM. * @hw: pointer to hardware structure * @data: data to send to the EEPROM * @count: number of bits to shift out **/ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, u16 count) { u32 eec; u32 mask; u32 i; DEBUGFUNC("ixgbe_shift_out_eeprom_bits"); eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); /* * Mask is used to shift "count" bits of "data" out to the EEPROM * one bit at a time. Determine the starting bit based on count */ mask = 0x01 << (count - 1); for (i = 0; i < count; i++) { /* * A "1" is shifted out to the EEPROM by setting bit "DI" to a * "1", and then raising and then lowering the clock (the SK * bit controls the clock input to the EEPROM). A "0" is * shifted out to the EEPROM by setting "DI" to "0" and then * raising and then lowering the clock. */ if (data & mask) eec |= IXGBE_EEC_DI; else eec &= ~IXGBE_EEC_DI; IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); IXGBE_WRITE_FLUSH(hw); usec_delay(1); ixgbe_raise_eeprom_clk(hw, &eec); ixgbe_lower_eeprom_clk(hw, &eec); /* * Shift mask to signify next bit of data to shift in to the * EEPROM */ mask = mask >> 1; } /* We leave the "DI" bit set to "0" when we leave this routine. 
*/ eec &= ~IXGBE_EEC_DI; IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); IXGBE_WRITE_FLUSH(hw); } /** * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM * @hw: pointer to hardware structure * @count: number of bits to shift **/ static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) { u32 eec; u32 i; u16 data = 0; DEBUGFUNC("ixgbe_shift_in_eeprom_bits"); /* * In order to read a register from the EEPROM, we need to shift * 'count' bits in from the EEPROM. Bits are "shifted in" by raising * the clock input to the EEPROM (setting the SK bit), and then reading * the value of the "DO" bit. During this "shifting in" process the * "DI" bit should always be clear. */ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI); for (i = 0; i < count; i++) { data = data << 1; ixgbe_raise_eeprom_clk(hw, &eec); eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); eec &= ~(IXGBE_EEC_DI); if (eec & IXGBE_EEC_DO) data |= 1; ixgbe_lower_eeprom_clk(hw, &eec); } return data; } /** * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input. * @hw: pointer to hardware structure * @eec: EEC register's current value **/ static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) { DEBUGFUNC("ixgbe_raise_eeprom_clk"); /* * Raise the clock input to the EEPROM * (setting the SK bit), then delay */ *eec = *eec | IXGBE_EEC_SK; IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec); IXGBE_WRITE_FLUSH(hw); usec_delay(1); } /** * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input. * @hw: pointer to hardware structure * @eec: EEC's current value **/ static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) { DEBUGFUNC("ixgbe_lower_eeprom_clk"); /* * Lower the clock input to the EEPROM (clearing the SK bit), then * delay */ *eec = *eec & ~IXGBE_EEC_SK; IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec); IXGBE_WRITE_FLUSH(hw); usec_delay(1); } /** * ixgbe_release_eeprom - Release EEPROM, release semaphores * @hw: pointer to hardware structure **/ static void ixgbe_release_eeprom(struct ixgbe_hw *hw) { u32 eec; DEBUGFUNC("ixgbe_release_eeprom"); eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); eec |= IXGBE_EEC_CS; /* Pull CS high */ eec &= ~IXGBE_EEC_SK; /* Lower SCK */ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); IXGBE_WRITE_FLUSH(hw); usec_delay(1); /* Stop requesting EEPROM access */ eec &= ~IXGBE_EEC_REQ; IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); /* Delay before attempt to obtain semaphore again to allow FW access */ msec_delay(hw->eeprom.semaphore_delay); } /** * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum * @hw: pointer to hardware structure * * Returns a negative error code on error, or the 16-bit checksum **/ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) { u16 i; u16 j; u16 checksum = 0; u16 length = 0; u16 pointer = 0; u16 word = 0; DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic"); /* Include 0x0-0x3F in the checksum */ for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { if (hw->eeprom.ops.read(hw, i, &word)) { DEBUGOUT("EEPROM read failed\n"); return IXGBE_ERR_EEPROM; } checksum += word; } /* Include all data from pointers except for the fw pointer */ for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { if (hw->eeprom.ops.read(hw, i, &pointer)) { DEBUGOUT("EEPROM read failed\n"); return IXGBE_ERR_EEPROM; } /* If the pointer seems invalid */ if (pointer == 0xFFFF || pointer == 0) continue; if (hw->eeprom.ops.read(hw, pointer, &length)) { DEBUGOUT("EEPROM 
read failed\n"); return IXGBE_ERR_EEPROM; } if (length == 0xFFFF || length == 0) continue; for (j = pointer + 1; j <= pointer + length; j++) { if (hw->eeprom.ops.read(hw, j, &word)) { DEBUGOUT("EEPROM read failed\n"); return IXGBE_ERR_EEPROM; } checksum += word; } } checksum = (u16)IXGBE_EEPROM_SUM - checksum; return (s32)checksum; } /** * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum * @hw: pointer to hardware structure * @checksum_val: calculated checksum * * Performs checksum calculation and validates the EEPROM checksum. If the * caller does not need checksum_val, the value can be NULL. **/ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, u16 *checksum_val) { s32 status; u16 checksum; u16 read_checksum = 0; DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic"); /* Read the first word from the EEPROM. If this times out or fails, do * not continue or we could be in for a very long wait while every * EEPROM read fails */ status = hw->eeprom.ops.read(hw, 0, &checksum); if (status) { DEBUGOUT("EEPROM read failed\n"); return status; } status = hw->eeprom.ops.calc_checksum(hw); if (status < 0) return status; checksum = (u16)(status & 0xffff); status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); if (status) { DEBUGOUT("EEPROM read failed\n"); return status; } /* Verify read checksum from EEPROM is the same as * calculated checksum */ if (read_checksum != checksum) status = IXGBE_ERR_EEPROM_CHECKSUM; /* If the user cares, return the calculated checksum */ if (checksum_val) *checksum_val = checksum; return status; } /** * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum * @hw: pointer to hardware structure **/ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw) { s32 status; u16 checksum; DEBUGFUNC("ixgbe_update_eeprom_checksum_generic"); /* Read the first word from the EEPROM. If this times out or fails, do * not continue or we could be in for a very long wait while every * EEPROM read fails */ status = hw->eeprom.ops.read(hw, 0, &checksum); if (status) { DEBUGOUT("EEPROM read failed\n"); return status; } status = hw->eeprom.ops.calc_checksum(hw); if (status < 0) return status; checksum = (u16)(status & 0xffff); status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum); return status; } /** * ixgbe_validate_mac_addr - Validate MAC address * @mac_addr: pointer to MAC address. * * Tests a MAC address to ensure it is a valid Individual Address. **/ s32 ixgbe_validate_mac_addr(u8 *mac_addr) { s32 status = IXGBE_SUCCESS; DEBUGFUNC("ixgbe_validate_mac_addr"); /* Make sure it is not a multicast address */ if (IXGBE_IS_MULTICAST(mac_addr)) { status = IXGBE_ERR_INVALID_MAC_ADDR; /* Not a broadcast address */ } else if (IXGBE_IS_BROADCAST(mac_addr)) { status = IXGBE_ERR_INVALID_MAC_ADDR; /* Reject the zero address */ } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) { status = IXGBE_ERR_INVALID_MAC_ADDR; } return status; } /** * ixgbe_set_rar_generic - Set Rx address register * @hw: pointer to hardware structure * @index: Receive address register to write * @addr: Address to put into receive address register * @vmdq: VMDq "set" or "pool" index * @enable_addr: set flag that address is active * * Puts an ethernet address into a receive address register. 
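 *
 * Example (the MAC address bytes and RAR index are illustrative):
 *
 *	u8 mac[6] = { 0x00, 0xa0, 0xc9, 0x00, 0x00, 0x01 };
 *	ixgbe_set_rar_generic(hw, 1, mac, 0, IXGBE_RAH_AV);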
**/ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, u32 enable_addr) { u32 rar_low, rar_high; u32 rar_entries = hw->mac.num_rar_entries; DEBUGFUNC("ixgbe_set_rar_generic"); /* Make sure we are using a valid rar index range */ if (index >= rar_entries) { ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, "RAR index %d is out of range.\n", index); return IXGBE_ERR_INVALID_ARGUMENT; } /* setup VMDq pool selection before this RAR gets enabled */ hw->mac.ops.set_vmdq(hw, index, vmdq); /* * HW expects these in little endian so we reverse the byte * order from network order (big endian) to little endian */ rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); /* * Some parts put the VMDq setting in the extra RAH bits, * so save everything except the lower 16 bits that hold part * of the address and the address valid bit. */ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8)); if (enable_addr != 0) rar_high |= IXGBE_RAH_AV; IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); return IXGBE_SUCCESS; } /** * ixgbe_clear_rar_generic - Remove Rx address register * @hw: pointer to hardware structure * @index: Receive address register to write * * Clears an ethernet address from a receive address register. **/ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index) { u32 rar_high; u32 rar_entries = hw->mac.num_rar_entries; DEBUGFUNC("ixgbe_clear_rar_generic"); /* Make sure we are using a valid rar index range */ if (index >= rar_entries) { ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, "RAR index %d is out of range.\n", index); return IXGBE_ERR_INVALID_ARGUMENT; } /* * Some parts put the VMDq setting in the extra RAH bits, * so save everything except the lower 16 bits that hold part * of the address and the address valid bit. */ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0); IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); /* clear VMDq pool/queue selection for this RAR */ hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL); return IXGBE_SUCCESS; } /** * ixgbe_init_rx_addrs_generic - Initializes receive address filters. * @hw: pointer to hardware structure * * Places the MAC address in receive address register 0 and clears the rest * of the receive address registers. Clears the multicast table. Assumes * the receiver is in reset when the routine is called. **/ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) { u32 i; u32 rar_entries = hw->mac.num_rar_entries; DEBUGFUNC("ixgbe_init_rx_addrs_generic"); /* * If the current mac address is valid, assume it is a software override * to the permanent address. * Otherwise, use the permanent address from the eeprom. */ if (ixgbe_validate_mac_addr(hw->mac.addr) == IXGBE_ERR_INVALID_MAC_ADDR) { /* Get the MAC address from the RAR0 for later reference */ hw->mac.ops.get_mac_addr(hw, hw->mac.addr); DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ", hw->mac.addr[0], hw->mac.addr[1], hw->mac.addr[2]); DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3], hw->mac.addr[4], hw->mac.addr[5]); } else { /* Setup the receive address. 
*/ DEBUGOUT("Overriding MAC Address in RAR[0]\n"); DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ", hw->mac.addr[0], hw->mac.addr[1], hw->mac.addr[2]); DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3], hw->mac.addr[4], hw->mac.addr[5]); hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); } /* clear VMDq pool/queue selection for RAR 0 */ hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL); hw->addr_ctrl.overflow_promisc = 0; hw->addr_ctrl.rar_used_count = 1; /* Zero out the other receive addresses. */ DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1); for (i = 1; i < rar_entries; i++) { IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); } /* Clear the MTA */ hw->addr_ctrl.mta_in_use = 0; IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); DEBUGOUT(" Clearing MTA\n"); for (i = 0; i < hw->mac.mcft_size; i++) IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); ixgbe_init_uta_tables(hw); return IXGBE_SUCCESS; } /** * ixgbe_add_uc_addr - Adds a secondary unicast address. * @hw: pointer to hardware structure * @addr: new address * @vmdq: VMDq "set" or "pool" index * * Adds it to unused receive address register or goes into promiscuous mode. **/ void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) { u32 rar_entries = hw->mac.num_rar_entries; u32 rar; DEBUGFUNC("ixgbe_add_uc_addr"); DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n", addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); /* * Place this address in the RAR if there is room, * else put the controller into promiscuous mode */ if (hw->addr_ctrl.rar_used_count < rar_entries) { rar = hw->addr_ctrl.rar_used_count; hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar); hw->addr_ctrl.rar_used_count++; } else { hw->addr_ctrl.overflow_promisc++; } DEBUGOUT("ixgbe_add_uc_addr Complete\n"); } /** * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses * @hw: pointer to hardware structure * @addr_list: the list of new addresses * @addr_count: number of addresses * @next: iterator function to walk the address list * * The given list replaces any existing list. Clears the secondary addrs from * receive address registers. Uses unused receive address registers for the * first secondary addresses, and falls back to promiscuous mode as needed. * * Drivers using secondary unicast addresses must set user_set_promisc when * manually putting the device into promiscuous mode. 
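 *
 * The iterator contract, sketched with a hypothetical helper that walks
 * a packed array of 6-byte addresses and pins every address to pool 0:
 *
 *	static u8 *uc_array_itr(struct ixgbe_hw *hw, u8 **list, u32 *vmdq)
 *	{
 *		u8 *addr = *list;
 *		*list += 6;
 *		*vmdq = 0;
 *		return addr;
 *	}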
**/ s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list, u32 addr_count, ixgbe_mc_addr_itr next) { u8 *addr; u32 i; u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc; u32 uc_addr_in_use; u32 fctrl; u32 vmdq; DEBUGFUNC("ixgbe_update_uc_addr_list_generic"); /* * Clear accounting of old secondary address list, * don't count RAR[0] */ uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1; hw->addr_ctrl.rar_used_count -= uc_addr_in_use; hw->addr_ctrl.overflow_promisc = 0; /* Zero out the other receive addresses */ DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1); for (i = 0; i < uc_addr_in_use; i++) { IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0); IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0); } /* Add the new addresses */ for (i = 0; i < addr_count; i++) { DEBUGOUT(" Adding the secondary addresses:\n"); addr = next(hw, &addr_list, &vmdq); ixgbe_add_uc_addr(hw, addr, vmdq); } if (hw->addr_ctrl.overflow_promisc) { /* enable promisc if not already in overflow or set by user */ if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { DEBUGOUT(" Entering address overflow promisc mode\n"); fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); fctrl |= IXGBE_FCTRL_UPE; IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); } } else { /* only disable if set by overflow, not by user */ if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { DEBUGOUT(" Leaving address overflow promisc mode\n"); fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); fctrl &= ~IXGBE_FCTRL_UPE; IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); } } DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n"); return IXGBE_SUCCESS; } /** * ixgbe_mta_vector - Determines bit-vector in multicast table to set * @hw: pointer to hardware structure * @mc_addr: the multicast address * * Extracts 12 bits from a multicast address to determine which * bit-vector to set in the multicast table. The hardware uses 12 bits from * incoming rx multicast addresses to determine the bit-vector to check in * the MTA. Which of the 4 combinations of 12 bits the hardware uses is set * by the MO field of the MCSTCTRL. The MO field is set during initialization * to mc_filter_type. **/ static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) { u32 vector = 0; DEBUGFUNC("ixgbe_mta_vector"); switch (hw->mac.mc_filter_type) { case 0: /* use bits [47:36] of the address */ vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); break; case 1: /* use bits [46:35] of the address */ vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); break; case 2: /* use bits [45:34] of the address */ vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); break; case 3: /* use bits [43:32] of the address */ vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); break; default: /* Invalid mc_filter_type */ DEBUGOUT("MC filter type param set incorrectly\n"); ASSERT(0); break; } /* vector can only be 12-bits or boundary will be exceeded */ vector &= 0xFFF; return vector; } /** * ixgbe_set_mta - Set bit-vector in multicast table * @hw: pointer to hardware structure * @mc_addr: Multicast address * * Sets the bit-vector in the multicast table. **/ void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) { u32 vector; u32 vector_bit; u32 vector_reg; DEBUGFUNC("ixgbe_set_mta"); hw->addr_ctrl.mta_in_use++; vector = ixgbe_mta_vector(hw, mc_addr); DEBUGOUT1(" bit-vector = 0x%03X\n", vector); /* * The MTA is a register array of 128 32-bit registers. It is treated * like an array of 4096 bits. We want to set bit * BitArray[vector_value].
So we figure out what register the bit is * in, read it, OR in the new bit, then write back the new value. The * register is determined by the upper 7 bits of the vector value and * the bit within that register is determined by the lower 5 bits of * the value. */ vector_reg = (vector >> 5) & 0x7F; vector_bit = vector & 0x1F; hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit); } /** * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses * @hw: pointer to hardware structure * @mc_addr_list: the list of new multicast addresses * @mc_addr_count: number of addresses * @next: iterator function to walk the multicast address list * @clear: flag, when set clears the table beforehand * * When the clear flag is set, the given list replaces any existing list. * Hashes the given addresses into the multicast table. **/ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, u32 mc_addr_count, ixgbe_mc_addr_itr next, bool clear) { u32 i; u32 vmdq; DEBUGFUNC("ixgbe_update_mc_addr_list_generic"); /* * Set the new number of MC addresses that we are being requested to * use. */ hw->addr_ctrl.num_mc_addrs = mc_addr_count; hw->addr_ctrl.mta_in_use = 0; /* Clear mta_shadow */ if (clear) { DEBUGOUT(" Clearing MTA\n"); memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); } /* Update mta_shadow */ for (i = 0; i < mc_addr_count; i++) { DEBUGOUT(" Adding the multicast addresses:\n"); ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq)); } /* Enable mta */ for (i = 0; i < hw->mac.mcft_size; i++) IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i, hw->mac.mta_shadow[i]); if (hw->addr_ctrl.mta_in_use > 0) IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n"); return IXGBE_SUCCESS; } /** * ixgbe_enable_mc_generic - Enable multicast address in RAR * @hw: pointer to hardware structure * * Enables multicast address in RAR and the use of the multicast hash table. **/ s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw) { struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; DEBUGFUNC("ixgbe_enable_mc_generic"); if (a->mta_in_use > 0) IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); return IXGBE_SUCCESS; } /** * ixgbe_disable_mc_generic - Disable multicast address in RAR * @hw: pointer to hardware structure * * Disables multicast address in RAR and the use of the multicast hash table. **/ s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) { struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; DEBUGFUNC("ixgbe_disable_mc_generic"); if (a->mta_in_use > 0) IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); return IXGBE_SUCCESS; } /** * ixgbe_fc_enable_generic - Enable flow control * @hw: pointer to hardware structure * * Enable flow control according to the current settings.
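 *
 * Callers populate hw->fc first; a sketch with illustrative values
 * (the watermarks below are shifted left by 10 when programmed, and
 * pause_time is a 16-bit value, so 0xFFFF is its maximum):
 *
 *	hw->fc.requested_mode = ixgbe_fc_full;
 *	hw->fc.pause_time = 0xFFFF;
 *	hw->fc.high_water[0] = 0x80;
 *	hw->fc.low_water[0] = 0x40;
 *	(void)ixgbe_fc_enable_generic(hw);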
**/ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) { s32 ret_val = IXGBE_SUCCESS; u32 mflcn_reg, fccfg_reg; u32 reg; u32 fcrtl, fcrth; int i; DEBUGFUNC("ixgbe_fc_enable_generic"); /* Validate the water mark configuration */ if (!hw->fc.pause_time) { ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; goto out; } /* Low water mark of zero causes XOFF floods */ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && hw->fc.high_water[i]) { if (!hw->fc.low_water[i] || hw->fc.low_water[i] >= hw->fc.high_water[i]) { DEBUGOUT("Invalid water mark configuration\n"); ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; goto out; } } } /* Negotiate the fc mode to use */ hw->mac.ops.fc_autoneg(hw); /* Disable any previous flow control settings */ mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE); fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY); /* * The possible values of fc.current_mode are: * 0: Flow control is completely disabled * 1: Rx flow control is enabled (we can receive pause frames, * but not send pause frames). * 2: Tx flow control is enabled (we can send pause frames but * we do not support receiving pause frames). * 3: Both Rx and Tx flow control (symmetric) are enabled. * other: Invalid. */ switch (hw->fc.current_mode) { case ixgbe_fc_none: /* * Flow control is disabled by software override or autoneg. * The code below will actually disable it in the HW. */ break; case ixgbe_fc_rx_pause: /* * Rx Flow control is enabled and Tx Flow control is * disabled by software override. Since there really * isn't a way to advertise that we are capable of RX * Pause ONLY, we will advertise that we support both * symmetric and asymmetric Rx PAUSE. Later, we will * disable the adapter's ability to send PAUSE frames. */ mflcn_reg |= IXGBE_MFLCN_RFCE; break; case ixgbe_fc_tx_pause: /* * Tx Flow control is enabled, and Rx Flow control is * disabled by software override. */ fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; break; case ixgbe_fc_full: /* Flow control (both Rx and Tx) is enabled by SW override. */ mflcn_reg |= IXGBE_MFLCN_RFCE; fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; break; default: ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Flow control param set incorrectly\n"); ret_val = IXGBE_ERR_CONFIG; goto out; break; } /* Set 802.3x based flow control settings. */ mflcn_reg |= IXGBE_MFLCN_DPF; IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); /* Set up and enable Rx high/low water mark thresholds, enable XON. */ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && hw->fc.high_water[i]) { fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl); fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; } else { IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); /* * In order to prevent Tx hangs when the internal Tx * switch is enabled we must set the high water mark * to the Rx packet buffer size - 24KB. This allows * the Tx switch to function even under heavy Rx * workloads. 
*/ fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576; } IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth); } /* Configure pause time (2 TCs per register) */ reg = hw->fc.pause_time * 0x00010001; for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); /* Configure flow control refresh threshold value */ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); out: return ret_val; } /** * ixgbe_negotiate_fc - Negotiate flow control * @hw: pointer to hardware structure * @adv_reg: flow control advertised settings * @lp_reg: link partner's flow control settings * @adv_sym: symmetric pause bit in advertisement * @adv_asm: asymmetric pause bit in advertisement * @lp_sym: symmetric pause bit in link partner advertisement * @lp_asm: asymmetric pause bit in link partner advertisement * * Find the intersection between advertised settings and link partner's * advertised settings **/ s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) { if ((!(adv_reg)) || (!(lp_reg))) { ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED, "Local or link partner's advertised flow control " "settings are NULL. Local: %x, link partner: %x\n", adv_reg, lp_reg); return IXGBE_ERR_FC_NOT_NEGOTIATED; } if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) { /* * Now we need to check if the user selected Rx ONLY * of pause frames. In this case, we had to advertise * FULL flow control because we could not advertise RX * ONLY. Hence, we must now check to see if we need to * turn OFF the TRANSMISSION of PAUSE frames. */ if (hw->fc.requested_mode == ixgbe_fc_full) { hw->fc.current_mode = ixgbe_fc_full; DEBUGOUT("Flow Control = FULL.\n"); } else { hw->fc.current_mode = ixgbe_fc_rx_pause; DEBUGOUT("Flow Control=RX PAUSE frames only\n"); } } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) && (lp_reg & lp_sym) && (lp_reg & lp_asm)) { hw->fc.current_mode = ixgbe_fc_tx_pause; DEBUGOUT("Flow Control = TX PAUSE frames only.\n"); } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) && !(lp_reg & lp_sym) && (lp_reg & lp_asm)) { hw->fc.current_mode = ixgbe_fc_rx_pause; DEBUGOUT("Flow Control = RX PAUSE frames only.\n"); } else { hw->fc.current_mode = ixgbe_fc_none; DEBUGOUT("Flow Control = NONE.\n"); } return IXGBE_SUCCESS; } /** * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber * @hw: pointer to hardware structure * * Enable flow control according on 1 gig fiber. **/ static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) { u32 pcs_anadv_reg, pcs_lpab_reg, linkstat; s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; /* * On multispeed fiber at 1g, bail out if * - link is up but AN did not complete, or if * - link is up and AN completed but timed out */ linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) { DEBUGOUT("Auto-Negotiation did not complete or timed out\n"); goto out; } pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg, pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE, IXGBE_PCS1GANA_ASM_PAUSE, IXGBE_PCS1GANA_SYM_PAUSE, IXGBE_PCS1GANA_ASM_PAUSE); out: return ret_val; } /** * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37 * @hw: pointer to hardware structure * * Enable flow control according to IEEE clause 37. 
**/ static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) { u32 links2, anlp1_reg, autoc_reg, links; s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; /* * On backplane, bail out if * - backplane autoneg was not completed, or if * - we are 82599 and link partner is not AN enabled */ links = IXGBE_READ_REG(hw, IXGBE_LINKS); if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) { DEBUGOUT("Auto-Negotiation did not complete\n"); goto out; } if (hw->mac.type == ixgbe_mac_82599EB) { links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2); if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) { DEBUGOUT("Link partner is not AN enabled\n"); goto out; } } /* * Read the 10g AN autoc and LP ability registers and resolve * local flow control settings accordingly */ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); ret_val = ixgbe_negotiate_fc(hw, autoc_reg, anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE, IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE); out: return ret_val; } /** * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37 * @hw: pointer to hardware structure * * Enable flow control according to IEEE clause 37. **/ static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw) { u16 technology_ability_reg = 0; u16 lp_technology_ability_reg = 0; hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &technology_ability_reg); hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &lp_technology_ability_reg); return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg, (u32)lp_technology_ability_reg, IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE, IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE); } /** * ixgbe_fc_autoneg - Configure flow control * @hw: pointer to hardware structure * * Compares our advertised flow control capabilities to those advertised by * our link partner, and determines the proper flow control mode to use. **/ void ixgbe_fc_autoneg(struct ixgbe_hw *hw) { s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; ixgbe_link_speed speed; bool link_up; DEBUGFUNC("ixgbe_fc_autoneg"); /* * AN should have completed when the cable was plugged in. * Look for reasons to bail out. Bail out if: * - FC autoneg is disabled, or if * - link is not up. 
*/ if (hw->fc.disable_fc_autoneg) { /* TODO: This should be just an informative log */ ERROR_REPORT1(IXGBE_ERROR_CAUTION, "Flow control autoneg is disabled"); goto out; } - hw->mac.ops.check_link(hw, &speed, &link_up, FALSE); + hw->mac.ops.check_link(hw, &speed, &link_up, false); if (!link_up) { ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down"); goto out; } switch (hw->phy.media_type) { /* Autoneg flow control on fiber adapters */ case ixgbe_media_type_fiber_fixed: case ixgbe_media_type_fiber_qsfp: case ixgbe_media_type_fiber: if (speed == IXGBE_LINK_SPEED_1GB_FULL) ret_val = ixgbe_fc_autoneg_fiber(hw); break; /* Autoneg flow control on backplane adapters */ case ixgbe_media_type_backplane: ret_val = ixgbe_fc_autoneg_backplane(hw); break; /* Autoneg flow control on copper adapters */ case ixgbe_media_type_copper: if (ixgbe_device_supports_autoneg_fc(hw)) ret_val = ixgbe_fc_autoneg_copper(hw); break; default: break; } out: if (ret_val == IXGBE_SUCCESS) { - hw->fc.fc_was_autonegged = TRUE; + hw->fc.fc_was_autonegged = true; } else { - hw->fc.fc_was_autonegged = FALSE; + hw->fc.fc_was_autonegged = false; hw->fc.current_mode = hw->fc.requested_mode; } } /* * ixgbe_pcie_timeout_poll - Return number of times to poll for completion * @hw: pointer to hardware structure * * System-wide timeout range is encoded in PCIe Device Control2 register. * * Add 10% to specified maximum and return the number of times to poll for * completion timeout, in units of 100 microsec. Never return less than * 800 = 80 millisec. */ static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw) { s16 devctl2; u32 pollcnt; devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2); devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK; switch (devctl2) { case IXGBE_PCIDEVCTRL2_65_130ms: pollcnt = 1300; /* 130 millisec */ break; case IXGBE_PCIDEVCTRL2_260_520ms: pollcnt = 5200; /* 520 millisec */ break; case IXGBE_PCIDEVCTRL2_1_2s: pollcnt = 20000; /* 2 sec */ break; case IXGBE_PCIDEVCTRL2_4_8s: pollcnt = 80000; /* 8 sec */ break; case IXGBE_PCIDEVCTRL2_17_34s: pollcnt = 34000; /* 34 sec */ break; case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */ case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */ case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */ case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */ default: pollcnt = 800; /* 80 millisec minimum */ break; } /* add 10% to spec maximum */ return (pollcnt * 11) / 10; } /** * ixgbe_disable_pcie_master - Disable PCI-express master access * @hw: pointer to hardware structure * * Disables PCI-Express master access and verifies there are no pending * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS * is returned signifying master requests disabled. **/ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) { s32 status = IXGBE_SUCCESS; u32 i, poll; u16 value; DEBUGFUNC("ixgbe_disable_pcie_master"); /* Always set this bit to ensure any future transactions are blocked */ IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS); /* Exit if master requests are blocked */ if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) || IXGBE_REMOVED(hw->hw_addr)) goto out; /* Poll for master request bit to clear */ for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { usec_delay(100); if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) goto out; } /* * Two consecutive resets are required via CTRL.RST per datasheet * 5.2.5.3.2 Master Disable. 
We set a flag to inform the reset routine * of this need. The first reset prevents new master requests from * being issued by our device. We then must wait 1usec or more for any * remaining completions from the PCIe bus to trickle in, and then reset * again to clear out any effects they may have had on our device. */ DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n"); hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; if (hw->mac.type >= ixgbe_mac_X550) goto out; /* * Before proceeding, make sure that the PCIe block does not have * transactions pending. */ poll = ixgbe_pcie_timeout_poll(hw); for (i = 0; i < poll; i++) { usec_delay(100); value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS); if (IXGBE_REMOVED(hw->hw_addr)) goto out; if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) goto out; } ERROR_REPORT1(IXGBE_ERROR_POLLING, "PCIe transaction pending bit also did not clear.\n"); status = IXGBE_ERR_MASTER_REQUESTS_PENDING; out: return status; } /** * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore * @hw: pointer to hardware structure * @mask: Mask to specify which semaphore to acquire * * Acquires the SWFW semaphore through the GSSR register for the specified * function (CSR, PHY0, PHY1, EEPROM, Flash) **/ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask) { u32 gssr = 0; u32 swmask = mask; u32 fwmask = mask << 5; u32 timeout = 200; u32 i; DEBUGFUNC("ixgbe_acquire_swfw_sync"); for (i = 0; i < timeout; i++) { /* * SW NVM semaphore bit is used for access to all * SW_FW_SYNC bits (not just NVM) */ if (ixgbe_get_eeprom_semaphore(hw)) return IXGBE_ERR_SWFW_SYNC; gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); if (!(gssr & (fwmask | swmask))) { gssr |= swmask; IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); ixgbe_release_eeprom_semaphore(hw); return IXGBE_SUCCESS; } else { /* Resource is currently in use by FW or SW */ ixgbe_release_eeprom_semaphore(hw); msec_delay(5); } } /* If time expired clear the bits holding the lock and retry */ if (gssr & (fwmask | swmask)) ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask)); msec_delay(5); return IXGBE_ERR_SWFW_SYNC; } /** * ixgbe_release_swfw_sync - Release SWFW semaphore * @hw: pointer to hardware structure * @mask: Mask to specify which semaphore to release * * Releases the SWFW semaphore through the GSSR register for the specified * function (CSR, PHY0, PHY1, EEPROM, Flash) **/ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask) { u32 gssr; u32 swmask = mask; DEBUGFUNC("ixgbe_release_swfw_sync"); ixgbe_get_eeprom_semaphore(hw); gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); gssr &= ~swmask; IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); ixgbe_release_eeprom_semaphore(hw); } /** * ixgbe_disable_sec_rx_path_generic - Stops the receive data path * @hw: pointer to hardware structure * * Stops the receive data path and waits for the HW to internally empty * the Rx security block **/ s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw) { #define IXGBE_MAX_SECRX_POLL 40 int i; int secrxreg; DEBUGFUNC("ixgbe_disable_sec_rx_path_generic"); secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); secrxreg |= IXGBE_SECRXCTRL_RX_DIS; IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) { secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT); if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY) break; else /* Use interrupt-safe sleep just in case */ usec_delay(1000); } /* For informational purposes only */ if (i >= IXGBE_MAX_SECRX_POLL) DEBUGOUT("Rx unit being enabled before security " "path fully disabled. 
Continuing with init.\n"); return IXGBE_SUCCESS; } /** * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read * @hw: pointer to hardware structure * @locked: bool to indicate whether the SW/FW lock was taken * @reg_val: Value we read from AUTOC * * The default case requires no protection so just do the register read. */ s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val) { - *locked = FALSE; + *locked = false; *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC); return IXGBE_SUCCESS; } /** * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write * @hw: pointer to hardware structure * @reg_val: value to write to AUTOC * @locked: bool to indicate whether the SW/FW lock was already taken by * previous read. * * The default case requires no protection so just do the register write. */ s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked) { UNREFERENCED_1PARAMETER(locked); IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val); return IXGBE_SUCCESS; } /** * ixgbe_enable_sec_rx_path_generic - Enables the receive data path * @hw: pointer to hardware structure * * Enables the receive data path. **/ s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw) { u32 secrxreg; DEBUGFUNC("ixgbe_enable_sec_rx_path_generic"); secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS; IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); IXGBE_WRITE_FLUSH(hw); return IXGBE_SUCCESS; } /** * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit * @hw: pointer to hardware structure * @regval: register value to write to RXCTRL * * Enables the Rx DMA unit **/ s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) { DEBUGFUNC("ixgbe_enable_rx_dma_generic"); if (regval & IXGBE_RXCTRL_RXEN) ixgbe_enable_rx(hw); else ixgbe_disable_rx(hw); return IXGBE_SUCCESS; } /** * ixgbe_blink_led_start_generic - Blink LED based on index. * @hw: pointer to hardware structure * @index: led number to blink **/ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) { ixgbe_link_speed speed = 0; bool link_up = 0; u32 autoc_reg = 0; u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); s32 ret_val = IXGBE_SUCCESS; - bool locked = FALSE; + bool locked = false; DEBUGFUNC("ixgbe_blink_led_start_generic"); if (index > 3) return IXGBE_ERR_PARAM; /* * Link must be up to auto-blink the LEDs; * Force it if link is down. */ - hw->mac.ops.check_link(hw, &speed, &link_up, FALSE); + hw->mac.ops.check_link(hw, &speed, &link_up, false); if (!link_up) { ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); if (ret_val != IXGBE_SUCCESS) goto out; autoc_reg |= IXGBE_AUTOC_AN_RESTART; autoc_reg |= IXGBE_AUTOC_FLU; ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); if (ret_val != IXGBE_SUCCESS) goto out; IXGBE_WRITE_FLUSH(hw); msec_delay(10); } led_reg &= ~IXGBE_LED_MODE_MASK(index); led_reg |= IXGBE_LED_BLINK(index); IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); IXGBE_WRITE_FLUSH(hw); out: return ret_val; } /** * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
* @hw: pointer to hardware structure * @index: led number to stop blinking **/ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) { u32 autoc_reg = 0; u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); s32 ret_val = IXGBE_SUCCESS; - bool locked = FALSE; + bool locked = false; DEBUGFUNC("ixgbe_blink_led_stop_generic"); if (index > 3) return IXGBE_ERR_PARAM; ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); if (ret_val != IXGBE_SUCCESS) goto out; autoc_reg &= ~IXGBE_AUTOC_FLU; autoc_reg |= IXGBE_AUTOC_AN_RESTART; ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); if (ret_val != IXGBE_SUCCESS) goto out; led_reg &= ~IXGBE_LED_MODE_MASK(index); led_reg &= ~IXGBE_LED_BLINK(index); led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); IXGBE_WRITE_FLUSH(hw); out: return ret_val; } /** * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM * @hw: pointer to hardware structure * @san_mac_offset: SAN MAC address offset * * This function will read the EEPROM location for the SAN MAC address * pointer, and returns the value at that location. This is used in both * get and set mac_addr routines. **/ static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, u16 *san_mac_offset) { s32 ret_val; DEBUGFUNC("ixgbe_get_san_mac_addr_offset"); /* * First read the EEPROM pointer to see if the MAC addresses are * available. */ ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset); if (ret_val) { ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, "eeprom at offset %d failed", IXGBE_SAN_MAC_ADDR_PTR); } return ret_val; } /** * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM * @hw: pointer to hardware structure * @san_mac_addr: SAN MAC address * * Reads the SAN MAC address from the EEPROM, if it's available. This is * per-port, so set_lan_id() must be called before reading the addresses. * set_lan_id() is called by identify_sfp(), but this cannot be relied * upon for non-SFP connections, so we must call it here. **/ s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) { u16 san_mac_data, san_mac_offset; u8 i; s32 ret_val; DEBUGFUNC("ixgbe_get_san_mac_addr_generic"); /* * First read the EEPROM pointer to see if the MAC addresses are * available. If they're not, no point in calling set_lan_id() here. */ ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF) goto san_mac_addr_out; /* make sure we know which port we need to program */ hw->mac.ops.set_lan_id(hw); /* apply the port offset to the address offset */ (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); for (i = 0; i < 3; i++) { ret_val = hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data); if (ret_val) { ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, "eeprom read at offset %d failed", san_mac_offset); goto san_mac_addr_out; } san_mac_addr[i * 2] = (u8)(san_mac_data); san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8); san_mac_offset++; } return IXGBE_SUCCESS; san_mac_addr_out: /* * No addresses available in this EEPROM. It's not an * error though, so just wipe the local address and return. */ for (i = 0; i < 6; i++) san_mac_addr[i] = 0xFF; return IXGBE_SUCCESS; } /** * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM * @hw: pointer to hardware structure * @san_mac_addr: SAN MAC address * * Write a SAN MAC address to the EEPROM. 
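 *
 * Sketch (the address bytes are illustrative; the call returns
 * IXGBE_ERR_NO_SAN_ADDR_PTR when the EEPROM lacks a SAN MAC pointer):
 *
 *	u8 san[6] = { 0x00, 0xa0, 0xc9, 0x00, 0x00, 0x02 };
 *	if (ixgbe_set_san_mac_addr_generic(hw, san) != IXGBE_SUCCESS)
 *		DEBUGOUT("SAN MAC address not written\n");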
**/ s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) { s32 ret_val; u16 san_mac_data, san_mac_offset; u8 i; DEBUGFUNC("ixgbe_set_san_mac_addr_generic"); /* Look for SAN mac address pointer. If not defined, return */ ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF) return IXGBE_ERR_NO_SAN_ADDR_PTR; /* Make sure we know which port we need to write */ hw->mac.ops.set_lan_id(hw); /* Apply the port offset to the address offset */ (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); for (i = 0; i < 3; i++) { san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8); san_mac_data |= (u16)(san_mac_addr[i * 2]); hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data); san_mac_offset++; } return IXGBE_SUCCESS; } /** * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count * @hw: pointer to hardware structure * * Read PCIe configuration space, and get the MSI-X vector count from * the capabilities table. **/ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) { u16 msix_count = 1; u16 max_msix_count; u16 pcie_offset; switch (hw->mac.type) { case ixgbe_mac_82598EB: pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS; max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598; break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_X550EM_a: pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS; max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599; break; default: return msix_count; } DEBUGFUNC("ixgbe_get_pcie_msix_count_generic"); msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset); if (IXGBE_REMOVED(hw->hw_addr)) msix_count = 0; msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; /* MSI-X count is zero-based in HW */ msix_count++; if (msix_count > max_msix_count) msix_count = max_msix_count; return msix_count; } /** * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address * @hw: pointer to hardware structure * @addr: Address to put into receive address register * @vmdq: VMDq pool to assign * * Puts an ethernet address into a receive address register, or * finds the rar that it is already in; adds to the pool list **/ s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) { static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF; u32 first_empty_rar = NO_EMPTY_RAR_FOUND; u32 rar; u32 rar_low, rar_high; u32 addr_low, addr_high; DEBUGFUNC("ixgbe_insert_mac_addr_generic"); /* swap bytes for HW little endian */ addr_low = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24); addr_high = addr[4] | (addr[5] << 8); /* * Either find the mac_id in rar or find the first empty space. * rar_highwater points to just after the highest currently used * rar in order to shorten the search. It grows when we add a new * rar to the top. 
*/ for (rar = 0; rar < hw->mac.rar_highwater; rar++) { rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); if (((IXGBE_RAH_AV & rar_high) == 0) && first_empty_rar == NO_EMPTY_RAR_FOUND) { first_empty_rar = rar; } else if ((rar_high & 0xFFFF) == addr_high) { rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar)); if (rar_low == addr_low) break; /* found it already in the rars */ } } if (rar < hw->mac.rar_highwater) { /* already there so just add to the pool bits */ ixgbe_set_vmdq(hw, rar, vmdq); } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) { /* stick it into first empty RAR slot we found */ rar = first_empty_rar; ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); } else if (rar == hw->mac.rar_highwater) { /* add it to the top of the list and inc the highwater mark */ ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); hw->mac.rar_highwater++; } else if (rar >= hw->mac.num_rar_entries) { return IXGBE_ERR_INVALID_MAC_ADDR; } /* * If we found rar[0], make sure the default pool bit (we use pool 0) * remains cleared to be sure default pool packets will get delivered */ if (rar == 0) ixgbe_clear_vmdq(hw, rar, 0); return rar; } /** * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address * @hw: pointer to hardware struct * @rar: receive address register index to disassociate * @vmdq: VMDq pool index to remove from the rar **/ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) { u32 mpsar_lo, mpsar_hi; u32 rar_entries = hw->mac.num_rar_entries; DEBUGFUNC("ixgbe_clear_vmdq_generic"); /* Make sure we are using a valid rar index range */ if (rar >= rar_entries) { ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, "RAR index %d is out of range.\n", rar); return IXGBE_ERR_INVALID_ARGUMENT; } mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); if (IXGBE_REMOVED(hw->hw_addr)) goto done; if (!mpsar_lo && !mpsar_hi) goto done; if (vmdq == IXGBE_CLEAR_VMDQ_ALL) { if (mpsar_lo) { IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); } if (mpsar_hi) { IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); } } else if (vmdq < 32) { mpsar_lo &= ~(1 << vmdq); IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo); } else { mpsar_hi &= ~(1 << (vmdq - 32)); IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi); } /* was that the last pool using this rar? */ if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0 && rar != hw->mac.san_mac_rar_index) hw->mac.ops.clear_rar(hw, rar); done: return IXGBE_SUCCESS; } /** * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address * @hw: pointer to hardware struct * @rar: receive address register index to associate with a VMDq index * @vmdq: VMDq pool index **/ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) { u32 mpsar; u32 rar_entries = hw->mac.num_rar_entries; DEBUGFUNC("ixgbe_set_vmdq_generic"); /* Make sure we are using a valid rar index range */ if (rar >= rar_entries) { ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, "RAR index %d is out of range.\n", rar); return IXGBE_ERR_INVALID_ARGUMENT; } if (vmdq < 32) { mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); mpsar |= 1 << vmdq; IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar); } else { mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); mpsar |= 1 << (vmdq - 32); IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar); } return IXGBE_SUCCESS; } /** * This function should only be involved in the IOV mode. 
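 *
 * (Sketch of the MPSAR pool-bit selection shared by the VMDq routines
 * above and below; mpsar_half()/mpsar_mask() are hypothetical names:
 *
 *	int mpsar_half(u32 vmdq) { return vmdq < 32 ? 0 : 1; }	// LO or HI
 *	u32 mpsar_mask(u32 vmdq) { return 1u << (vmdq % 32); }
 *
 * e.g. vmdq = 40 selects MPSAR_HI with mask 1 << 8.)
 *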
* In IOV mode, Default pool is next pool after the number of * VFs advertised and not 0. * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index] * * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address * @hw: pointer to hardware struct * @vmdq: VMDq pool index **/ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq) { u32 rar = hw->mac.san_mac_rar_index; DEBUGFUNC("ixgbe_set_vmdq_san_mac"); if (vmdq < 32) { IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq); IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); } else { IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32)); } return IXGBE_SUCCESS; } /** * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array * @hw: pointer to hardware structure **/ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw) { int i; DEBUGFUNC("ixgbe_init_uta_tables_generic"); DEBUGOUT(" Clearing UTA\n"); for (i = 0; i < 128; i++) IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); return IXGBE_SUCCESS; } /** * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot * @hw: pointer to hardware structure * @vlan: VLAN id to write to VLAN filter - * @vlvf_bypass: TRUE to find vlanid only, FALSE returns first empty slot if + * @vlvf_bypass: true to find vlanid only, false returns first empty slot if * vlanid not found * * * return the VLVF index where this VLAN id should be placed * **/ s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass) { s32 regindex, first_empty_slot; u32 bits; /* short cut the special case */ if (vlan == 0) return 0; /* if vlvf_bypass is set we don't want to use an empty slot, we * will simply bypass the VLVF if there are no entries present in the * VLVF that contain our VLAN */ first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0; /* add VLAN enable bit for comparison */ vlan |= IXGBE_VLVF_VIEN; /* Search for the vlan id in the VLVF entries. Save off the first empty * slot found along the way. * * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1 */ for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) { bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex)); if (bits == vlan) return regindex; if (!first_empty_slot && !bits) first_empty_slot = regindex; } /* If we are here then we didn't find the VLAN. Return first empty * slot we found during our search, else error. */ if (!first_empty_slot) ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "No space in VLVF.\n"); return first_empty_slot ? first_empty_slot : IXGBE_ERR_NO_SPACE; } /** * ixgbe_set_vfta_generic - Set VLAN filter table * @hw: pointer to hardware structure * @vlan: VLAN id to write to VLAN filter * @vind: VMDq output index that maps queue to VLAN id in VLVFB * @vlan_on: boolean flag to turn on/off VLAN * @vlvf_bypass: boolean flag indicating updating default pool is okay * * Turn on/off specified VLAN in the VLAN filter table. **/ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on, bool vlvf_bypass) { u32 regidx, vfta_delta, vfta; s32 ret_val; DEBUGFUNC("ixgbe_set_vfta_generic"); if (vlan > 4095 || vind > 63) return IXGBE_ERR_PARAM; /* * this is a 2 part operation - first the VFTA, then the * VLVF and VLVFB if VT Mode is set * We don't write the VFTA until we know the VLVF part succeeded.
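 *
 * (Editor's sketch of the Part 1 arithmetic below: 4096 VLAN ids are
 * spread over 128 32-bit VFTA registers, and the delta is an XOR mask
 * that flips the bit only when it disagrees with vlan_on:
 *
 *	regidx = vlan / 32;			// which VFTA register
 *	bit = 1u << (vlan % 32);		// which bit within it
 *	vfta_delta = bit & (vlan_on ? ~vfta : vfta);
 *	vfta ^= vfta_delta;
 *
 * e.g. vlan = 100 gives regidx 3 and bit 1 << 4.)
 *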
*/ /* Part 1 * The VFTA is a bitstring made up of 128 32-bit registers * that enable the particular VLAN id, much like the MTA: * bits[11-5]: which register * bits[4-0]: which bit in the register */ regidx = vlan / 32; vfta_delta = 1 << (vlan % 32); vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx)); /* * vfta_delta represents the difference between the current value * of vfta and the value we want in the register. Since the diff * is an XOR mask we can just update the vfta using an XOR */ vfta_delta &= vlan_on ? ~vfta : vfta; vfta ^= vfta_delta; /* Part 2 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF */ ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on, &vfta_delta, vfta, vlvf_bypass); if (ret_val != IXGBE_SUCCESS) { if (vlvf_bypass) goto vfta_update; return ret_val; } vfta_update: /* Update VFTA now that we are ready for traffic */ if (vfta_delta) IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta); return IXGBE_SUCCESS; } /** * ixgbe_set_vlvf_generic - Set VLAN Pool Filter * @hw: pointer to hardware structure * @vlan: VLAN id to write to VLAN filter * @vind: VMDq output index that maps queue to VLAN id in VLVFB * @vlan_on: boolean flag to turn on/off VLAN in VLVF * @vfta_delta: pointer to the difference between the current value of VFTA * and the desired value * @vfta: the desired value of the VFTA * @vlvf_bypass: boolean flag indicating updating default pool is okay * * Turn on/off specified bit in VLVF table. **/ s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on, u32 *vfta_delta, u32 vfta, bool vlvf_bypass) { u32 bits; s32 vlvf_index; DEBUGFUNC("ixgbe_set_vlvf_generic"); if (vlan > 4095 || vind > 63) return IXGBE_ERR_PARAM; /* If VT Mode is set * Either vlan_on * make sure the vlan is in VLVF * set the vind bit in the matching VLVFB * Or !vlan_on * clear the pool bit and possibly the vind */ if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE)) return IXGBE_SUCCESS; vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass); if (vlvf_index < 0) return vlvf_index; bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32)); /* set the pool bit */ bits |= 1 << (vind % 32); if (vlan_on) goto vlvf_update; /* clear the pool bit */ bits ^= 1 << (vind % 32); if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) { /* Clear VFTA first, then disable VLVF. Otherwise * we run the risk of stray packets leaking into * the PF via the default pool */ if (*vfta_delta) IXGBE_WRITE_REG(hw, IXGBE_VFTA(vlan / 32), vfta); /* disable VLVF and clear remaining bit from pool */ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0); IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0); return IXGBE_SUCCESS; } /* If there are still bits set in the VLVFB registers * for the VLAN ID indicated we need to see if the * caller is requesting that we clear the VFTA entry bit. * If the caller has requested that we clear the VFTA * entry bit but there are still pools/VFs using this VLAN * ID entry then ignore the request. We're not worried * about the case where we're turning the VFTA VLAN ID * entry bit on, only when requested to turn it off as * there may be multiple pools and/or VFs using the * VLAN ID entry. In that case we cannot clear the * VFTA bit until all pools/VFs using that VLAN ID have also * been cleared. This will be indicated by "bits" being * zero. 
*/ *vfta_delta = 0; vlvf_update: /* record pool change and enable VLAN ID if not already enabled */ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits); IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan); return IXGBE_SUCCESS; } /** * ixgbe_clear_vfta_generic - Clear VLAN filter table * @hw: pointer to hardware structure * * Clears the VLAN filter table, and the VMDq index associated with the filter **/ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) { u32 offset; DEBUGFUNC("ixgbe_clear_vfta_generic"); for (offset = 0; offset < hw->mac.vft_size; offset++) IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) { IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0); IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0); IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0); } return IXGBE_SUCCESS; } /** * ixgbe_need_crosstalk_fix - Determine if we need to do the crosstalk fix * @hw: pointer to hardware structure * * Contains the logic to identify if we need to verify link for the * crosstalk fix **/ static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw) { /* Does FW say we need the fix */ if (!hw->need_crosstalk_fix) - return FALSE; + return false; /* Only consider SFP+ PHYs i.e. media type fiber */ switch (hw->mac.ops.get_media_type(hw)) { case ixgbe_media_type_fiber: case ixgbe_media_type_fiber_qsfp: break; default: - return FALSE; + return false; } - return TRUE; + return true; } /** * ixgbe_check_mac_link_generic - Determine link and speed status * @hw: pointer to hardware structure * @speed: pointer to link speed - * @link_up: TRUE when link is up + * @link_up: true when link is up * @link_up_wait_to_complete: bool used to wait for link up or not * * Reads the links register to determine if link is up and the current speed **/ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up, bool link_up_wait_to_complete) { u32 links_reg, links_orig; u32 i; DEBUGFUNC("ixgbe_check_mac_link_generic"); /* If the crosstalk fix is enabled, do the sanity check of making sure * the SFP+ cage is full.
*/ if (ixgbe_need_crosstalk_fix(hw)) { u32 sfp_cage_full; switch (hw->mac.type) { case ixgbe_mac_82599EB: sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & IXGBE_ESDP_SDP2; break; case ixgbe_mac_X550EM_x: case ixgbe_mac_X550EM_a: sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & IXGBE_ESDP_SDP0; break; default: /* sanity check - No SFP+ devices here */ - sfp_cage_full = FALSE; + sfp_cage_full = false; break; } if (!sfp_cage_full) { - *link_up = FALSE; + *link_up = false; *speed = IXGBE_LINK_SPEED_UNKNOWN; return IXGBE_SUCCESS; } } /* clear the old state */ links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS); links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); if (links_orig != links_reg) { DEBUGOUT2("LINKS changed from %08X to %08X\n", links_orig, links_reg); } if (link_up_wait_to_complete) { for (i = 0; i < hw->mac.max_link_up_time; i++) { if (links_reg & IXGBE_LINKS_UP) { - *link_up = TRUE; + *link_up = true; break; } else { - *link_up = FALSE; + *link_up = false; } msec_delay(100); links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); } } else { if (links_reg & IXGBE_LINKS_UP) - *link_up = TRUE; + *link_up = true; else - *link_up = FALSE; + *link_up = false; } switch (links_reg & IXGBE_LINKS_SPEED_82599) { case IXGBE_LINKS_SPEED_10G_82599: *speed = IXGBE_LINK_SPEED_10GB_FULL; if (hw->mac.type >= ixgbe_mac_X550) { if (links_reg & IXGBE_LINKS_SPEED_NON_STD) *speed = IXGBE_LINK_SPEED_2_5GB_FULL; } break; case IXGBE_LINKS_SPEED_1G_82599: *speed = IXGBE_LINK_SPEED_1GB_FULL; break; case IXGBE_LINKS_SPEED_100_82599: *speed = IXGBE_LINK_SPEED_100_FULL; if (hw->mac.type == ixgbe_mac_X550) { if (links_reg & IXGBE_LINKS_SPEED_NON_STD) *speed = IXGBE_LINK_SPEED_5GB_FULL; } break; case IXGBE_LINKS_SPEED_10_X550EM_A: *speed = IXGBE_LINK_SPEED_UNKNOWN; if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) *speed = IXGBE_LINK_SPEED_10_FULL; break; default: *speed = IXGBE_LINK_SPEED_UNKNOWN; } return IXGBE_SUCCESS; } /** * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from * the EEPROM * @hw: pointer to hardware structure * @wwnn_prefix: the alternative WWNN prefix * @wwpn_prefix: the alternative WWPN prefix * * This function will read the EEPROM from the alternative SAN MAC address * block to check the support for the alternative WWNN/WWPN prefix support. 
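 *
 * A sketch of the pointer-chain validation pattern the EEPROM block
 * readers in this file share; ptr_word is a hypothetical location name:
 *
 *	if (hw->eeprom.ops.read(hw, ptr_word, &offset))
 *		goto read_err;		// EEPROM access failed
 *	if (offset == 0 || offset == 0xFFFF)
 *		goto not_present;	// block not provisioned
 *
 * Only after both checks is the block at 'offset' dereferenced.
 *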
**/ s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, u16 *wwpn_prefix) { u16 offset, caps; u16 alt_san_mac_blk_offset; DEBUGFUNC("ixgbe_get_wwn_prefix_generic"); /* clear output first */ *wwnn_prefix = 0xFFFF; *wwpn_prefix = 0xFFFF; /* check if alternative SAN MAC is supported */ offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR; if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset)) goto wwn_prefix_err; if ((alt_san_mac_blk_offset == 0) || (alt_san_mac_blk_offset == 0xFFFF)) goto wwn_prefix_out; /* check capability in alternative san mac address block */ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET; if (hw->eeprom.ops.read(hw, offset, &caps)) goto wwn_prefix_err; if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN)) goto wwn_prefix_out; /* get the corresponding prefix for WWNN/WWPN */ offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET; if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) { ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, "eeprom read at offset %d failed", offset); } offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET; if (hw->eeprom.ops.read(hw, offset, wwpn_prefix)) goto wwn_prefix_err; wwn_prefix_out: return IXGBE_SUCCESS; wwn_prefix_err: ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, "eeprom read at offset %d failed", offset); return IXGBE_SUCCESS; } /** * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM * @hw: pointer to hardware structure * @bs: the fcoe boot status * * This function will read the FCOE boot status from the iSCSI FCOE block **/ s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs) { u16 offset, caps, flags; s32 status; DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic"); /* clear output first */ *bs = ixgbe_fcoe_bootstatus_unavailable; /* check if FCOE IBA block is present */ offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR; status = hw->eeprom.ops.read(hw, offset, &caps); if (status != IXGBE_SUCCESS) goto out; if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE)) goto out; /* check if iSCSI FCOE block is populated */ status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset); if (status != IXGBE_SUCCESS) goto out; if ((offset == 0) || (offset == 0xFFFF)) goto out; /* read fcoe flags in iSCSI FCOE block */ offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET; status = hw->eeprom.ops.read(hw, offset, &flags); if (status != IXGBE_SUCCESS) goto out; if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE) *bs = ixgbe_fcoe_bootstatus_enabled; else *bs = ixgbe_fcoe_bootstatus_disabled; out: return status; } /** * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing * @hw: pointer to hardware structure * @enable: enable or disable switch for MAC anti-spoofing * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing * **/ void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) { int vf_target_reg = vf >> 3; int vf_target_shift = vf % 8; u32 pfvfspoof; if (hw->mac.type == ixgbe_mac_82598EB) return; pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); if (enable) pfvfspoof |= (1 << vf_target_shift); else pfvfspoof &= ~(1 << vf_target_shift); IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); } /** * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing * @hw: pointer to hardware structure * @enable: enable or disable switch for VLAN anti-spoofing * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing * **/ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) { int vf_target_reg = vf >> 3; int 
vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT; u32 pfvfspoof; if (hw->mac.type == ixgbe_mac_82598EB) return; pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); if (enable) pfvfspoof |= (1 << vf_target_shift); else pfvfspoof &= ~(1 << vf_target_shift); IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); } /** * ixgbe_get_device_caps_generic - Get additional device capabilities * @hw: pointer to hardware structure * @device_caps: the EEPROM word with the extra device capabilities * * This function will read the EEPROM location for the device capabilities, * and return the word through device_caps. **/ s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps) { DEBUGFUNC("ixgbe_get_device_caps_generic"); hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps); return IXGBE_SUCCESS; } /** * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering * @hw: pointer to hardware structure * **/ void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw) { u32 regval; u32 i; DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2"); /* Enable relaxed ordering */ for (i = 0; i < hw->mac.max_tx_queues; i++) { regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN; IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); } for (i = 0; i < hw->mac.max_rx_queues; i++) { regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN | IXGBE_DCA_RXCTRL_HEAD_WRO_EN; IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); } } /** * ixgbe_calculate_checksum - Calculate checksum for buffer * @buffer: pointer to EEPROM * @length: size of EEPROM to calculate a checksum for * Calculates the checksum for some buffer on a specified length. The * checksum calculated is returned. **/ u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) { u32 i; u8 sum = 0; DEBUGFUNC("ixgbe_calculate_checksum"); if (!buffer) return 0; for (i = 0; i < length; i++) sum += buffer[i]; return (u8) (0 - sum); } /** * ixgbe_hic_unlocked - Issue command to manageability block unlocked * @hw: pointer to the HW structure * @buffer: command to write and where the return status will be placed * @length: length of buffer, must be multiple of 4 bytes * @timeout: time in ms to wait for command completion * * Communicates with the manageability block. On success return IXGBE_SUCCESS * else returns semaphore error when encountering an error acquiring * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. * * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held * by the caller. **/ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length, u32 timeout) { u32 hicr, i, fwsts; u16 dword_len; DEBUGFUNC("ixgbe_hic_unlocked"); if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { DEBUGOUT1("Buffer length failure buffersize=%d.\n", length); return IXGBE_ERR_HOST_INTERFACE_COMMAND; } /* Set bit 9 of FWSTS clearing FW reset indication */ fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS); IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI); /* Check that the host interface is enabled. */ hicr = IXGBE_READ_REG(hw, IXGBE_HICR); if (!(hicr & IXGBE_HICR_EN)) { DEBUGOUT("IXGBE_HOST_EN bit disabled.\n"); return IXGBE_ERR_HOST_INTERFACE_COMMAND; } /* Calculate length in DWORDs. We must be DWORD aligned */ if (length % sizeof(u32)) { DEBUGOUT("Buffer length failure, not aligned to dword"); return IXGBE_ERR_INVALID_ARGUMENT; } dword_len = length >> 2; /* The device driver writes the relevant command block * into the ram area. 
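 *
 * Editor's note: the block carries the two's-complement checksum
 * produced by ixgbe_calculate_checksum() above, so the receiver can
 * verify it by summing every byte, checksum included, and comparing
 * the result with zero. A minimal sketch of that check, with
 * hic_checksum_ok() as a hypothetical helper:
 *
 *	static bool hic_checksum_ok(const u8 *buf, u32 len)
 *	{
 *		u8 sum = 0;
 *		u32 i;
 *
 *		for (i = 0; i < len; i++)
 *			sum += buf[i];	// wraps mod 256
 *		return sum == 0;
 *	}
 *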
*/ for (i = 0; i < dword_len; i++) IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG, i, IXGBE_CPU_TO_LE32(buffer[i])); /* Setting this bit tells the ARC that a new command is pending. */ IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C); for (i = 0; i < timeout; i++) { hicr = IXGBE_READ_REG(hw, IXGBE_HICR); if (!(hicr & IXGBE_HICR_C)) break; msec_delay(1); } /* For each command except "Apply Update" perform * status checks in the HICR register. */ if ((buffer[0] & IXGBE_HOST_INTERFACE_MASK_CMD) == IXGBE_HOST_INTERFACE_APPLY_UPDATE_CMD) return IXGBE_SUCCESS; /* Check command completion */ if ((timeout && i == timeout) || !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) { ERROR_REPORT1(IXGBE_ERROR_CAUTION, "Command has failed with no status valid.\n"); return IXGBE_ERR_HOST_INTERFACE_COMMAND; } return IXGBE_SUCCESS; } /** * ixgbe_host_interface_command - Issue command to manageability block * @hw: pointer to the HW structure * @buffer: contains the command to write and where the return status will * be placed * @length: length of buffer, must be multiple of 4 bytes * @timeout: time in ms to wait for command completion - * @return_data: read and return data from the buffer (TRUE) or not (FALSE) + * @return_data: read and return data from the buffer (true) or not (false) * Needed because FW structures are big endian and decoding of * these fields can be 8 bit or 16 bit based on command. Decoding * is not easily understood without making a table of commands. * So we will leave this up to the caller to read back the data * in these cases. * * Communicates with the manageability block. On success return IXGBE_SUCCESS * else returns semaphore error when encountering an error acquiring * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. **/ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, u32 length, u32 timeout, bool return_data) { u32 hdr_size = sizeof(struct ixgbe_hic_hdr); struct ixgbe_hic_hdr *resp = (struct ixgbe_hic_hdr *)buffer; u16 buf_len; s32 status; u32 bi; u32 dword_len; DEBUGFUNC("ixgbe_host_interface_command"); if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { DEBUGOUT1("Buffer length failure buffersize=%d.\n", length); return IXGBE_ERR_HOST_INTERFACE_COMMAND; } /* Take management host interface semaphore */ status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); if (status) return status; status = ixgbe_hic_unlocked(hw, buffer, length, timeout); if (status) goto rel_out; if (!return_data) goto rel_out; /* Calculate length in DWORDs */ dword_len = hdr_size >> 2; /* first pull in the header so we know the buffer length */ for (bi = 0; bi < dword_len; bi++) { buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); IXGBE_LE32_TO_CPUS(&buffer[bi]); } /* * If there is anything in the data position, pull it in. * The Read Flash command requires reading the buffer length from * two bytes instead of one byte */ if (resp->cmd == IXGBE_HOST_INTERFACE_FLASH_READ_CMD || resp->cmd == IXGBE_HOST_INTERFACE_SHADOW_RAM_READ_CMD) { for (; bi < dword_len + 2; bi++) { buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); IXGBE_LE32_TO_CPUS(&buffer[bi]); } buf_len = (((u16)(resp->cmd_or_resp.ret_status) << 3) & 0xF00) | resp->buf_len; hdr_size += (2 << 2); } else { buf_len = resp->buf_len; } if (!buf_len) goto rel_out; if (length < buf_len + hdr_size) { DEBUGOUT("Buffer not large enough for reply message.\n"); status = IXGBE_ERR_HOST_INTERFACE_COMMAND; goto rel_out; } /* Calculate length in DWORDs, add 3 for odd lengths */ dword_len = (buf_len + 3) >> 2; /*
Pull in the rest of the buffer (bi is where we left off) */ for (; bi <= dword_len; bi++) { buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); IXGBE_LE32_TO_CPUS(&buffer[bi]); } rel_out: hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); return status; } /** * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware * @hw: pointer to the HW structure * @maj: driver version major number * @min: driver version minor number * @build: driver version build number * @sub: driver version sub build number * @len: unused * @driver_ver: unused * * Sends driver version number to firmware through the manageability * block. On success return IXGBE_SUCCESS * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. **/ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build, u8 sub, u16 len, const char *driver_ver) { struct ixgbe_hic_drv_info fw_cmd; int i; s32 ret_val = IXGBE_SUCCESS; DEBUGFUNC("ixgbe_set_fw_drv_ver_generic"); UNREFERENCED_2PARAMETER(len, driver_ver); fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN; fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; fw_cmd.port_num = (u8)hw->bus.func; fw_cmd.ver_maj = maj; fw_cmd.ver_min = min; fw_cmd.ver_build = build; fw_cmd.ver_sub = sub; fw_cmd.hdr.checksum = 0; fw_cmd.pad = 0; fw_cmd.pad2 = 0; fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, sizeof(fw_cmd), IXGBE_HI_COMMAND_TIMEOUT, - TRUE); + true); if (ret_val != IXGBE_SUCCESS) continue; if (fw_cmd.hdr.cmd_or_resp.ret_status == FW_CEM_RESP_STATUS_SUCCESS) ret_val = IXGBE_SUCCESS; else ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; break; } return ret_val; } /** * ixgbe_set_rxpba_generic - Initialize Rx packet buffer * @hw: pointer to hardware structure * @num_pb: number of packet buffers to allocate * @headroom: reserve n KB of headroom * @strategy: packet buffer allocation strategy **/ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom, int strategy) { u32 pbsize = hw->mac.rx_pb_size; int i = 0; u32 rxpktsize, txpktsize, txpbthresh; /* Reserve headroom */ pbsize -= headroom; if (!num_pb) num_pb = 1; /* Divide remaining packet buffer space amongst the number of packet * buffers requested using supplied strategy. */ switch (strategy) { case PBA_STRATEGY_WEIGHTED: /* ixgbe_dcb_pba_80_48 strategy weight first half of packet * buffer with 5/8 of the packet buffer space. */ rxpktsize = (pbsize * 5) / (num_pb * 4); pbsize -= rxpktsize * (num_pb / 2); rxpktsize <<= IXGBE_RXPBSIZE_SHIFT; for (; i < (num_pb / 2); i++) IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); /* configure remaining packet buffers */ /* FALLTHROUGH */ case PBA_STRATEGY_EQUAL: rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT; for (; i < num_pb; i++) IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); break; default: break; } /* Only support an equally distributed Tx packet buffer strategy. 
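 *
 * For scale, a worked sketch of the weighted Rx split above, assuming
 * num_pb = 8 and a 512 KB packet buffer: rxpktsize = (pbsize * 5) /
 * (num_pb * 4) gives each of the first four TCs 80 KB (5/8 of the
 * buffer in total), leaving 192 KB for the PBA_STRATEGY_EQUAL
 * fall-through to divide equally, i.e. 48 KB per remaining TC.
 *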
*/ txpktsize = IXGBE_TXPBSIZE_MAX / num_pb; txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX; for (i = 0; i < num_pb; i++) { IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize); IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh); } /* Clear unused TCs, if any, to zero buffer size */ for (; i < IXGBE_MAX_PB; i++) { IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0); IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0); } } /** * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo * @hw: pointer to the hardware structure * * The 82599 and X540 MACs can experience issues if TX work is still pending * when a reset occurs. This function prevents this by flushing the PCIe * buffers on the system. **/ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw) { u32 gcr_ext, hlreg0, i, poll; u16 value; /* * If double reset is not requested then all transactions should * already be clear and as such there is no work to do */ if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED)) return; /* * Set loopback enable to prevent any transmits from being sent * should the link come up. This assumes that the RXCTRL.RXEN bit * has already been cleared. */ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK); /* Wait for a last completion before clearing buffers */ IXGBE_WRITE_FLUSH(hw); msec_delay(3); /* * Before proceeding, make sure that the PCIe block does not have * transactions pending. */ poll = ixgbe_pcie_timeout_poll(hw); for (i = 0; i < poll; i++) { usec_delay(100); value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS); if (IXGBE_REMOVED(hw->hw_addr)) goto out; if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) goto out; } out: /* initiate cleaning flow for buffers in the PCIe transaction layer */ gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR); /* Flush all writes and allow 20usec for all transactions to clear */ IXGBE_WRITE_FLUSH(hw); usec_delay(20); /* restore previous register values */ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); } /** * ixgbe_bypass_rw_generic - Bit bang data into by_pass FW * * @hw: pointer to hardware structure * @cmd: Command we send to the FW * @status: The reply from the FW * * Bit-bangs the cmd to the by_pass FW; status points to what is returned.
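 *
 * The framing is MSB first: bit 31 of cmd is driven onto SDI first,
 * and each sampled SDO level is shifted into the low end of *status.
 * A sketch of just that data movement, with drive_sdi(), pulse_sck()
 * and sample_sdo() as hypothetical stand-ins for the register writes,
 * flushes and delays in the real loop below:
 *
 *	for (i = 0; i < 32; i++) {
 *		drive_sdi((cmd >> (31 - i)) & 0x01);	// next bit out
 *		pulse_sck();				// clock the bit
 *		*status = (*status << 1) | sample_sdo(); // shift reply in
 *	}
 *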
**/ #define IXGBE_BYPASS_BB_WAIT 1 s32 ixgbe_bypass_rw_generic(struct ixgbe_hw *hw, u32 cmd, u32 *status) { int i; u32 sck, sdi, sdo, dir_sck, dir_sdi, dir_sdo; u32 esdp; if (!status) return IXGBE_ERR_PARAM; *status = 0; /* SDPs vary by MAC type */ switch (hw->mac.type) { case ixgbe_mac_82599EB: sck = IXGBE_ESDP_SDP7; sdi = IXGBE_ESDP_SDP0; sdo = IXGBE_ESDP_SDP6; dir_sck = IXGBE_ESDP_SDP7_DIR; dir_sdi = IXGBE_ESDP_SDP0_DIR; dir_sdo = IXGBE_ESDP_SDP6_DIR; break; case ixgbe_mac_X540: sck = IXGBE_ESDP_SDP2; sdi = IXGBE_ESDP_SDP0; sdo = IXGBE_ESDP_SDP1; dir_sck = IXGBE_ESDP_SDP2_DIR; dir_sdi = IXGBE_ESDP_SDP0_DIR; dir_sdo = IXGBE_ESDP_SDP1_DIR; break; default: return IXGBE_ERR_DEVICE_NOT_SUPPORTED; } /* Set SDP pins direction */ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); esdp |= dir_sck; /* SCK as output */ esdp |= dir_sdi; /* SDI as output */ esdp &= ~dir_sdo; /* SDO as input */ esdp |= sck; esdp |= sdi; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); IXGBE_WRITE_FLUSH(hw); msec_delay(IXGBE_BYPASS_BB_WAIT); /* Generate start condition */ esdp &= ~sdi; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); IXGBE_WRITE_FLUSH(hw); msec_delay(IXGBE_BYPASS_BB_WAIT); esdp &= ~sck; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); IXGBE_WRITE_FLUSH(hw); msec_delay(IXGBE_BYPASS_BB_WAIT); /* Clock out the new control word and clock in the status */ for (i = 0; i < 32; i++) { if ((cmd >> (31 - i)) & 0x01) { esdp |= sdi; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); } else { esdp &= ~sdi; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); } IXGBE_WRITE_FLUSH(hw); msec_delay(IXGBE_BYPASS_BB_WAIT); esdp |= sck; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); IXGBE_WRITE_FLUSH(hw); msec_delay(IXGBE_BYPASS_BB_WAIT); esdp &= ~sck; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); IXGBE_WRITE_FLUSH(hw); msec_delay(IXGBE_BYPASS_BB_WAIT); esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); if (esdp & sdo) *status = (*status << 1) | 0x01; else *status = (*status << 1) | 0x00; msec_delay(IXGBE_BYPASS_BB_WAIT); } /* stop condition */ esdp |= sck; esdp &= ~sdi; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); IXGBE_WRITE_FLUSH(hw); msec_delay(IXGBE_BYPASS_BB_WAIT); esdp |= sdi; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); IXGBE_WRITE_FLUSH(hw); /* set the page bits to match the cmd that the status belongs to */ *status = (*status & 0x3fffffff) | (cmd & 0xc0000000); return IXGBE_SUCCESS; } /** * ixgbe_bypass_valid_rd_generic - Verify valid return from bit-bang. * * If we send a write we can't be sure it took until we can read back * that same register. It can be a problem as some of the fields may * for valid reasons change in between the time we wrote the register and * we read it again to verify. So this function checks everything we * can check and then assumes it worked. * * @u32 in_reg - The register cmd for the bit-bang read. * @u32 out_reg - The register returned from a bit-bang read.
**/ bool ixgbe_bypass_valid_rd_generic(u32 in_reg, u32 out_reg) { u32 mask; /* Page must match for all control pages */ if ((in_reg & BYPASS_PAGE_M) != (out_reg & BYPASS_PAGE_M)) - return FALSE; + return false; switch (in_reg & BYPASS_PAGE_M) { case BYPASS_PAGE_CTL0: /* All the following can't change since the last write * - All the event actions * - The timeout value */ mask = BYPASS_AUX_ON_M | BYPASS_MAIN_ON_M | BYPASS_MAIN_OFF_M | BYPASS_AUX_OFF_M | BYPASS_WDTIMEOUT_M | BYPASS_WDT_VALUE_M; if ((out_reg & mask) != (in_reg & mask)) - return FALSE; + return false; /* 0x0 is never a valid value for bypass status */ if (!(out_reg & BYPASS_STATUS_OFF_M)) - return FALSE; + return false; break; case BYPASS_PAGE_CTL1: /* All the following can't change since the last write * - time valid bit * - time we last sent */ mask = BYPASS_CTL1_VALID_M | BYPASS_CTL1_TIME_M; if ((out_reg & mask) != (in_reg & mask)) - return FALSE; + return false; break; case BYPASS_PAGE_CTL2: /* All we can check in this page is control number * which is already done above. */ break; } - /* We are as sure as we can be return TRUE */ - return TRUE; + /* We are as sure as we can be return true */ + return true; } /** * ixgbe_bypass_set_generic - Set a bypass field in the FW CTRL Register. * * @hw: pointer to hardware structure * @ctrl: The control word we are setting. * @event: The event we are setting in the FW. This also happens to * be the mask for the event we are setting (handy) * @action: The action we set the event to in the FW. This is in a * bit field that happens to be what we want to put in * the event spot (also handy) **/ s32 ixgbe_bypass_set_generic(struct ixgbe_hw *hw, u32 ctrl, u32 event, u32 action) { u32 by_ctl = 0; u32 cmd, verify; u32 count = 0; /* Get current values */ cmd = ctrl; /* just reading only need control number */ if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl)) return IXGBE_ERR_INVALID_ARGUMENT; /* Set to new action */ cmd = (by_ctl & ~event) | BYPASS_WE | action; if (ixgbe_bypass_rw_generic(hw, cmd, &by_ctl)) return IXGBE_ERR_INVALID_ARGUMENT; /* Page 0 forces a FW eeprom write, which is slow, so verify */ if ((cmd & BYPASS_PAGE_M) == BYPASS_PAGE_CTL0) { verify = BYPASS_PAGE_CTL0; do { if (count++ > 5) return IXGBE_BYPASS_FW_WRITE_FAILURE; if (ixgbe_bypass_rw_generic(hw, verify, &by_ctl)) return IXGBE_ERR_INVALID_ARGUMENT; } while (!ixgbe_bypass_valid_rd_generic(cmd, by_ctl)); } else { /* We have to give the FW time for the write to stick */ msec_delay(100); } return IXGBE_SUCCESS; } /** * ixgbe_bypass_rd_eep_generic - Read the bypass FW eeprom address. * * @hw: pointer to hardware structure * @addr: The bypass eeprom address to read. * @value: The 8b of data at the address above. **/ s32 ixgbe_bypass_rd_eep_generic(struct ixgbe_hw *hw, u32 addr, u8 *value) { u32 cmd; u32 status; /* send the request */ cmd = BYPASS_PAGE_CTL2 | BYPASS_WE; cmd |= (addr << BYPASS_CTL2_OFFSET_SHIFT) & BYPASS_CTL2_OFFSET_M; if (ixgbe_bypass_rw_generic(hw, cmd, &status)) return IXGBE_ERR_INVALID_ARGUMENT; /* We have to give the FW time for the write to stick */ msec_delay(100); /* now read the results */ cmd &= ~BYPASS_WE; if (ixgbe_bypass_rw_generic(hw, cmd, &status)) return IXGBE_ERR_INVALID_ARGUMENT; *value = status & BYPASS_CTL2_DATA_M; return IXGBE_SUCCESS; } /** * ixgbe_get_orom_version - Return option ROM from EEPROM * * @hw: pointer to hardware structure * @nvm_ver: pointer to output structure * - * if valid option ROM version, nvm_ver->or_valid set to TRUE - * else nvm_ver->or_valid is FALSE.
+ * if valid option ROM version, nvm_ver->or_valid set to true + * else nvm_ver->or_valid is false. **/ void ixgbe_get_orom_version(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver) { u16 offset, eeprom_cfg_blkh, eeprom_cfg_blkl; - nvm_ver->or_valid = FALSE; + nvm_ver->or_valid = false; /* Option Rom may or may not be present. Start with pointer */ hw->eeprom.ops.read(hw, NVM_OROM_OFFSET, &offset); /* make sure offset is valid */ if ((offset == 0x0) || (offset == NVM_INVALID_PTR)) return; hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh); hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_LOW, &eeprom_cfg_blkl); /* option rom exists and is valid */ if ((eeprom_cfg_blkl | eeprom_cfg_blkh) == 0x0 || eeprom_cfg_blkl == NVM_VER_INVALID || eeprom_cfg_blkh == NVM_VER_INVALID) return; - nvm_ver->or_valid = TRUE; + nvm_ver->or_valid = true; nvm_ver->or_major = eeprom_cfg_blkl >> NVM_OROM_SHIFT; nvm_ver->or_build = (eeprom_cfg_blkl << NVM_OROM_SHIFT) | (eeprom_cfg_blkh >> NVM_OROM_SHIFT); nvm_ver->or_patch = eeprom_cfg_blkh & NVM_OROM_PATCH_MASK; } /** * ixgbe_get_oem_prod_version - Return OEM Product version * * @hw: pointer to hardware structure * @nvm_ver: pointer to output structure * - * if valid OEM product version, nvm_ver->oem_valid set to TRUE - * else nvm_ver->oem_valid is FALSE. + * if valid OEM product version, nvm_ver->oem_valid set to true + * else nvm_ver->oem_valid is false. **/ void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver) { u16 rel_num, prod_ver, mod_len, cap, offset; - nvm_ver->oem_valid = FALSE; + nvm_ver->oem_valid = false; hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset); /* Return if offset to OEM Product Version block is invalid */ if (offset == 0x0 || offset == NVM_INVALID_PTR) return; /* Read product version block */ hw->eeprom.ops.read(hw, offset, &mod_len); hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_CAP_OFF, &cap); /* Return if OEM product version block is invalid */ if (mod_len != NVM_OEM_PROD_VER_MOD_LEN || (cap & NVM_OEM_PROD_VER_CAP_MASK) != 0x0) return; hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_L, &prod_ver); hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_H, &rel_num); /* Return if version is invalid */ if ((rel_num | prod_ver) == 0x0 || rel_num == NVM_VER_INVALID || prod_ver == NVM_VER_INVALID) return; nvm_ver->oem_major = prod_ver >> NVM_VER_SHIFT; nvm_ver->oem_minor = prod_ver & NVM_VER_MASK; nvm_ver->oem_release = rel_num; - nvm_ver->oem_valid = TRUE; + nvm_ver->oem_valid = true; } /** * ixgbe_get_etk_id - Return Etrack ID from EEPROM * * @hw: pointer to hardware structure * @nvm_ver: pointer to output structure * * word read errors will return 0xFFFF **/ void ixgbe_get_etk_id(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver) { u16 etk_id_l, etk_id_h; if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_id_l)) etk_id_l = NVM_VER_INVALID; if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_id_h)) etk_id_h = NVM_VER_INVALID; /* The word order for the version format is determined by high order * word bit 15. 
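 *
 * Worked sketch, assuming the usual definitions (NVM_ETK_VALID = bit 15
 * of the high word, NVM_ETK_SHIFT = 16): with etk_id_h = 0x8012 and
 * etk_id_l = 0x3456 the valid bit is set, so the else branch below
 * yields etk_id = 0x3456 | (0x8012 << 16) = 0x80123456; had the high
 * word been 0x0012 (bit 15 clear), the words would swap roles and give
 * 0x0012 | (0x3456 << 16) = 0x34560012.
 *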
*/ if ((etk_id_h & NVM_ETK_VALID) == 0) { nvm_ver->etk_id = etk_id_h; nvm_ver->etk_id |= (etk_id_l << NVM_ETK_SHIFT); } else { nvm_ver->etk_id = etk_id_l; nvm_ver->etk_id |= (etk_id_h << NVM_ETK_SHIFT); } } /** * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg * @hw: pointer to hardware structure * @map: pointer to u8 arr for returning map * * Read the rtrup2tc HW register and resolve its content into map **/ void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map) { u32 reg, i; reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) map[i] = IXGBE_RTRUP2TC_UP_MASK & (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT)); return; } void ixgbe_disable_rx_generic(struct ixgbe_hw *hw) { u32 pfdtxgswc; u32 rxctrl; rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); if (rxctrl & IXGBE_RXCTRL_RXEN) { if (hw->mac.type != ixgbe_mac_82598EB) { pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) { pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN; IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); - hw->mac.set_lben = TRUE; + hw->mac.set_lben = true; } else { - hw->mac.set_lben = FALSE; + hw->mac.set_lben = false; } } rxctrl &= ~IXGBE_RXCTRL_RXEN; IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl); } } void ixgbe_enable_rx_generic(struct ixgbe_hw *hw) { u32 pfdtxgswc; u32 rxctrl; rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN)); if (hw->mac.type != ixgbe_mac_82598EB) { if (hw->mac.set_lben) { pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN; IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); - hw->mac.set_lben = FALSE; + hw->mac.set_lben = false; } } } /** - * ixgbe_mng_present - returns TRUE when management capability is present + * ixgbe_mng_present - returns true when management capability is present * @hw: pointer to hardware structure */ bool ixgbe_mng_present(struct ixgbe_hw *hw) { u32 fwsm; if (hw->mac.type < ixgbe_mac_82599EB) - return FALSE; + return false; fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)); return !!(fwsm & IXGBE_FWSM_FW_MODE_PT); } /** * ixgbe_mng_enabled - Is the manageability engine enabled? * @hw: pointer to hardware structure * - * Returns TRUE if the manageability engine is enabled. + * Returns true if the manageability engine is enabled. **/ bool ixgbe_mng_enabled(struct ixgbe_hw *hw) { u32 fwsm, manc, factps; fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)); if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) - return FALSE; + return false; manc = IXGBE_READ_REG(hw, IXGBE_MANC); if (!(manc & IXGBE_MANC_RCV_TCO_EN)) - return FALSE; + return false; if (hw->mac.type <= ixgbe_mac_X540) { factps = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw)); if (factps & IXGBE_FACTPS_MNGCG) - return FALSE; + return false; } - return TRUE; + return true; } /** * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed * @hw: pointer to hardware structure * @speed: new link speed - * @autoneg_wait_to_complete: TRUE when waiting for completion is needed + * @autoneg_wait_to_complete: true when waiting for completion is needed * * Set the link speed in the MAC and/or PHY register and restarts link. 
**/ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; s32 status = IXGBE_SUCCESS; u32 speedcnt = 0; u32 i = 0; - bool autoneg, link_up = FALSE; + bool autoneg, link_up = false; DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber"); /* Mask off requested but non-supported speeds */ status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg); if (status != IXGBE_SUCCESS) return status; speed &= link_speed; /* Try each speed one by one, highest priority first. We do this in * software because 10Gb fiber doesn't support speed autonegotiation. */ if (speed & IXGBE_LINK_SPEED_10GB_FULL) { speedcnt++; highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; /* Set the module link speed */ switch (hw->phy.media_type) { case ixgbe_media_type_fiber_fixed: case ixgbe_media_type_fiber: ixgbe_set_rate_select_speed(hw, IXGBE_LINK_SPEED_10GB_FULL); break; case ixgbe_media_type_fiber_qsfp: /* QSFP module automatically detects MAC link speed */ break; default: DEBUGOUT("Unexpected media type.\n"); break; } /* Allow module to change analog characteristics (1G->10G) */ msec_delay(40); status = ixgbe_setup_mac_link(hw, IXGBE_LINK_SPEED_10GB_FULL, autoneg_wait_to_complete); if (status != IXGBE_SUCCESS) return status; /* Flap the Tx laser if it has not already been done */ ixgbe_flap_tx_laser(hw); /* Wait for the controller to acquire link. Per IEEE 802.3ap, * Section 73.10.2, we may have to wait up to 1000ms if KR is * attempted. 82599 uses the same timing for 10g SFI. */ for (i = 0; i < 10; i++) { /* Wait for the link partner to also set speed */ msec_delay(100); /* If we have link, just jump out */ status = ixgbe_check_link(hw, &link_speed, - &link_up, FALSE); + &link_up, false); if (status != IXGBE_SUCCESS) return status; if (link_up) goto out; } } if (speed & IXGBE_LINK_SPEED_1GB_FULL) { speedcnt++; if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN) highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; /* Set the module link speed */ switch (hw->phy.media_type) { case ixgbe_media_type_fiber_fixed: case ixgbe_media_type_fiber: ixgbe_set_rate_select_speed(hw, IXGBE_LINK_SPEED_1GB_FULL); break; case ixgbe_media_type_fiber_qsfp: /* QSFP module automatically detects link speed */ break; default: DEBUGOUT("Unexpected media type.\n"); break; } /* Allow module to change analog characteristics (10G->1G) */ msec_delay(40); status = ixgbe_setup_mac_link(hw, IXGBE_LINK_SPEED_1GB_FULL, autoneg_wait_to_complete); if (status != IXGBE_SUCCESS) return status; /* Flap the Tx laser if it has not already been done */ ixgbe_flap_tx_laser(hw); /* Wait for the link partner to also set speed */ msec_delay(100); /* If we have link, just jump out */ - status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE); + status = ixgbe_check_link(hw, &link_speed, &link_up, false); if (status != IXGBE_SUCCESS) return status; if (link_up) goto out; } /* We didn't get link. Configure back to the highest speed we tried, * (if there was more than one). We call ourselves back with just the * single highest speed that the user requested. 
*/ if (speedcnt > 1) status = ixgbe_setup_mac_link_multispeed_fiber(hw, highest_link_speed, autoneg_wait_to_complete); out: /* Set autoneg_advertised value based on input link speed */ hw->phy.autoneg_advertised = 0; if (speed & IXGBE_LINK_SPEED_10GB_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; if (speed & IXGBE_LINK_SPEED_1GB_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; return status; } /** * ixgbe_set_soft_rate_select_speed - Set module link speed * @hw: pointer to hardware structure * @speed: link speed to set * * Set module link speed via the soft rate select. */ void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed) { s32 status; u8 rs, eeprom_data; switch (speed) { case IXGBE_LINK_SPEED_10GB_FULL: /* one bit mask same as setting on */ rs = IXGBE_SFF_SOFT_RS_SELECT_10G; break; case IXGBE_LINK_SPEED_1GB_FULL: rs = IXGBE_SFF_SOFT_RS_SELECT_1G; break; default: DEBUGOUT("Invalid fixed module speed\n"); return; } /* Set RS0 */ status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, IXGBE_I2C_EEPROM_DEV_ADDR2, &eeprom_data); if (status) { DEBUGOUT("Failed to read Rx Rate Select RS0\n"); goto out; } eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs; status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, IXGBE_I2C_EEPROM_DEV_ADDR2, eeprom_data); if (status) { DEBUGOUT("Failed to write Rx Rate Select RS0\n"); goto out; } /* Set RS1 */ status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, IXGBE_I2C_EEPROM_DEV_ADDR2, &eeprom_data); if (status) { DEBUGOUT("Failed to read Rx Rate Select RS1\n"); goto out; } eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs; status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, IXGBE_I2C_EEPROM_DEV_ADDR2, eeprom_data); if (status) { DEBUGOUT("Failed to write Rx Rate Select RS1\n"); goto out; } out: return; } diff --git a/sys/dev/ixgbe/ixgbe_dcb.c b/sys/dev/ixgbe/ixgbe_dcb.c index dac03d8cb3b3..d0ae965bc6ac 100644 --- a/sys/dev/ixgbe/ixgbe_dcb.c +++ b/sys/dev/ixgbe/ixgbe_dcb.c @@ -1,760 +1,760 @@ /****************************************************************************** SPDX-License-Identifier: BSD-3-Clause Copyright (c) 2001-2017, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #include "ixgbe_type.h" #include "ixgbe_dcb.h" #include "ixgbe_dcb_82598.h" #include "ixgbe_dcb_82599.h" /** * ixgbe_dcb_calculate_tc_credits - This calculates the ieee traffic class * credits from the configured bandwidth percentages. Credits * are the smallest unit programmable into the underlying * hardware. The IEEE 802.1Qaz specification do not use bandwidth * groups so this is much simplified from the CEE case. * @bw: bandwidth index by traffic class * @refill: refill credits index by traffic class * @max: max credits by traffic class * @max_frame_size: maximum frame size */ s32 ixgbe_dcb_calculate_tc_credits(u8 *bw, u16 *refill, u16 *max, int max_frame_size) { int min_percent = 100; int min_credit, multiplier; int i; min_credit = ((max_frame_size / 2) + IXGBE_DCB_CREDIT_QUANTUM - 1) / IXGBE_DCB_CREDIT_QUANTUM; for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { if (bw[i] < min_percent && bw[i]) min_percent = bw[i]; } multiplier = (min_credit / min_percent) + 1; /* Find out the hw credits for each TC */ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { int val = min(bw[i] * multiplier, IXGBE_DCB_MAX_CREDIT_REFILL); if (val < min_credit) val = min_credit; refill[i] = (u16)val; max[i] = bw[i] ? (bw[i]*IXGBE_DCB_MAX_CREDIT)/100 : min_credit; } return 0; } /** * ixgbe_dcb_calculate_tc_credits_cee - Calculates traffic class credits * @hw: pointer to hardware structure * @dcb_config: Struct containing DCB settings * @max_frame_size: Maximum frame size * @direction: Configuring either Tx or Rx * * This function calculates the credits allocated to each traffic class. * It should be called only after the rules are checked by * ixgbe_dcb_check_config_cee(). */ s32 ixgbe_dcb_calculate_tc_credits_cee(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config, u32 max_frame_size, u8 direction) { struct ixgbe_dcb_tc_path *p; u32 min_multiplier = 0; u16 min_percent = 100; s32 ret_val = IXGBE_SUCCESS; /* Initialization values default for Tx settings */ u32 min_credit = 0; u32 credit_refill = 0; u32 credit_max = 0; u16 link_percentage = 0; u8 bw_percent = 0; u8 i; if (dcb_config == NULL) { ret_val = IXGBE_ERR_CONFIG; goto out; } min_credit = ((max_frame_size / 2) + IXGBE_DCB_CREDIT_QUANTUM - 1) / IXGBE_DCB_CREDIT_QUANTUM; /* Find smallest link percentage */ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { p = &dcb_config->tc_config[i].path[direction]; bw_percent = dcb_config->bw_percentage[direction][p->bwg_id]; link_percentage = p->bwg_percent; link_percentage = (link_percentage * bw_percent) / 100; if (link_percentage && link_percentage < min_percent) min_percent = link_percentage; } /* * The ratio between traffic classes will control the bandwidth * percentages seen on the wire. To calculate this ratio we use * a multiplier. 
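 *
 * (Worked sketch with assumed numbers: for a 9728-byte max frame and a
 * credit quantum of 64 bytes, min_credit = ceil(4864 / 64) = 76; if the
 * smallest link percentage is 5, the multiplier becomes 76 / 5 + 1 = 16,
 * so even that smallest TC refills 5 * 16 = 80 credits, clearing the
 * 76-credit floor.)
 *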
It is required that the refill credits must be * larger than the max frame size so here we find the smallest * multiplier that will allow all bandwidth percentages to be * greater than the max frame size. */ min_multiplier = (min_credit / min_percent) + 1; /* Find out the link percentage for each TC first */ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { p = &dcb_config->tc_config[i].path[direction]; bw_percent = dcb_config->bw_percentage[direction][p->bwg_id]; link_percentage = p->bwg_percent; /* Must be careful of integer division for very small nums */ link_percentage = (link_percentage * bw_percent) / 100; if (p->bwg_percent > 0 && link_percentage == 0) link_percentage = 1; /* Save link_percentage for reference */ p->link_percent = (u8)link_percentage; /* Calculate credit refill ratio using multiplier */ credit_refill = min(link_percentage * min_multiplier, (u32)IXGBE_DCB_MAX_CREDIT_REFILL); /* Refill at least minimum credit */ if (credit_refill < min_credit) credit_refill = min_credit; p->data_credits_refill = (u16)credit_refill; /* Calculate maximum credit for the TC */ credit_max = (link_percentage * IXGBE_DCB_MAX_CREDIT) / 100; /* * Adjustment based on rule checking, if the percentage * of a TC is too small, the maximum credit may not be * enough to send out a jumbo frame in data plane arbitration. */ if (credit_max < min_credit) credit_max = min_credit; if (direction == IXGBE_DCB_TX_CONFIG) { /* * Adjustment based on rule checking, if the * percentage of a TC is too small, the maximum * credit may not be enough to send out a TSO * packet in descriptor plane arbitration. */ if (credit_max && (credit_max < IXGBE_DCB_MIN_TSO_CREDIT) && (hw->mac.type == ixgbe_mac_82598EB)) credit_max = IXGBE_DCB_MIN_TSO_CREDIT; dcb_config->tc_config[i].desc_credits_max = (u16)credit_max; } p->data_credits_max = (u16)credit_max; } out: return ret_val; } /** * ixgbe_dcb_unpack_pfc_cee - Unpack dcb_config PFC info * @cfg: dcb configuration to unpack into hardware consumable fields * @map: user priority to traffic class map * @pfc_up: u8 to store user priority PFC bitmask * * This unpacks the dcb configuration PFC info which is stored per * traffic class into a 8bit user priority bitmask that can be * consumed by hardware routines. The priority to tc map must be * updated before calling this routine to use current up-to maps. */ void ixgbe_dcb_unpack_pfc_cee(struct ixgbe_dcb_config *cfg, u8 *map, u8 *pfc_up) { struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; int up; /* * If the TC for this user priority has PFC enabled then set the * matching bit in 'pfc_up' to reflect that PFC is enabled. 
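 *
 * Worked sketch: with map = {0, 0, 1, 1, 2, 2, 3, 3} and PFC enabled
 * only on TC 2, user priorities 4 and 5 select TC 2, so the loop below
 * produces *pfc_up = (1 << 4) | (1 << 5) = 0x30.
 *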
*/ for (*pfc_up = 0, up = 0; up < IXGBE_DCB_MAX_USER_PRIORITY; up++) { if (tc_config[map[up]].pfc != ixgbe_dcb_pfc_disabled) *pfc_up |= 1 << up; } } void ixgbe_dcb_unpack_refill_cee(struct ixgbe_dcb_config *cfg, int direction, u16 *refill) { struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; int tc; for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) refill[tc] = tc_config[tc].path[direction].data_credits_refill; } void ixgbe_dcb_unpack_max_cee(struct ixgbe_dcb_config *cfg, u16 *max) { struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; int tc; for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) max[tc] = tc_config[tc].desc_credits_max; } void ixgbe_dcb_unpack_bwgid_cee(struct ixgbe_dcb_config *cfg, int direction, u8 *bwgid) { struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; int tc; for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) bwgid[tc] = tc_config[tc].path[direction].bwg_id; } void ixgbe_dcb_unpack_tsa_cee(struct ixgbe_dcb_config *cfg, int direction, u8 *tsa) { struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; int tc; for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) tsa[tc] = tc_config[tc].path[direction].tsa; } u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up) { struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; u8 prio_mask = 1 << up; u8 tc = cfg->num_tcs.pg_tcs; /* If tc is 0 then DCB is likely not enabled or supported */ if (!tc) goto out; /* * Test from maximum TC to 1 and report the first match we find. If * we find no match we can assume that the TC is 0 since the TC must * be set for all user priorities. */ for (tc--; tc; tc--) { if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap) break; } out: return tc; } void ixgbe_dcb_unpack_map_cee(struct ixgbe_dcb_config *cfg, int direction, u8 *map) { u8 up; for (up = 0; up < IXGBE_DCB_MAX_USER_PRIORITY; up++) map[up] = ixgbe_dcb_get_tc_from_up(cfg, direction, up); } /** * ixgbe_dcb_check_config_cee - Check DCB rules for valid DCB settings * @dcb_config: Pointer to DCB config structure * * This function checks DCB rules for DCB settings. * The following rules are checked: * 1. The sum of bandwidth percentages of all Bandwidth Groups must total 100%. * 2. The sum of bandwidth percentages of all Traffic Classes within a Bandwidth * Group must total 100%. * 3. A Traffic Class should not be set to both Link Strict Priority * and Group Strict Priority. * 4. Link strict Bandwidth Groups can only have link strict traffic classes * with zero bandwidth. */ s32 ixgbe_dcb_check_config_cee(struct ixgbe_dcb_config *dcb_config) { struct ixgbe_dcb_tc_path *p; s32 ret_val = IXGBE_SUCCESS; u8 i, j, bw = 0, bw_id; u8 bw_sum[2][IXGBE_DCB_MAX_BW_GROUP]; bool link_strict[2][IXGBE_DCB_MAX_BW_GROUP]; memset(bw_sum, 0, sizeof(bw_sum)); memset(link_strict, 0, sizeof(link_strict)); /* First Tx, then Rx */ for (i = 0; i < 2; i++) { /* Check each traffic class for rule violation */ for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) { p = &dcb_config->tc_config[j].path[i]; bw = p->bwg_percent; bw_id = p->bwg_id; if (bw_id >= IXGBE_DCB_MAX_BW_GROUP) { ret_val = IXGBE_ERR_CONFIG; goto err_config; } if (p->tsa == ixgbe_dcb_tsa_strict) { - link_strict[i][bw_id] = TRUE; + link_strict[i][bw_id] = true; /* Link strict should have zero bandwidth */ if (bw) { ret_val = IXGBE_ERR_CONFIG; goto err_config; } } else if (!bw) { /* * Traffic classes without link strict * should have non-zero bandwidth.
*/ ret_val = IXGBE_ERR_CONFIG; goto err_config; } bw_sum[i][bw_id] += bw; } bw = 0; /* Check each bandwidth group for rule violation */ for (j = 0; j < IXGBE_DCB_MAX_BW_GROUP; j++) { bw += dcb_config->bw_percentage[i][j]; /* * Sum of bandwidth percentages of all traffic classes * within a Bandwidth Group must total 100% except for a * link strict group (zero bandwidth). */ if (link_strict[i][j]) { if (bw_sum[i][j]) { /* * Link strict group should have zero * bandwidth. */ ret_val = IXGBE_ERR_CONFIG; goto err_config; } } else if (bw_sum[i][j] != IXGBE_DCB_BW_PERCENT && bw_sum[i][j] != 0) { ret_val = IXGBE_ERR_CONFIG; goto err_config; } } if (bw != IXGBE_DCB_BW_PERCENT) { ret_val = IXGBE_ERR_CONFIG; goto err_config; } } err_config: DEBUGOUT2("DCB error code %d while checking %s settings.\n", ret_val, (i == IXGBE_DCB_TX_CONFIG) ? "Tx" : "Rx"); return ret_val; } /** * ixgbe_dcb_get_tc_stats - Returns status of each traffic class * @hw: pointer to hardware structure * @stats: pointer to statistics structure * @tc_count: Number of traffic classes to gather statistics for. * * This function returns the status data for each of the Traffic Classes in use. */ s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats, u8 tc_count) { s32 ret = IXGBE_NOT_IMPLEMENTED; switch (hw->mac.type) { case ixgbe_mac_82598EB: ret = ixgbe_dcb_get_tc_stats_82598(hw, stats, tc_count); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_X550EM_a: #if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT) ret = ixgbe_dcb_get_tc_stats_82599(hw, stats, tc_count); break; #endif default: break; } return ret; } /** * ixgbe_dcb_get_pfc_stats - Returns CBFC status of each traffic class * @hw: pointer to hardware structure * @stats: pointer to statistics structure * @tc_count: Number of traffic classes to gather statistics for. * * This function returns the CBFC status data for each of the Traffic Classes. */ s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats, u8 tc_count) { s32 ret = IXGBE_NOT_IMPLEMENTED; switch (hw->mac.type) { case ixgbe_mac_82598EB: ret = ixgbe_dcb_get_pfc_stats_82598(hw, stats, tc_count); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_X550EM_a: #if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT) ret = ixgbe_dcb_get_pfc_stats_82599(hw, stats, tc_count); break; #endif default: break; } return ret; } /** * ixgbe_dcb_config_rx_arbiter_cee - Config Rx arbiter * @hw: pointer to hardware structure * @dcb_config: pointer to ixgbe_dcb_config structure * * Configure Rx Data Arbiter and credits for each traffic class.
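 *
 * (The CEE config is first unpacked into flat per-TC arrays plus a
 * per-UP map and then handed to the MAC-specific arbiter routine;
 * for an unrecognized MAC type this simply returns
 * IXGBE_NOT_IMPLEMENTED.)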
*/ s32 ixgbe_dcb_config_rx_arbiter_cee(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config) { s32 ret = IXGBE_NOT_IMPLEMENTED; u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 }; u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 }; u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 }; u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 }; ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill); ixgbe_dcb_unpack_max_cee(dcb_config, max); ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid); ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa); ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map); switch (hw->mac.type) { case ixgbe_mac_82598EB: ret = ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_X550EM_a: #if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT) ret = ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwgid, tsa, map); break; #endif default: break; } return ret; } /** * ixgbe_dcb_config_tx_desc_arbiter_cee - Config Tx Desc arbiter * @hw: pointer to hardware structure * @dcb_config: pointer to ixgbe_dcb_config structure * * Configure Tx Descriptor Arbiter and credits for each traffic class. */ s32 ixgbe_dcb_config_tx_desc_arbiter_cee(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config) { s32 ret = IXGBE_NOT_IMPLEMENTED; u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS]; u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS]; u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS]; u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS]; ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill); ixgbe_dcb_unpack_max_cee(dcb_config, max); ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid); ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa); switch (hw->mac.type) { case ixgbe_mac_82598EB: ret = ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwgid, tsa); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_X550EM_a: #if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT) ret = ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwgid, tsa); break; #endif default: break; } return ret; } /** * ixgbe_dcb_config_tx_data_arbiter_cee - Config Tx data arbiter * @hw: pointer to hardware structure * @dcb_config: pointer to ixgbe_dcb_config structure * * Configure Tx Data Arbiter and credits for each traffic class. 
*/ s32 ixgbe_dcb_config_tx_data_arbiter_cee(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config) { s32 ret = IXGBE_NOT_IMPLEMENTED; u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS]; u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS]; u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS]; u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS]; ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill); ixgbe_dcb_unpack_max_cee(dcb_config, max); ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid); ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa); ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map); switch (hw->mac.type) { case ixgbe_mac_82598EB: ret = ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwgid, tsa); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_X550EM_a: #if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT) ret = ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwgid, tsa, map); break; #endif default: break; } return ret; } /** * ixgbe_dcb_config_pfc_cee - Config priority flow control * @hw: pointer to hardware structure * @dcb_config: pointer to ixgbe_dcb_config structure * * Configure Priority Flow Control for each traffic class. */ s32 ixgbe_dcb_config_pfc_cee(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config) { s32 ret = IXGBE_NOT_IMPLEMENTED; u8 pfc_en; u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map); ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en); switch (hw->mac.type) { case ixgbe_mac_82598EB: ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_X550EM_a: #if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT) ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, map); break; #endif default: break; } return ret; } /** * ixgbe_dcb_config_tc_stats - Config traffic class statistics * @hw: pointer to hardware structure * * Configure the queue statistics registers; all queues belonging to the same * traffic class use a single set of queue statistics counters. */ s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *hw) { s32 ret = IXGBE_NOT_IMPLEMENTED; switch (hw->mac.type) { case ixgbe_mac_82598EB: ret = ixgbe_dcb_config_tc_stats_82598(hw); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_X550EM_a: #if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT) ret = ixgbe_dcb_config_tc_stats_82599(hw, NULL); break; #endif default: break; } return ret; } /** * ixgbe_dcb_hw_config_cee - Config and enable DCB * @hw: pointer to hardware structure * @dcb_config: pointer to ixgbe_dcb_config structure * * Configure DCB settings and enable DCB mode.
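 *
 * Usage sketch (illustrative only; assumes an initialized hw and a
 * populated dcb_config):
 *
 *	if (ixgbe_dcb_check_config_cee(dcb_config) == IXGBE_SUCCESS)
 *		ret = ixgbe_dcb_hw_config_cee(hw, dcb_config);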
*/ s32 ixgbe_dcb_hw_config_cee(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config) { s32 ret = IXGBE_NOT_IMPLEMENTED; u8 pfc_en; u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS]; u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS]; u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS]; u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Unpack CEE standard containers */ ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill); ixgbe_dcb_unpack_max_cee(dcb_config, max); ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid); ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa); ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map); hw->mac.ops.setup_rxpba(hw, dcb_config->num_tcs.pg_tcs, 0, dcb_config->rx_pba_cfg); switch (hw->mac.type) { case ixgbe_mac_82598EB: ret = ixgbe_dcb_hw_config_82598(hw, dcb_config->link_speed, refill, max, bwgid, tsa); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_X550EM_a: #if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT) ixgbe_dcb_config_82599(hw, dcb_config); ret = ixgbe_dcb_hw_config_82599(hw, dcb_config->link_speed, refill, max, bwgid, tsa, map); ixgbe_dcb_config_tc_stats_82599(hw, dcb_config); break; #endif default: break; } if (!ret && dcb_config->pfc_mode_enable) { ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en); ret = ixgbe_dcb_config_pfc(hw, pfc_en, map); } return ret; } /* Helper routines to abstract HW specifics from DCB netlink ops */ s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *hw, u8 pfc_en, u8 *map) { int ret = IXGBE_ERR_PARAM; switch (hw->mac.type) { case ixgbe_mac_82598EB: ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_X550EM_a: #if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT) ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, map); break; #endif default: break; } return ret; } s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa, u8 *map) { switch (hw->mac.type) { case ixgbe_mac_82598EB: ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa); ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, tsa); ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, tsa); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_X550EM_a: #if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT) ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, tsa, map); ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, tsa); ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, tsa, map); break; #endif default: break; } return 0; } diff --git a/sys/dev/ixgbe/ixgbe_dcb_82599.c b/sys/dev/ixgbe/ixgbe_dcb_82599.c index 3056cbf5c2f3..c5fc42e23649 100644 --- a/sys/dev/ixgbe/ixgbe_dcb_82599.c +++ b/sys/dev/ixgbe/ixgbe_dcb_82599.c @@ -1,612 +1,612 @@ /****************************************************************************** SPDX-License-Identifier: BSD-3-Clause Copyright (c) 2001-2017, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #include "ixgbe_type.h" #include "ixgbe_dcb.h" #include "ixgbe_dcb_82599.h" /** * ixgbe_dcb_get_tc_stats_82599 - Returns status for each traffic class * @hw: pointer to hardware structure * @stats: pointer to statistics structure * @tc_count: Number of traffic classes to gather statistics for. * * This function returns the status data for each of the Traffic Classes in use. */ s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats, u8 tc_count) { int tc; DEBUGFUNC("dcb_get_tc_stats"); if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS) return IXGBE_ERR_PARAM; /* Statistics pertaining to each traffic class */ for (tc = 0; tc < tc_count; tc++) { /* Transmitted Packets */ stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc)); /* Transmitted Bytes (read low first to prevent missed carry) */ stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(tc)); stats->qbtc[tc] += (((u64)(IXGBE_READ_REG(hw, IXGBE_QBTC_H(tc)))) << 32); /* Received Packets */ stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc)); /* Received Bytes (read low first to prevent missed carry) */ stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(tc)); stats->qbrc[tc] += (((u64)(IXGBE_READ_REG(hw, IXGBE_QBRC_H(tc)))) << 32); /* Received Dropped Packets */ stats->qprdc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRDC(tc)); } return IXGBE_SUCCESS; } /** * ixgbe_dcb_get_pfc_stats_82599 - Return CBFC status data * @hw: pointer to hardware structure * @stats: pointer to statistics structure * @tc_count: Number of traffic classes to gather statistics for. * * This function returns the CBFC status data for each of the Traffic Classes.
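 *
 * (Illustrative example: with PFC active on priority 3, a steadily
 * rising pxoffrxc[3] would indicate the link partner is pausing
 * that priority.)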
*/ s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats, u8 tc_count) { int tc; DEBUGFUNC("dcb_get_pfc_stats"); if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS) return IXGBE_ERR_PARAM; for (tc = 0; tc < tc_count; tc++) { /* Priority XOFF Transmitted */ stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc)); /* Priority XOFF Received */ stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(tc)); } return IXGBE_SUCCESS; } /** * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter * @hw: pointer to hardware structure * @refill: refill credits indexed by traffic class * @max: max credits indexed by traffic class * @bwg_id: bandwidth grouping indexed by traffic class * @tsa: transmission selection algorithm indexed by traffic class * @map: priority to tc assignments indexed by priority * * Configure Rx Packet Arbiter and credits for each traffic class. */ s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa, u8 *map) { u32 reg = 0; u32 credit_refill = 0; u32 credit_max = 0; u8 i = 0; /* * Disable the arbiter before changing parameters * (always enable recycle mode; WSP) */ reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS; IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg); /* * Map all UPs to TCs. up_to_tc_bitmap for each TC has the * corresponding bits set for the UPs that need to be mapped to that * TC, e.g. if priorities 6 and 7 are to be mapped to a TC then the * up_to_tc_bitmap value for that TC will be 11000000 in binary. */ reg = 0; for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) reg |= (map[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT)); IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg); /* Configure traffic class credits and priority */ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { credit_refill = refill[i]; credit_max = max[i]; reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT); reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT; if (tsa[i] == ixgbe_dcb_tsa_strict) reg |= IXGBE_RTRPT4C_LSP; IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg); } /* * Configure Rx packet plane (recycle mode; WSP) and * enable arbiter */ reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC; IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg); return IXGBE_SUCCESS; } /** * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter * @hw: pointer to hardware structure * @refill: refill credits indexed by traffic class * @max: max credits indexed by traffic class * @bwg_id: bandwidth grouping indexed by traffic class * @tsa: transmission selection algorithm indexed by traffic class * * Configure Tx Descriptor Arbiter and credits for each traffic class.
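 *
 * Register packing sketch (illustrative, mirroring the loop below):
 * each RTTDT2C[i] combines refill[i] in the low bits with max[i] at
 * the MCL shift and bwg_id[i] at the BWG shift, plus the GSP/LSP
 * flags for group- or link-strict TCs.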
*/ s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa) { u32 reg, max_credits; u8 i; /* Clear the per-Tx queue credits; we use per-TC instead */ for (i = 0; i < 128; i++) { IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i); IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0); } /* Configure traffic class credits and priority */ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { max_credits = max[i]; reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT; reg |= (u32)(refill[i]); reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT; if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee) reg |= IXGBE_RTTDT2C_GSP; if (tsa[i] == ixgbe_dcb_tsa_strict) reg |= IXGBE_RTTDT2C_LSP; IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg); } /* * Configure Tx descriptor plane (recycle mode; WSP) and * enable arbiter */ reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM; IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); return IXGBE_SUCCESS; } /** * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter * @hw: pointer to hardware structure * @refill: refill credits indexed by traffic class * @max: max credits indexed by traffic class * @bwg_id: bandwidth grouping indexed by traffic class * @tsa: transmission selection algorithm indexed by traffic class * @map: priority to tc assignments indexed by priority * * Configure Tx Packet Arbiter and credits for each traffic class. */ s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa, u8 *map) { u32 reg; u8 i; /* * Disable the arbiter before changing parameters * (always enable recycle mode; SP; arb delay) */ reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM | (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) | IXGBE_RTTPCS_ARBDIS; IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg); /* * Map all UPs to TCs. up_to_tc_bitmap for each TC has the * corresponding bits set for the UPs that need to be mapped to that * TC, e.g. if priorities 6 and 7 are to be mapped to a TC then the * up_to_tc_bitmap value for that TC will be 11000000 in binary. */ reg = 0; for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) reg |= (map[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT)); IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg); /* Configure traffic class credits and priority */ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { reg = refill[i]; reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT; reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT; if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee) reg |= IXGBE_RTTPT2C_GSP; if (tsa[i] == ixgbe_dcb_tsa_strict) reg |= IXGBE_RTTPT2C_LSP; IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg); } /* * Configure Tx packet plane (recycle mode; SP; arb delay) and * enable arbiter */ reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM | (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT); IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg); return IXGBE_SUCCESS; } /** * ixgbe_dcb_config_pfc_82599 - Configure priority flow control * @hw: pointer to hardware structure * @pfc_en: enabled pfc bitmask * @map: priority to tc assignments indexed by priority * * Configure Priority Flow Control (PFC) for each traffic class. */ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *map) { u32 i, j, fcrtl, reg; u8 max_tc = 0; /* Enable Transmit Priority Flow Control */ IXGBE_WRITE_REG(hw, IXGBE_FCCFG, IXGBE_FCCFG_TFCE_PRIORITY); /* Enable Receive Priority Flow Control */ reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); reg |= IXGBE_MFLCN_DPF; /* * X540 and later support per-TC Rx priority flow control, so * clear all TCs and only enable those that should be * enabled.
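 *
 * (Example, illustrative: pfc_en == 0x05 would enable Rx PFC only
 * for priorities 0 and 2 once shifted into the RPFCE field below.)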
*/ reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE); if (hw->mac.type >= ixgbe_mac_X540) reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT; if (pfc_en) reg |= IXGBE_MFLCN_RPFCE; IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg); for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) { if (map[i] > max_tc) max_tc = map[i]; } /* Configure PFC Tx thresholds per TC */ for (i = 0; i <= max_tc; i++) { int enabled = 0; for (j = 0; j < IXGBE_DCB_MAX_USER_PRIORITY; j++) { if ((map[j] == i) && (pfc_en & (1 << j))) { enabled = 1; break; } } if (enabled) { reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN; fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl); } else { /* * In order to prevent Tx hangs when the internal Tx * switch is enabled we must set the high water mark * to the Rx packet buffer size less 24KB. This allows * the Tx switch to function even under heavy Rx * workloads. */ reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576; IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); } IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg); } for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), 0); } /* Configure pause time (2 TCs per register) */ reg = hw->fc.pause_time | (hw->fc.pause_time << 16); for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); /* Configure flow control refresh threshold value */ IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); return IXGBE_SUCCESS; } /** * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics * @hw: pointer to hardware structure * @dcb_config: pointer to ixgbe_dcb_config structure * * Configure the queue statistics registers; all queues belonging to the same * traffic class use a single set of queue statistics counters. */ s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config) { u32 reg = 0; u8 i = 0; u8 tc_count = 8; - bool vt_mode = FALSE; + bool vt_mode = false; if (dcb_config != NULL) { tc_count = dcb_config->num_tcs.pg_tcs; vt_mode = dcb_config->vt_mode; } - if (!((tc_count == 8 && vt_mode == FALSE) || tc_count == 4)) + if (!((tc_count == 8 && vt_mode == false) || tc_count == 4)) return IXGBE_ERR_PARAM; - if (tc_count == 8 && vt_mode == FALSE) { + if (tc_count == 8 && vt_mode == false) { /* * Receive Queues stats setting * 32 RQSMR registers, each configuring 4 queues. * * Set all 16 queues of each TC to the same stat * with TC 'n' going to stat 'n'. */ for (i = 0; i < 32; i++) { reg = 0x01010101 * (i / 4); IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg); } /* * Transmit Queues stats setting * 32 TQSM registers, each controlling 4 queues. * * Set all queues of each TC to the same stat * with TC 'n' going to stat 'n'. * Tx queues are allocated non-uniformly to TCs: * 32, 32, 16, 16, 8, 8, 8, 8. */ for (i = 0; i < 32; i++) { if (i < 8) reg = 0x00000000; else if (i < 16) reg = 0x01010101; else if (i < 20) reg = 0x02020202; else if (i < 24) reg = 0x03030303; else if (i < 26) reg = 0x04040404; else if (i < 28) reg = 0x05050505; else if (i < 30) reg = 0x06060606; else reg = 0x07070707; IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg); } - } else if (tc_count == 4 && vt_mode == FALSE) { + } else if (tc_count == 4 && vt_mode == false) { /* * Receive Queues stats setting * 32 RQSMR registers, each configuring 4 queues. * * Set all 16 queues of each TC to the same stat * with TC 'n' going to stat 'n'.
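 *
 * Worked example (illustrative, for this 4-TC branch): the loop
 * below writes 0x00000000 to RQSMR[0..3] (TC0's queues -> stat 0),
 * skips RQSMR[4..7], writes 0x01010101 to RQSMR[8..11] (TC1 ->
 * stat 1), and so on for TC2 and TC3.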
*/ for (i = 0; i < 32; i++) { if (i % 8 > 3) /* In 4 TC mode, odd 16-queue ranges are * not used. */ continue; reg = 0x01010101 * (i / 8); IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg); } /* * Transmit Queues stats setting * 32 TQSM registers, each controlling 4 queues. * * Set all queues of each TC to the same stat * with TC 'n' going to stat 'n'. * Tx queues are allocated non-uniformly to TCs: * 64, 32, 16, 16. */ for (i = 0; i < 32; i++) { if (i < 16) reg = 0x00000000; else if (i < 24) reg = 0x01010101; else if (i < 28) reg = 0x02020202; else reg = 0x03030303; IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg); } - } else if (tc_count == 4 && vt_mode == TRUE) { + } else if (tc_count == 4 && vt_mode == true) { /* * Receive Queues stats setting * 32 RQSMR registers, each configuring 4 queues. * * Queue Indexing in 32 VF with DCB mode maps 4 TCs to each * pool. Set all 32 queues of each TC across pools to the same * stat with TC 'n' going to stat 'n'. */ for (i = 0; i < 32; i++) IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0x03020100); /* * Transmit Queues stats setting * 32 TQSM registers, each controlling 4 queues. * * Queue Indexing in 32 VF with DCB mode maps 4 TCs to each * pool. Set all 32 queues of each TC across pools to the same * stat with TC 'n' going to stat 'n'. */ for (i = 0; i < 32; i++) IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0x03020100); } return IXGBE_SUCCESS; } /** * ixgbe_dcb_config_82599 - Configure general DCB parameters * @hw: pointer to hardware structure * @dcb_config: pointer to ixgbe_dcb_config structure * * Configure general DCB parameters. */ s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config) { u32 reg; u32 q; /* Disable the Tx desc arbiter so that MTQC can be changed */ reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS); reg |= IXGBE_RTTDCS_ARBDIS; IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); reg = IXGBE_READ_REG(hw, IXGBE_MRQC); if (dcb_config->num_tcs.pg_tcs == 8) { /* Enable DCB for Rx with 8 TCs */ switch (reg & IXGBE_MRQC_MRQE_MASK) { case 0: case IXGBE_MRQC_RT4TCEN: /* RSS disabled cases */ reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RT8TCEN; break; case IXGBE_MRQC_RSSEN: case IXGBE_MRQC_RTRSS4TCEN: /* RSS enabled cases */ reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RTRSS8TCEN; break; default: /* * Unsupported value; assume stale data and * overwrite with no RSS */ ASSERT(0); reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RT8TCEN; } } if (dcb_config->num_tcs.pg_tcs == 4) { /* We support both VT-on and VT-off with 4 TCs. */ if (dcb_config->vt_mode) reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_VMDQRT4TCEN; else reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RTRSS4TCEN; } IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg); /* Enable DCB for Tx with 8 TCs */ if (dcb_config->num_tcs.pg_tcs == 8) reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; else { /* We support both VT-on and VT-off with 4 TCs.
*/ reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; if (dcb_config->vt_mode) reg |= IXGBE_MTQC_VT_ENA; } IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg); /* Disable drop for all queues */ for (q = 0; q < 128; q++) IXGBE_WRITE_REG(hw, IXGBE_QDE, (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT))); /* Enable the Tx desc arbiter */ reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS); reg &= ~IXGBE_RTTDCS_ARBDIS; IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); /* Enable Security TX Buffer IFG for DCB */ reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); reg |= IXGBE_SECTX_DCB; IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg); return IXGBE_SUCCESS; } /** * ixgbe_dcb_hw_config_82599 - Configure and enable DCB * @hw: pointer to hardware structure * @link_speed: unused * @refill: refill credits index by traffic class * @max: max credits index by traffic class * @bwg_id: bandwidth grouping indexed by traffic class * @tsa: transmission selection algorithm indexed by traffic class * @map: priority to tc assignments indexed by priority * * Configure dcb settings and enable dcb mode. */ s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, int link_speed, u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa, u8 *map) { UNREFERENCED_1PARAMETER(link_speed); ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, tsa, map); ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, tsa); ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, tsa, map); return IXGBE_SUCCESS; } diff --git a/sys/dev/ixgbe/ixgbe_mbx.c b/sys/dev/ixgbe/ixgbe_mbx.c index f9cbd0e4ba3d..26e7e5deaf05 100644 --- a/sys/dev/ixgbe/ixgbe_mbx.c +++ b/sys/dev/ixgbe/ixgbe_mbx.c @@ -1,660 +1,660 @@ /****************************************************************************** SPDX-License-Identifier: BSD-3-Clause Copyright (c) 2001-2017, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/ /*$FreeBSD$*/ #include "ixgbe_type.h" #include "ixgbe_mbx.h" /** * ixgbe_poll_for_msg - Wait for message notification * @hw: pointer to the HW structure * @mbx_id: id of mailbox to write * * returns SUCCESS if it successfully received a message notification **/ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; int countdown = mbx->timeout; DEBUGFUNC("ixgbe_poll_for_msg"); if (!countdown || !mbx->ops.check_for_msg) goto out; while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) { countdown--; if (!countdown) break; usec_delay(mbx->usec_delay); } if (countdown == 0) ERROR_REPORT2(IXGBE_ERROR_POLLING, "Polling for VF%d mailbox message timed out", mbx_id); out: return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX; } /** * ixgbe_poll_for_ack - Wait for message acknowledgement * @hw: pointer to the HW structure * @mbx_id: id of mailbox to write * * returns SUCCESS if it successfully received a message acknowledgement **/ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; int countdown = mbx->timeout; DEBUGFUNC("ixgbe_poll_for_ack"); if (!countdown || !mbx->ops.check_for_ack) goto out; while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) { countdown--; if (!countdown) break; usec_delay(mbx->usec_delay); } if (countdown == 0) ERROR_REPORT2(IXGBE_ERROR_POLLING, "Polling for VF%d mailbox ack timed out", mbx_id); out: return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX; } /** * ixgbe_read_posted_mbx - Wait for message notification and receive message * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer * @mbx_id: id of mailbox to write * * returns SUCCESS if it successfully received a message notification and * copied it into the receive buffer.
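 *
 * Usage sketch (illustrative; the message length and mailbox id
 * depend on the protocol message being exchanged):
 *
 *	u32 msg[IXGBE_VFMAILBOX_SIZE];
 *	s32 ret = hw->mbx.ops.read_posted(hw, msg, 2, mbx_id);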
**/ static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; s32 ret_val = IXGBE_ERR_MBX; DEBUGFUNC("ixgbe_read_posted_mbx"); if (!mbx->ops.read) goto out; ret_val = ixgbe_poll_for_msg(hw, mbx_id); /* if a message notification arrived, read it; otherwise we timed out */ if (!ret_val) ret_val = mbx->ops.read(hw, msg, size, mbx_id); out: return ret_val; } /** * ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer * @mbx_id: id of mailbox to write * * returns SUCCESS if it successfully copied message into the buffer and * received an ack to that message within the delay * timeout period **/ static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; s32 ret_val = IXGBE_ERR_MBX; DEBUGFUNC("ixgbe_write_posted_mbx"); /* exit if either we can't write or there isn't a defined timeout */ if (!mbx->ops.write || !mbx->timeout) goto out; /* send msg */ ret_val = mbx->ops.write(hw, msg, size, mbx_id); /* if msg sent wait until we receive an ack */ if (!ret_val) ret_val = ixgbe_poll_for_ack(hw, mbx_id); out: return ret_val; } /** * ixgbe_init_mbx_ops_generic - Initialize MB function pointers * @hw: pointer to the HW structure * * Sets up the mailbox read and write message function pointers **/ void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw) { struct ixgbe_mbx_info *mbx = &hw->mbx; mbx->ops.read_posted = ixgbe_read_posted_mbx; mbx->ops.write_posted = ixgbe_write_posted_mbx; } /** * ixgbe_read_v2p_mailbox - read v2p mailbox * @hw: pointer to the HW structure * * This function is used to read the v2p mailbox without losing the * read-to-clear status bits. **/ static u32 ixgbe_read_v2p_mailbox(struct ixgbe_hw *hw) { u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX); v2p_mailbox |= hw->mbx.v2p_mailbox; hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS; return v2p_mailbox; } /** * ixgbe_check_for_bit_vf - Determine if a status bit was set * @hw: pointer to the HW structure * @mask: bitmask for bits to be tested and cleared * * This function is used to check for the read-to-clear bits within * the V2P mailbox.
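 *
 * (Illustrative: because ixgbe_read_v2p_mailbox() above latches the
 * R2C bits into hw->mbx.v2p_mailbox, a PFSTS or PFACK event stays
 * visible to this check even if an earlier register read already
 * cleared the bit in hardware.)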
**/ static s32 ixgbe_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask) { u32 v2p_mailbox = ixgbe_read_v2p_mailbox(hw); s32 ret_val = IXGBE_ERR_MBX; if (v2p_mailbox & mask) ret_val = IXGBE_SUCCESS; hw->mbx.v2p_mailbox &= ~mask; return ret_val; } /** * ixgbe_check_for_msg_vf - checks to see if the PF has sent mail * @hw: pointer to the HW structure * @mbx_id: id of mailbox to check * * returns SUCCESS if the PF has set the Status bit or else ERR_MBX **/ static s32 ixgbe_check_for_msg_vf(struct ixgbe_hw *hw, u16 mbx_id) { s32 ret_val = IXGBE_ERR_MBX; UNREFERENCED_1PARAMETER(mbx_id); DEBUGFUNC("ixgbe_check_for_msg_vf"); if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) { ret_val = IXGBE_SUCCESS; hw->mbx.stats.reqs++; } return ret_val; } /** * ixgbe_check_for_ack_vf - checks to see if the PF has ACK'd * @hw: pointer to the HW structure * @mbx_id: id of mailbox to check * * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX **/ static s32 ixgbe_check_for_ack_vf(struct ixgbe_hw *hw, u16 mbx_id) { s32 ret_val = IXGBE_ERR_MBX; UNREFERENCED_1PARAMETER(mbx_id); DEBUGFUNC("ixgbe_check_for_ack_vf"); if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) { ret_val = IXGBE_SUCCESS; hw->mbx.stats.acks++; } return ret_val; } /** * ixgbe_check_for_rst_vf - checks to see if the PF has reset * @hw: pointer to the HW structure * @mbx_id: id of mailbox to check * - * returns TRUE if the PF has set the reset done bit or else FALSE + * returns true if the PF has set the reset done bit or else false **/ static s32 ixgbe_check_for_rst_vf(struct ixgbe_hw *hw, u16 mbx_id) { s32 ret_val = IXGBE_ERR_MBX; UNREFERENCED_1PARAMETER(mbx_id); DEBUGFUNC("ixgbe_check_for_rst_vf"); if (!ixgbe_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD | IXGBE_VFMAILBOX_RSTI))) { ret_val = IXGBE_SUCCESS; hw->mbx.stats.rsts++; } return ret_val; } /** * ixgbe_obtain_mbx_lock_vf - obtain mailbox lock * @hw: pointer to the HW structure * * return SUCCESS if we obtained the mailbox lock **/ static s32 ixgbe_obtain_mbx_lock_vf(struct ixgbe_hw *hw) { s32 ret_val = IXGBE_ERR_MBX; DEBUGFUNC("ixgbe_obtain_mbx_lock_vf"); /* Take ownership of the buffer */ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU); /* reserve mailbox for vf use */ if (ixgbe_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU) ret_val = IXGBE_SUCCESS; return ret_val; } /** * ixgbe_write_mbx_vf - Write a message to the mailbox * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer * @mbx_id: id of mailbox to write * * returns SUCCESS if it successfully copied message into the buffer **/ static s32 ixgbe_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) { s32 ret_val; u16 i; UNREFERENCED_1PARAMETER(mbx_id); DEBUGFUNC("ixgbe_write_mbx_vf"); /* lock the mailbox to prevent pf/vf race condition */ ret_val = ixgbe_obtain_mbx_lock_vf(hw); if (ret_val) goto out_no_write; /* flush msg and acks as we are overwriting the message buffer */ ixgbe_check_for_msg_vf(hw, 0); ixgbe_check_for_ack_vf(hw, 0); /* copy the caller specified message to the mailbox memory buffer */ for (i = 0; i < size; i++) IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]); /* update stats */ hw->mbx.stats.msgs_tx++; /* Drop VFU and interrupt the PF to tell it a message has been sent */ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ); out_no_write: return ret_val; } /** * ixgbe_read_mbx_vf - Reads a message from the inbox intended for vf * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer * @mbx_id: id of mailbox to 
read * * returns SUCCESS if it successfully read message from buffer **/ static s32 ixgbe_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) { s32 ret_val = IXGBE_SUCCESS; u16 i; DEBUGFUNC("ixgbe_read_mbx_vf"); UNREFERENCED_1PARAMETER(mbx_id); /* lock the mailbox to prevent pf/vf race condition */ ret_val = ixgbe_obtain_mbx_lock_vf(hw); if (ret_val) goto out_no_read; /* copy the message from the mailbox memory buffer */ for (i = 0; i < size; i++) msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i); /* Acknowledge receipt and release mailbox, then we're done */ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK); /* update stats */ hw->mbx.stats.msgs_rx++; out_no_read: return ret_val; } /** * ixgbe_init_mbx_params_vf - set initial values for vf mailbox * @hw: pointer to the HW structure * * Initializes the hw->mbx struct to correct values for vf mailbox */ void ixgbe_init_mbx_params_vf(struct ixgbe_hw *hw) { struct ixgbe_mbx_info *mbx = &hw->mbx; /* start mailbox as timed out and let the reset_hw call set the timeout * value to begin communications */ mbx->timeout = 0; mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY; mbx->size = IXGBE_VFMAILBOX_SIZE; mbx->ops.read = ixgbe_read_mbx_vf; mbx->ops.write = ixgbe_write_mbx_vf; mbx->ops.read_posted = ixgbe_read_posted_mbx; mbx->ops.write_posted = ixgbe_write_posted_mbx; mbx->ops.check_for_msg = ixgbe_check_for_msg_vf; mbx->ops.check_for_ack = ixgbe_check_for_ack_vf; mbx->ops.check_for_rst = ixgbe_check_for_rst_vf; mbx->stats.msgs_tx = 0; mbx->stats.msgs_rx = 0; mbx->stats.reqs = 0; mbx->stats.acks = 0; mbx->stats.rsts = 0; } static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index) { u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index)); s32 ret_val = IXGBE_ERR_MBX; if (mbvficr & mask) { ret_val = IXGBE_SUCCESS; IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask); } return ret_val; } /** * ixgbe_check_for_msg_pf - checks to see if the VF has sent mail * @hw: pointer to the HW structure * @vf_number: the VF index * * returns SUCCESS if the VF has set the Request bit or else ERR_MBX **/ static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number) { s32 ret_val = IXGBE_ERR_MBX; s32 index = IXGBE_MBVFICR_INDEX(vf_number); u32 vf_bit = vf_number % 16; DEBUGFUNC("ixgbe_check_for_msg_pf"); if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit, index)) { ret_val = IXGBE_SUCCESS; hw->mbx.stats.reqs++; } return ret_val; } /** * ixgbe_check_for_ack_pf - checks to see if the VF has ACKed * @hw: pointer to the HW structure * @vf_number: the VF index * * returns SUCCESS if the VF has set the ACK bit or else ERR_MBX **/ static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number) { s32 ret_val = IXGBE_ERR_MBX; s32 index = IXGBE_MBVFICR_INDEX(vf_number); u32 vf_bit = vf_number % 16; DEBUGFUNC("ixgbe_check_for_ack_pf"); if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit, index)) { ret_val = IXGBE_SUCCESS; hw->mbx.stats.acks++; } return ret_val; } /** * ixgbe_check_for_rst_pf - checks to see if the VF has reset * @hw: pointer to the HW structure * @vf_number: the VF index * * returns SUCCESS if the VF has reset (VFLR) or else ERR_MBX **/ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) { u32 reg_offset = (vf_number < 32) ?
0 : 1; u32 vf_shift = vf_number % 32; u32 vflre = 0; s32 ret_val = IXGBE_ERR_MBX; DEBUGFUNC("ixgbe_check_for_rst_pf"); switch (hw->mac.type) { case ixgbe_mac_82599EB: vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset)); break; case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_X550EM_a: case ixgbe_mac_X540: vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset)); break; default: break; } if (vflre & (1 << vf_shift)) { ret_val = IXGBE_SUCCESS; IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift)); hw->mbx.stats.rsts++; } return ret_val; } /** * ixgbe_obtain_mbx_lock_pf - obtain mailbox lock * @hw: pointer to the HW structure * @vf_number: the VF index * * return SUCCESS if we obtained the mailbox lock **/ static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number) { s32 ret_val = IXGBE_ERR_MBX; u32 p2v_mailbox; DEBUGFUNC("ixgbe_obtain_mbx_lock_pf"); /* Take ownership of the buffer */ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU); /* reserve mailbox for vf use */ p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number)); if (p2v_mailbox & IXGBE_PFMAILBOX_PFU) ret_val = IXGBE_SUCCESS; else ERROR_REPORT2(IXGBE_ERROR_POLLING, "Failed to obtain mailbox lock for VF%d", vf_number); return ret_val; } /** * ixgbe_write_mbx_pf - Places a message in the mailbox * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer * @vf_number: the VF index * * returns SUCCESS if it successfully copied message into the buffer **/ static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 vf_number) { s32 ret_val; u16 i; DEBUGFUNC("ixgbe_write_mbx_pf"); /* lock the mailbox to prevent pf/vf race condition */ ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number); if (ret_val) goto out_no_write; /* flush msg and acks as we are overwriting the message buffer */ ixgbe_check_for_msg_pf(hw, vf_number); ixgbe_check_for_ack_pf(hw, vf_number); /* copy the caller specified message to the mailbox memory buffer */ for (i = 0; i < size; i++) IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]); /* Interrupt VF to tell it a message has been sent and release buffer */ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS); /* update stats */ hw->mbx.stats.msgs_tx++; out_no_write: return ret_val; } /** * ixgbe_read_mbx_pf - Read a message from the mailbox * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer * @vf_number: the VF index * * This function copies a message from the mailbox buffer to the caller's * memory buffer. The presumption is that the caller knows that there was * a message due to a VF request, so no polling for the message is needed.
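 *
 * (Usage note, illustrative: the PF side typically reaches this via
 * mbx->ops.read from its mailbox interrupt path, after
 * check_for_msg has reported a pending VF request.)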
**/ static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 vf_number) { s32 ret_val; u16 i; DEBUGFUNC("ixgbe_read_mbx_pf"); /* lock the mailbox to prevent pf/vf race condition */ ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number); if (ret_val) goto out_no_read; /* copy the message to the mailbox memory buffer */ for (i = 0; i < size; i++) msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i); /* Acknowledge the message and release buffer */ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK); /* update stats */ hw->mbx.stats.msgs_rx++; out_no_read: return ret_val; } /** * ixgbe_init_mbx_params_pf - set initial values for pf mailbox * @hw: pointer to the HW structure * * Initializes the hw->mbx struct to correct values for pf mailbox */ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw) { struct ixgbe_mbx_info *mbx = &hw->mbx; if (hw->mac.type != ixgbe_mac_82599EB && hw->mac.type != ixgbe_mac_X550 && hw->mac.type != ixgbe_mac_X550EM_x && hw->mac.type != ixgbe_mac_X550EM_a && hw->mac.type != ixgbe_mac_X540) return; mbx->timeout = 0; mbx->usec_delay = 0; mbx->size = IXGBE_VFMAILBOX_SIZE; mbx->ops.read = ixgbe_read_mbx_pf; mbx->ops.write = ixgbe_write_mbx_pf; mbx->ops.read_posted = ixgbe_read_posted_mbx; mbx->ops.write_posted = ixgbe_write_posted_mbx; mbx->ops.check_for_msg = ixgbe_check_for_msg_pf; mbx->ops.check_for_ack = ixgbe_check_for_ack_pf; mbx->ops.check_for_rst = ixgbe_check_for_rst_pf; mbx->stats.msgs_tx = 0; mbx->stats.msgs_rx = 0; mbx->stats.reqs = 0; mbx->stats.acks = 0; mbx->stats.rsts = 0; } diff --git a/sys/dev/ixgbe/ixgbe_mbx.h b/sys/dev/ixgbe/ixgbe_mbx.h index 3065d22567f3..44ccc071f311 100644 --- a/sys/dev/ixgbe/ixgbe_mbx.h +++ b/sys/dev/ixgbe/ixgbe_mbx.h @@ -1,160 +1,160 @@ /****************************************************************************** SPDX-License-Identifier: BSD-3-Clause Copyright (c) 2001-2017, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/ /*$FreeBSD$*/ #ifndef _IXGBE_MBX_H_ #define _IXGBE_MBX_H_ #include "ixgbe_type.h" #define IXGBE_VFMAILBOX_SIZE 16 /* 16 32-bit words - 64 bytes */ #define IXGBE_ERR_MBX -100 #define IXGBE_VFMAILBOX 0x002FC #define IXGBE_VFMBMEM 0x00200 /* Define mailbox register bits */ #define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ #define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */ #define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ #define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ #define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ #define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ #define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */ #define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ #define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ #define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */ #define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ #define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ #define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ #define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ #define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */ #define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ #define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */ #define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ /* If it's an IXGBE_VF_* msg then it originates in the VF and is sent to the - * PF. The reverse is TRUE if it is IXGBE_PF_*. + * PF. The reverse is true if it is IXGBE_PF_*.
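 * e.g. (illustrative) a VF's IXGBE_VF_RESET request is answered by
 * the PF as (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) on success, or
 * with IXGBE_VT_MSGTYPE_NACK set on failure.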
* Message ACKs are the value or'd with 0xF0000000 */ #define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with * this are the ACK */ #define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with * this are the NACK */ #define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still * clear to send requests */ #define IXGBE_VT_MSGINFO_SHIFT 16 /* bits 23:16 are used for extra info for certain messages */ #define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) /* definitions to support mailbox API version negotiation */ /* * each element denotes a version of the API; existing numbers may not * change; any additions must go at the end */ enum ixgbe_pfvf_api_rev { ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */ ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */ ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */ ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */ /* This value should always be last */ ixgbe_mbox_api_unknown, /* indicates that API version is not known */ }; /* mailbox API, legacy requests */ #define IXGBE_VF_RESET 0x01 /* VF requests reset */ #define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ #define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ #define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ /* mailbox API, version 1.0 VF requests */ #define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ #define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */ #define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */ /* mailbox API, version 1.1 VF requests */ #define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */ /* mailbox API, version 1.2 VF requests */ #define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */ #define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */ #define IXGBE_VF_UPDATE_XCAST_MODE 0x0c /* mode choices for IXGBE_VF_UPDATE_XCAST_MODE */ enum ixgbevf_xcast_modes { IXGBEVF_XCAST_MODE_NONE = 0, IXGBEVF_XCAST_MODE_MULTI, IXGBEVF_XCAST_MODE_ALLMULTI, IXGBEVF_XCAST_MODE_PROMISC, }; /* GET_QUEUES return data indices within the mailbox */ #define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */ #define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */ #define IXGBE_VF_TRANS_VLAN 3 /* Indication of port VLAN */ #define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */ /* length of permanent address message returned from PF */ #define IXGBE_VF_PERMADDR_MSG_LEN 4 /* word in permanent address message with the current multicast type */ #define IXGBE_VF_MC_TYPE_WORD 3 #define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */ /* mailbox API, version 2.0 VF requests */ #define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */ #define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */ #define IXGBE_VF_ENABLE_MACADDR 0x0A /* enable MAC address */ #define IXGBE_VF_DISABLE_MACADDR 0x0B /* disable MAC address */ #define IXGBE_VF_GET_MACADDRS 0x0C /* get all configured MAC addrs */ #define IXGBE_VF_SET_MCAST_PROMISC 0x0D /* enable multicast promiscuous */ #define IXGBE_VF_GET_MTU 0x0E /* get bounds on MTU */ #define IXGBE_VF_SET_MTU 0x0F /* set a specific MTU */ /* mailbox API, version 2.0 PF requests */ #define IXGBE_PF_TRANSPARENT_VLAN 0x0101 /* enable transparent VLAN */ #define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ #define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ void
ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw); void ixgbe_init_mbx_params_vf(struct ixgbe_hw *); void ixgbe_init_mbx_params_pf(struct ixgbe_hw *); #endif /* _IXGBE_MBX_H_ */ diff --git a/sys/dev/ixgbe/ixgbe_osdep.h b/sys/dev/ixgbe/ixgbe_osdep.h index e1b964400988..281c221b59f0 100644 --- a/sys/dev/ixgbe/ixgbe_osdep.h +++ b/sys/dev/ixgbe/ixgbe_osdep.h @@ -1,241 +1,237 @@ /****************************************************************************** SPDX-License-Identifier: BSD-3-Clause Copyright (c) 2001-2017, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _IXGBE_OSDEP_H_ #define _IXGBE_OSDEP_H_ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define ASSERT(x) if(!(x)) panic("IXGBE: x") #define EWARN(H, W) printf(W) enum { IXGBE_ERROR_SOFTWARE, IXGBE_ERROR_POLLING, IXGBE_ERROR_INVALID_STATE, IXGBE_ERROR_UNSUPPORTED, IXGBE_ERROR_ARGUMENT, IXGBE_ERROR_CAUTION, }; /* The happy-fun DELAY macro is defined in /usr/src/sys/i386/include/clock.h */ #define usec_delay(x) DELAY(x) #define msec_delay(x) DELAY(1000*(x)) #define DBG 0 #define MSGOUT(S, A, B) printf(S "\n", A, B) #define DEBUGFUNC(F) DEBUGOUT(F); #if DBG #define DEBUGOUT(S) printf(S "\n") #define DEBUGOUT1(S,A) printf(S "\n",A) #define DEBUGOUT2(S,A,B) printf(S "\n",A,B) #define DEBUGOUT3(S,A,B,C) printf(S "\n",A,B,C) #define DEBUGOUT4(S,A,B,C,D) printf(S "\n",A,B,C,D) #define DEBUGOUT5(S,A,B,C,D,E) printf(S "\n",A,B,C,D,E) #define DEBUGOUT6(S,A,B,C,D,E,F) printf(S "\n",A,B,C,D,E,F) #define DEBUGOUT7(S,A,B,C,D,E,F,G) printf(S "\n",A,B,C,D,E,F,G) #define ERROR_REPORT1 ERROR_REPORT #define ERROR_REPORT2 ERROR_REPORT #define ERROR_REPORT3 ERROR_REPORT #define ERROR_REPORT(level, format, arg...) 
do { \ switch (level) { \ case IXGBE_ERROR_SOFTWARE: \ case IXGBE_ERROR_CAUTION: \ case IXGBE_ERROR_POLLING: \ case IXGBE_ERROR_INVALID_STATE: \ case IXGBE_ERROR_UNSUPPORTED: \ case IXGBE_ERROR_ARGUMENT: \ device_printf(ixgbe_dev_from_hw(hw), format, ## arg); \ break; \ default: \ break; \ } \ } while (0) #else #define DEBUGOUT(S) #define DEBUGOUT1(S,A) #define DEBUGOUT2(S,A,B) #define DEBUGOUT3(S,A,B,C) #define DEBUGOUT4(S,A,B,C,D) #define DEBUGOUT5(S,A,B,C,D,E) #define DEBUGOUT6(S,A,B,C,D,E,F) #define DEBUGOUT7(S,A,B,C,D,E,F,G) #define ERROR_REPORT1(S,A) #define ERROR_REPORT2(S,A,B) #define ERROR_REPORT3(S,A,B,C) #endif -#define FALSE 0 -#define false 0 /* shared code requires this */ -#define TRUE 1 -#define true 1 #define CMD_MEM_WRT_INVALIDATE 0x0010 /* BIT_4 */ #define PCI_COMMAND_REGISTER PCIR_COMMAND /* Shared code dropped this define.. */ #define IXGBE_INTEL_VENDOR_ID 0x8086 /* Bunch of defines for shared code bogosity */ #define UNREFERENCED_PARAMETER(_p) #define UNREFERENCED_1PARAMETER(_p) #define UNREFERENCED_2PARAMETER(_p, _q) #define UNREFERENCED_3PARAMETER(_p, _q, _r) #define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) #define IXGBE_NTOHL(_i) ntohl(_i) #define IXGBE_NTOHS(_i) ntohs(_i) /* XXX these need to be revisited */ #define IXGBE_CPU_TO_LE16 htole16 #define IXGBE_CPU_TO_LE32 htole32 #define IXGBE_LE32_TO_CPU le32toh #define IXGBE_LE32_TO_CPUS(x) #define IXGBE_CPU_TO_BE16 htobe16 #define IXGBE_CPU_TO_BE32 htobe32 #define IXGBE_BE32_TO_CPU be32toh typedef uint8_t u8; typedef int8_t s8; typedef uint16_t u16; typedef int16_t s16; typedef uint32_t u32; typedef int32_t s32; typedef uint64_t u64; #ifndef __bool_true_false_are_defined typedef boolean_t bool; #endif /* shared code requires this */ #define __le16 u16 #define __le32 u32 #define __le64 u64 #define __be16 u16 #define __be32 u32 #define __be64 u64 #define le16_to_cpu #if __FreeBSD_version < 800000 #if defined(__i386__) || defined(__amd64__) #define mb() __asm volatile("mfence" ::: "memory") #define wmb() __asm volatile("sfence" ::: "memory") #define rmb() __asm volatile("lfence" ::: "memory") #else #define mb() #define rmb() #define wmb() #endif #endif #if defined(__i386__) || defined(__amd64__) static __inline void prefetch(void *x) { __asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x)); } #else #define prefetch(x) #endif /* * Optimized bcopy thanks to Luigi Rizzo's investigative work. Assumes * non-overlapping regions and 32-byte padding on both src and dst. 
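 *
 * Worked example (illustrative): a 40-byte copy executes the 32-byte
 * loop twice (l = 40, then l = 8) and therefore moves 64 bytes, which
 * is why both buffers need padding out to a 32-byte multiple.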
*/ static __inline int ixgbe_bcopy(void *restrict _src, void *restrict _dst, int l) { uint64_t *src = _src; uint64_t *dst = _dst; for (; l > 0; l -= 32) { *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; *dst++ = *src++; } return (0); } struct ixgbe_osdep { bus_space_tag_t mem_bus_space_tag; bus_space_handle_t mem_bus_space_handle; }; /* These routines need struct ixgbe_hw declared */ struct ixgbe_hw; /* These routines are needed by the shared code */ extern u16 ixgbe_read_pci_cfg(struct ixgbe_hw *, u32); #define IXGBE_READ_PCIE_WORD ixgbe_read_pci_cfg extern void ixgbe_write_pci_cfg(struct ixgbe_hw *, u32, u16); #define IXGBE_WRITE_PCIE_WORD ixgbe_write_pci_cfg #define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS) extern u32 ixgbe_read_reg(struct ixgbe_hw *, u32); #define IXGBE_READ_REG(a, reg) ixgbe_read_reg(a, reg) extern void ixgbe_write_reg(struct ixgbe_hw *, u32, u32); #define IXGBE_WRITE_REG(a, reg, val) ixgbe_write_reg(a, reg, val) extern u32 ixgbe_read_reg_array(struct ixgbe_hw *, u32, u32); #define IXGBE_READ_REG_ARRAY(a, reg, offset) \ ixgbe_read_reg_array(a, reg, offset) extern void ixgbe_write_reg_array(struct ixgbe_hw *, u32, u32, u32); #define IXGBE_WRITE_REG_ARRAY(a, reg, offset, val) \ ixgbe_write_reg_array(a, reg, offset, val) #endif /* _IXGBE_OSDEP_H_ */ diff --git a/sys/dev/ixgbe/ixgbe_phy.c b/sys/dev/ixgbe/ixgbe_phy.c index a9204e2ef236..28930ca3267d 100644 --- a/sys/dev/ixgbe/ixgbe_phy.c +++ b/sys/dev/ixgbe/ixgbe_phy.c @@ -1,2706 +1,2706 @@ /****************************************************************************** SPDX-License-Identifier: BSD-3-Clause Copyright (c) 2001-2017, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/ /*$FreeBSD$*/ #include "ixgbe_api.h" #include "ixgbe_common.h" #include "ixgbe_phy.h" static void ixgbe_i2c_start(struct ixgbe_hw *hw); static void ixgbe_i2c_stop(struct ixgbe_hw *hw); static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data); static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data); static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw); static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data); static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data); static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data); static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl); static s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 *sff8472_data); /** * ixgbe_out_i2c_byte_ack - Send I2C byte with ack * @hw: pointer to the hardware structure * @byte: byte to send * * Returns an error code on error. */ static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte) { s32 status; status = ixgbe_clock_out_i2c_byte(hw, byte); if (status) return status; return ixgbe_get_i2c_ack(hw); } /** * ixgbe_in_i2c_byte_ack - Receive an I2C byte and send ack * @hw: pointer to the hardware structure * @byte: pointer to a u8 to receive the byte * * Returns an error code on error. */ static s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte) { s32 status; status = ixgbe_clock_in_i2c_byte(hw, byte); if (status) return status; /* ACK */ - return ixgbe_clock_out_i2c_bit(hw, FALSE); + return ixgbe_clock_out_i2c_bit(hw, false); } /** * ixgbe_ones_comp_byte_add - Perform one's complement addition * @add1: addend 1 * @add2: addend 2 * * Returns one's complement 8-bit sum. */ static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2) { u16 sum = add1 + add2; sum = (sum & 0xFF) + (sum >> 8); return sum & 0xFF; } /** * ixgbe_read_i2c_combined_generic_int - Perform I2C read combined operation * @hw: pointer to the hardware structure * @addr: I2C bus address to read from * @reg: I2C device register to read from * @val: pointer to location to receive read value - * @lock: TRUE if to take and release semaphore + * @lock: true if to take and release semaphore * * Returns an error code on error. 
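 *
 * Worked checksum example (illustrative values, not part of this change):
 * the csum byte sent is the bitwise NOT of the one's-complement sum of the
 * preceding address/register bytes, e.g. for reg_high = 0x81 and a low
 * register byte of 0x23: sum = 0x81 + 0x23 = 0xA4, csum = ~0xA4 = 0x5B.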
*/ s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val, bool lock) { u32 swfw_mask = hw->phy.phy_semaphore_mask; int max_retry = 3; int retry = 0; u8 csum_byte; u8 high_bits; u8 low_bits; u8 reg_high; u8 csum; reg_high = ((reg >> 7) & 0xFE) | 1; /* Indicate read combined */ csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF); csum = ~csum; do { if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) return IXGBE_ERR_SWFW_SYNC; ixgbe_i2c_start(hw); /* Device Address and write indication */ if (ixgbe_out_i2c_byte_ack(hw, addr)) goto fail; /* Write bits 14:8 */ if (ixgbe_out_i2c_byte_ack(hw, reg_high)) goto fail; /* Write bits 7:0 */ if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF)) goto fail; /* Write csum */ if (ixgbe_out_i2c_byte_ack(hw, csum)) goto fail; /* Re-start condition */ ixgbe_i2c_start(hw); /* Device Address and read indication */ if (ixgbe_out_i2c_byte_ack(hw, addr | 1)) goto fail; /* Get upper bits */ if (ixgbe_in_i2c_byte_ack(hw, &high_bits)) goto fail; /* Get low bits */ if (ixgbe_in_i2c_byte_ack(hw, &low_bits)) goto fail; /* Get csum */ if (ixgbe_clock_in_i2c_byte(hw, &csum_byte)) goto fail; /* NACK */ - if (ixgbe_clock_out_i2c_bit(hw, FALSE)) + if (ixgbe_clock_out_i2c_bit(hw, false)) goto fail; ixgbe_i2c_stop(hw); if (lock) hw->mac.ops.release_swfw_sync(hw, swfw_mask); *val = (high_bits << 8) | low_bits; return 0; fail: ixgbe_i2c_bus_clear(hw); if (lock) hw->mac.ops.release_swfw_sync(hw, swfw_mask); retry++; if (retry < max_retry) DEBUGOUT("I2C byte read combined error - Retrying.\n"); else DEBUGOUT("I2C byte read combined error.\n"); } while (retry < max_retry); return IXGBE_ERR_I2C; } /** * ixgbe_write_i2c_combined_generic_int - Perform I2C write combined operation * @hw: pointer to the hardware structure * @addr: I2C bus address to write to * @reg: I2C device register to write to * @val: value to write - * @lock: TRUE if to take and release semaphore + * @lock: true if to take and release semaphore * * Returns an error code on error. */ s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val, bool lock) { u32 swfw_mask = hw->phy.phy_semaphore_mask; int max_retry = 1; int retry = 0; u8 reg_high; u8 csum; reg_high = (reg >> 7) & 0xFE; /* Indicate write combined */ csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF); csum = ixgbe_ones_comp_byte_add(csum, val >> 8); csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF); csum = ~csum; do { if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) return IXGBE_ERR_SWFW_SYNC; ixgbe_i2c_start(hw); /* Device Address and write indication */ if (ixgbe_out_i2c_byte_ack(hw, addr)) goto fail; /* Write bits 14:8 */ if (ixgbe_out_i2c_byte_ack(hw, reg_high)) goto fail; /* Write bits 7:0 */ if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF)) goto fail; /* Write data 15:8 */ if (ixgbe_out_i2c_byte_ack(hw, val >> 8)) goto fail; /* Write data 7:0 */ if (ixgbe_out_i2c_byte_ack(hw, val & 0xFF)) goto fail; /* Write csum */ if (ixgbe_out_i2c_byte_ack(hw, csum)) goto fail; ixgbe_i2c_stop(hw); if (lock) hw->mac.ops.release_swfw_sync(hw, swfw_mask); return 0; fail: ixgbe_i2c_bus_clear(hw); if (lock) hw->mac.ops.release_swfw_sync(hw, swfw_mask); retry++; if (retry < max_retry) DEBUGOUT("I2C byte write combined error - Retrying.\n"); else DEBUGOUT("I2C byte write combined error.\n"); } while (retry < max_retry); return IXGBE_ERR_I2C; } /** * ixgbe_init_phy_ops_generic - Inits PHY function ptrs * @hw: pointer to the hardware structure * * Initialize the function pointers. 
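 *
 * Illustrative sketch (not part of this change): once these pointers are
 * assigned, shared code dispatches through the ops table rather than
 * calling the generic routines directly, e.g.:
 *
 *     u16 id_high;
 *     hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
 *                          IXGBE_MDIO_PMA_PMD_DEV_TYPE, &id_high);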
**/ s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw) { struct ixgbe_phy_info *phy = &hw->phy; DEBUGFUNC("ixgbe_init_phy_ops_generic"); /* PHY */ phy->ops.identify = ixgbe_identify_phy_generic; phy->ops.reset = ixgbe_reset_phy_generic; phy->ops.read_reg = ixgbe_read_phy_reg_generic; phy->ops.write_reg = ixgbe_write_phy_reg_generic; phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi; phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi; phy->ops.setup_link = ixgbe_setup_phy_link_generic; phy->ops.setup_link_speed = ixgbe_setup_phy_link_speed_generic; phy->ops.check_link = NULL; phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_generic; phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_generic; phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_generic; phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_generic; phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_generic; phy->ops.write_i2c_eeprom = ixgbe_write_i2c_eeprom_generic; phy->ops.i2c_bus_clear = ixgbe_i2c_bus_clear; phy->ops.identify_sfp = ixgbe_identify_module_generic; phy->sfp_type = ixgbe_sfp_type_unknown; phy->ops.read_i2c_byte_unlocked = ixgbe_read_i2c_byte_generic_unlocked; phy->ops.write_i2c_byte_unlocked = ixgbe_write_i2c_byte_generic_unlocked; phy->ops.check_overtemp = ixgbe_tn_check_overtemp; return IXGBE_SUCCESS; } /** * ixgbe_probe_phy - Probe a single address for a PHY * @hw: pointer to hardware structure * @phy_addr: PHY address to probe * - * Returns TRUE if PHY found + * Returns true if PHY found */ static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr) { u16 ext_ability = 0; if (!ixgbe_validate_phy_addr(hw, phy_addr)) { DEBUGOUT1("Unable to validate PHY address 0x%04X\n", phy_addr); - return FALSE; + return false; } if (ixgbe_get_phy_id(hw)) - return FALSE; + return false; hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id); if (hw->phy.type == ixgbe_phy_unknown) { hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); if (ext_ability & (IXGBE_MDIO_PHY_10GBASET_ABILITY | IXGBE_MDIO_PHY_1000BASET_ABILITY)) hw->phy.type = ixgbe_phy_cu_unknown; else hw->phy.type = ixgbe_phy_generic; } - return TRUE; + return true; } /** * ixgbe_identify_phy_generic - Get physical layer module * @hw: pointer to hardware structure * * Determines the physical layer module found on the current adapter. **/ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) { s32 status = IXGBE_ERR_PHY_ADDR_INVALID; u16 phy_addr; DEBUGFUNC("ixgbe_identify_phy_generic"); if (!hw->phy.phy_semaphore_mask) { if (hw->bus.lan_id) hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; else hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; } if (hw->phy.type != ixgbe_phy_unknown) return IXGBE_SUCCESS; if (hw->phy.nw_mng_if_sel) { phy_addr = (hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >> IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT; if (ixgbe_probe_phy(hw, phy_addr)) return IXGBE_SUCCESS; else return IXGBE_ERR_PHY_ADDR_INVALID; } for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { if (ixgbe_probe_phy(hw, phy_addr)) { status = IXGBE_SUCCESS; break; } } /* Certain media types do not have a phy so an address will not * be found and the code will take this path. Caller has to * decide if it is an error or not. */ if (status != IXGBE_SUCCESS) hw->phy.addr = 0; return status; } /** * ixgbe_check_reset_blocked - check status of MNG FW veto bit * @hw: pointer to the hardware structure * * This function checks the MMNGC.MNG_VETO bit to see if there are * any constraints on link from manageability. 
For MACs that don't * have this bit just return false since the link cannot be blocked * via this method. **/ s32 ixgbe_check_reset_blocked(struct ixgbe_hw *hw) { u32 mmngc; DEBUGFUNC("ixgbe_check_reset_blocked"); /* If we don't have this bit, it can't be blocking */ if (hw->mac.type == ixgbe_mac_82598EB) - return FALSE; + return false; mmngc = IXGBE_READ_REG(hw, IXGBE_MMNGC); if (mmngc & IXGBE_MMNGC_MNG_VETO) { ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "MNG_VETO bit detected.\n"); - return TRUE; + return true; } - return FALSE; + return false; } /** * ixgbe_validate_phy_addr - Determines whether a PHY address is valid * @hw: pointer to hardware structure * @phy_addr: PHY address * **/ bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr) { u16 phy_id = 0; - bool valid = FALSE; + bool valid = false; DEBUGFUNC("ixgbe_validate_phy_addr"); hw->phy.addr = phy_addr; hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH, IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id); if (phy_id != 0xFFFF && phy_id != 0x0) - valid = TRUE; + valid = true; DEBUGOUT1("PHY ID HIGH is 0x%04X\n", phy_id); return valid; } /** * ixgbe_get_phy_id - Get the PHY ID * @hw: pointer to hardware structure * **/ s32 ixgbe_get_phy_id(struct ixgbe_hw *hw) { u32 status; u16 phy_id_high = 0; u16 phy_id_low = 0; DEBUGFUNC("ixgbe_get_phy_id"); status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH, IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id_high); if (status == IXGBE_SUCCESS) { hw->phy.id = (u32)(phy_id_high << 16); status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW, IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id_low); hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK); hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK); } DEBUGOUT2("PHY_ID_HIGH 0x%04X, PHY_ID_LOW 0x%04X\n", phy_id_high, phy_id_low); return status; } /** * ixgbe_get_phy_type_from_id - Get the phy type * @phy_id: PHY ID information * **/ enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) { enum ixgbe_phy_type phy_type; DEBUGFUNC("ixgbe_get_phy_type_from_id"); switch (phy_id) { case TN1010_PHY_ID: phy_type = ixgbe_phy_tn; break; case X550_PHY_ID2: case X550_PHY_ID3: case X540_PHY_ID: phy_type = ixgbe_phy_aq; break; case QT2022_PHY_ID: phy_type = ixgbe_phy_qt; break; case ATH_PHY_ID: phy_type = ixgbe_phy_nl; break; case X557_PHY_ID: case X557_PHY_ID2: phy_type = ixgbe_phy_x550em_ext_t; break; case IXGBE_M88E1500_E_PHY_ID: case IXGBE_M88E1543_E_PHY_ID: phy_type = ixgbe_phy_ext_1g_t; break; default: phy_type = ixgbe_phy_unknown; break; } return phy_type; } /** * ixgbe_reset_phy_generic - Performs a PHY reset * @hw: pointer to hardware structure **/ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) { u32 i; u16 ctrl = 0; s32 status = IXGBE_SUCCESS; DEBUGFUNC("ixgbe_reset_phy_generic"); if (hw->phy.type == ixgbe_phy_unknown) status = ixgbe_identify_phy_generic(hw); if (status != IXGBE_SUCCESS || hw->phy.type == ixgbe_phy_none) goto out; /* Don't reset PHY if it's shut down due to overtemp. */ if (!hw->phy.reset_if_overtemp && (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw))) goto out; /* Blocked by MNG FW so bail */ if (ixgbe_check_reset_blocked(hw)) goto out; /* * Perform soft PHY reset to the PHY_XS. * This will cause a soft reset to the PHY */ hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, IXGBE_MDIO_PHY_XS_DEV_TYPE, IXGBE_MDIO_PHY_XS_RESET); /* * Poll for reset bit to self-clear indicating reset is complete. * Some PHYs could take up to 3 seconds to complete and need about * 1.7 usec delay after the reset is complete.
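 *
 * (Illustrative arithmetic: the loop below polls up to 30 times at 100 ms
 * intervals, i.e. a 30 * 100 ms = 3 s budget, matching the worst case
 * quoted above.)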
*/ for (i = 0; i < 30; i++) { msec_delay(100); if (hw->phy.type == ixgbe_phy_x550em_ext_t) { status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_TX_VENDOR_ALARMS_3, IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ctrl); if (status != IXGBE_SUCCESS) return status; if (ctrl & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) { usec_delay(2); break; } } else { status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, IXGBE_MDIO_PHY_XS_DEV_TYPE, &ctrl); if (status != IXGBE_SUCCESS) return status; if (!(ctrl & IXGBE_MDIO_PHY_XS_RESET)) { usec_delay(2); break; } } } if (ctrl & IXGBE_MDIO_PHY_XS_RESET) { status = IXGBE_ERR_RESET_FAILED; ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY reset polling failed to complete.\n"); } out: return status; } /** * ixgbe_read_phy_mdi - Reads a value from a specified PHY register without * the SWFW lock * @hw: pointer to hardware structure * @reg_addr: 32 bit address of PHY register to read * @device_type: 5 bit device type * @phy_data: Pointer to read data from PHY register **/ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 *phy_data) { u32 i, data, command; /* Setup and write the address cycle command */ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); /* * Check every 10 usec to see if the address cycle completed. * The MDI Command bit will clear when the operation is * complete */ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { usec_delay(10); command = IXGBE_READ_REG(hw, IXGBE_MSCA); if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) break; } if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY address command did not complete.\n"); DEBUGOUT("PHY address command did not complete, returning IXGBE_ERR_PHY\n"); return IXGBE_ERR_PHY; } /* * Address cycle complete, setup and write the read * command */ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND)); IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); /* * Check every 10 usec to see if the address cycle * completed. The MDI Command bit will clear when the * operation is complete */ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { usec_delay(10); command = IXGBE_READ_REG(hw, IXGBE_MSCA); if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) break; } if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY read command didn't complete\n"); DEBUGOUT("PHY read command didn't complete, returning IXGBE_ERR_PHY\n"); return IXGBE_ERR_PHY; } /* * Read operation is complete. 
Get the data * from MSRWD */ data = IXGBE_READ_REG(hw, IXGBE_MSRWD); data >>= IXGBE_MSRWD_READ_DATA_SHIFT; *phy_data = (u16)(data); return IXGBE_SUCCESS; } /** * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register * using the SWFW lock - this function is needed in most cases * @hw: pointer to hardware structure * @reg_addr: 32 bit address of PHY register to read * @device_type: 5 bit device type * @phy_data: Pointer to read data from PHY register **/ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 *phy_data) { s32 status; u32 gssr = hw->phy.phy_semaphore_mask; DEBUGFUNC("ixgbe_read_phy_reg_generic"); if (hw->mac.ops.acquire_swfw_sync(hw, gssr)) return IXGBE_ERR_SWFW_SYNC; status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data); hw->mac.ops.release_swfw_sync(hw, gssr); return status; } /** * ixgbe_write_phy_reg_mdi - Writes a value to specified PHY register * without SWFW lock * @hw: pointer to hardware structure * @reg_addr: 32 bit PHY register to write * @device_type: 5 bit device type * @phy_data: Data to write to the PHY register **/ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 phy_data) { u32 i, command; /* Put the data in the MDI single read and write data register*/ IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data); /* Setup and write the address cycle command */ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); /* * Check every 10 usec to see if the address cycle completed. * The MDI Command bit will clear when the operation is * complete */ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { usec_delay(10); command = IXGBE_READ_REG(hw, IXGBE_MSCA); if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) break; } if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY address cmd didn't complete\n"); return IXGBE_ERR_PHY; } /* * Address cycle complete, setup and write the write * command */ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND)); IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); /* * Check every 10 usec to see if the address cycle * completed. 
The MDI Command bit will clear when the * operation is complete */ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { usec_delay(10); command = IXGBE_READ_REG(hw, IXGBE_MSCA); if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) break; } if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY write cmd didn't complete\n"); return IXGBE_ERR_PHY; } return IXGBE_SUCCESS; } /** * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register * using SWFW lock- this function is needed in most cases * @hw: pointer to hardware structure * @reg_addr: 32 bit PHY register to write * @device_type: 5 bit device type * @phy_data: Data to write to the PHY register **/ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 phy_data) { s32 status; u32 gssr = hw->phy.phy_semaphore_mask; DEBUGFUNC("ixgbe_write_phy_reg_generic"); if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == IXGBE_SUCCESS) { status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type, phy_data); hw->mac.ops.release_swfw_sync(hw, gssr); } else { status = IXGBE_ERR_SWFW_SYNC; } return status; } /** * ixgbe_setup_phy_link_generic - Set and restart auto-neg * @hw: pointer to hardware structure * * Restart auto-negotiation and PHY and waits for completion. **/ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) { s32 status = IXGBE_SUCCESS; u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; - bool autoneg = FALSE; + bool autoneg = false; ixgbe_link_speed speed; DEBUGFUNC("ixgbe_setup_phy_link_generic"); ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); /* Set or unset auto-negotiation 10G advertisement */ hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE; if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) && (speed & IXGBE_LINK_SPEED_10GB_FULL)) autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE; hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); if (hw->mac.type == ixgbe_mac_X550) { /* Set or unset auto-negotiation 5G advertisement */ autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE; if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) && (speed & IXGBE_LINK_SPEED_5GB_FULL)) autoneg_reg |= IXGBE_MII_5GBASE_T_ADVERTISE; /* Set or unset auto-negotiation 2.5G advertisement */ autoneg_reg &= ~IXGBE_MII_2_5GBASE_T_ADVERTISE; if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) && (speed & IXGBE_LINK_SPEED_2_5GB_FULL)) autoneg_reg |= IXGBE_MII_2_5GBASE_T_ADVERTISE; } /* Set or unset auto-negotiation 1G advertisement */ autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE; if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) && (speed & IXGBE_LINK_SPEED_1GB_FULL)) autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE; hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); /* Set or unset auto-negotiation 100M advertisement */ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); autoneg_reg &= ~(IXGBE_MII_100BASE_T_ADVERTISE | IXGBE_MII_100BASE_T_ADVERTISE_HALF); if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) && (speed & IXGBE_LINK_SPEED_100_FULL)) autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE; hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, 
autoneg_reg); /* Blocked by MNG FW so don't reset PHY */ if (ixgbe_check_reset_blocked(hw)) return status; /* Restart PHY auto-negotiation. */ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); autoneg_reg |= IXGBE_MII_RESTART; hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); return status; } /** * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities * @hw: pointer to hardware structure * @speed: new link speed * @autoneg_wait_to_complete: unused **/ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { UNREFERENCED_1PARAMETER(autoneg_wait_to_complete); DEBUGFUNC("ixgbe_setup_phy_link_speed_generic"); /* * Clear autoneg_advertised and set new values based on input link * speed. */ hw->phy.autoneg_advertised = 0; if (speed & IXGBE_LINK_SPEED_10GB_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; if (speed & IXGBE_LINK_SPEED_5GB_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_5GB_FULL; if (speed & IXGBE_LINK_SPEED_2_5GB_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_2_5GB_FULL; if (speed & IXGBE_LINK_SPEED_1GB_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; if (speed & IXGBE_LINK_SPEED_100_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; if (speed & IXGBE_LINK_SPEED_10_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10_FULL; /* Setup link based on the new speed settings */ ixgbe_setup_phy_link(hw); return IXGBE_SUCCESS; } /** * ixgbe_get_copper_speeds_supported - Get copper link speeds from phy * @hw: pointer to hardware structure * * Determines the supported link capabilities by reading the PHY auto * negotiation register. **/ static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw) { s32 status; u16 speed_ability; status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY, IXGBE_MDIO_PMA_PMD_DEV_TYPE, &speed_ability); if (status) return status; if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G) hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL; if (speed_ability & IXGBE_MDIO_PHY_SPEED_1G) hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL; if (speed_ability & IXGBE_MDIO_PHY_SPEED_100M) hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL; switch (hw->mac.type) { case ixgbe_mac_X550: hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL; hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL; break; case ixgbe_mac_X550EM_x: case ixgbe_mac_X550EM_a: hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL; break; default: break; } return status; } /** * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities * @hw: pointer to hardware structure * @speed: pointer to link speed * @autoneg: boolean auto-negotiation value **/ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *autoneg) { s32 status = IXGBE_SUCCESS; DEBUGFUNC("ixgbe_get_copper_link_capabilities_generic"); - *autoneg = TRUE; + *autoneg = true; if (!hw->phy.speeds_supported) status = ixgbe_get_copper_speeds_supported(hw); *speed = hw->phy.speeds_supported; return status; } /** * ixgbe_check_phy_link_tnx - Determine link and speed status * @hw: pointer to hardware structure * @speed: current link speed - * @link_up: TRUE is link is up, FALSE otherwise + * @link_up: true if link is up, false otherwise * * Reads the VS1 register to determine if link is up and the current speed for * the PHY.
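 *
 * Hypothetical caller sketch (illustrative, not part of this change):
 *
 *     ixgbe_link_speed speed;
 *     bool link_up = false;
 *
 *     if (ixgbe_check_phy_link_tnx(hw, &speed, &link_up) == IXGBE_SUCCESS &&
 *         link_up)
 *             DEBUGOUT1("TNX link up, speed mask 0x%x\n", speed);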
**/ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up) { s32 status = IXGBE_SUCCESS; u32 time_out; u32 max_time_out = 10; u16 phy_link = 0; u16 phy_speed = 0; u16 phy_data = 0; DEBUGFUNC("ixgbe_check_phy_link_tnx"); /* Initialize speed and link to default case */ - *link_up = FALSE; + *link_up = false; *speed = IXGBE_LINK_SPEED_10GB_FULL; /* * Check current speed and link status of the PHY register. * This is a vendor specific register and may have to * be changed for other copper PHYs. */ for (time_out = 0; time_out < max_time_out; time_out++) { usec_delay(10); status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data); phy_link = phy_data & IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS; phy_speed = phy_data & IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS; if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) { - *link_up = TRUE; + *link_up = true; if (phy_speed == IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS) *speed = IXGBE_LINK_SPEED_1GB_FULL; break; } } return status; } /** * ixgbe_setup_phy_link_tnx - Set and restart auto-neg * @hw: pointer to hardware structure * * Restart auto-negotiation and PHY and waits for completion. **/ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) { s32 status = IXGBE_SUCCESS; u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; - bool autoneg = FALSE; + bool autoneg = false; ixgbe_link_speed speed; DEBUGFUNC("ixgbe_setup_phy_link_tnx"); ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); if (speed & IXGBE_LINK_SPEED_10GB_FULL) { /* Set or unset auto-negotiation 10G advertisement */ hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE; if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE; hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); } if (speed & IXGBE_LINK_SPEED_1GB_FULL) { /* Set or unset auto-negotiation 1G advertisement */ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX; if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX; hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); } if (speed & IXGBE_LINK_SPEED_100_FULL) { /* Set or unset auto-negotiation 100M advertisement */ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); autoneg_reg &= ~IXGBE_MII_100BASE_T_ADVERTISE; if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE; hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); } /* Blocked by MNG FW so don't reset PHY */ if (ixgbe_check_reset_blocked(hw)) return status; /* Restart PHY auto-negotiation. 
*/ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); autoneg_reg |= IXGBE_MII_RESTART; hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); return status; } /** * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version * @hw: pointer to hardware structure * @firmware_version: pointer to the PHY Firmware Version **/ s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, u16 *firmware_version) { s32 status; DEBUGFUNC("ixgbe_get_phy_firmware_version_tnx"); status = hw->phy.ops.read_reg(hw, TNX_FW_REV, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, firmware_version); return status; } /** * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version * @hw: pointer to hardware structure * @firmware_version: pointer to the PHY Firmware Version **/ s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, u16 *firmware_version) { s32 status; DEBUGFUNC("ixgbe_get_phy_firmware_version_generic"); status = hw->phy.ops.read_reg(hw, AQ_FW_REV, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, firmware_version); return status; } /** * ixgbe_reset_phy_nl - Performs a PHY reset * @hw: pointer to hardware structure **/ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) { u16 phy_offset, control, eword, edata, block_crc; - bool end_data = FALSE; + bool end_data = false; u16 list_offset, data_offset; u16 phy_data = 0; s32 ret_val = IXGBE_SUCCESS; u32 i; DEBUGFUNC("ixgbe_reset_phy_nl"); /* Blocked by MNG FW so bail */ if (ixgbe_check_reset_blocked(hw)) goto out; hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data); /* reset the PHY and poll for completion */ hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, IXGBE_MDIO_PHY_XS_DEV_TYPE, (phy_data | IXGBE_MDIO_PHY_XS_RESET)); for (i = 0; i < 100; i++) { hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data); if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) == 0) break; msec_delay(10); } if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) != 0) { DEBUGOUT("PHY reset did not complete.\n"); ret_val = IXGBE_ERR_PHY; goto out; } /* Get init offsets */ ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, &data_offset); if (ret_val != IXGBE_SUCCESS) goto out; ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc); data_offset++; while (!end_data) { /* * Read control word from PHY init contents offset */ ret_val = hw->eeprom.ops.read(hw, data_offset, &eword); if (ret_val) goto err_eeprom; control = (eword & IXGBE_CONTROL_MASK_NL) >> IXGBE_CONTROL_SHIFT_NL; edata = eword & IXGBE_DATA_MASK_NL; switch (control) { case IXGBE_DELAY_NL: data_offset++; DEBUGOUT1("DELAY: %d MS\n", edata); msec_delay(edata); break; case IXGBE_DATA_NL: DEBUGOUT("DATA:\n"); data_offset++; ret_val = hw->eeprom.ops.read(hw, data_offset, &phy_offset); if (ret_val) goto err_eeprom; data_offset++; for (i = 0; i < edata; i++) { ret_val = hw->eeprom.ops.read(hw, data_offset, &eword); if (ret_val) goto err_eeprom; hw->phy.ops.write_reg(hw, phy_offset, IXGBE_TWINAX_DEV, eword); DEBUGOUT2("Wrote %4.4x to %4.4x\n", eword, phy_offset); data_offset++; phy_offset++; } break; case IXGBE_CONTROL_NL: data_offset++; DEBUGOUT("CONTROL:\n"); if (edata == IXGBE_CONTROL_EOL_NL) { DEBUGOUT("EOL\n"); - end_data = TRUE; + end_data = true; } else if (edata == IXGBE_CONTROL_SOL_NL) { DEBUGOUT("SOL\n"); } else { DEBUGOUT("Bad control value\n"); ret_val = IXGBE_ERR_PHY; goto out; } break; default: DEBUGOUT("Bad control type\n"); ret_val = IXGBE_ERR_PHY; 
goto out; } } out: return ret_val; err_eeprom: ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, "eeprom read at offset %d failed", data_offset); return IXGBE_ERR_PHY; } /** * ixgbe_identify_module_generic - Identifies module type * @hw: pointer to hardware structure * * Determines HW type and calls appropriate function. **/ s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw) { s32 status = IXGBE_ERR_SFP_NOT_PRESENT; DEBUGFUNC("ixgbe_identify_module_generic"); switch (hw->mac.ops.get_media_type(hw)) { case ixgbe_media_type_fiber: status = ixgbe_identify_sfp_module_generic(hw); break; case ixgbe_media_type_fiber_qsfp: status = ixgbe_identify_qsfp_module_generic(hw); break; default: hw->phy.sfp_type = ixgbe_sfp_type_not_present; status = IXGBE_ERR_SFP_NOT_PRESENT; break; } return status; } /** * ixgbe_identify_sfp_module_generic - Identifies SFP modules * @hw: pointer to hardware structure * * Searches for and identifies the SFP module and assigns appropriate PHY type. **/ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) { s32 status = IXGBE_ERR_PHY_ADDR_INVALID; u32 vendor_oui = 0; enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; u8 identifier = 0; u8 comp_codes_1g = 0; u8 comp_codes_10g = 0; u8 oui_bytes[3] = {0, 0, 0}; u8 cable_tech = 0; u8 cable_spec = 0; u16 enforce_sfp = 0; DEBUGFUNC("ixgbe_identify_sfp_module_generic"); if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) { hw->phy.sfp_type = ixgbe_sfp_type_not_present; status = IXGBE_ERR_SFP_NOT_PRESENT; goto out; } /* LAN ID is needed for I2C access */ hw->mac.ops.set_lan_id(hw); status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER, &identifier); if (status != IXGBE_SUCCESS) goto err_read_i2c_eeprom; if (identifier != IXGBE_SFF_IDENTIFIER_SFP) { hw->phy.type = ixgbe_phy_sfp_unsupported; status = IXGBE_ERR_SFP_NOT_SUPPORTED; } else { status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g); if (status != IXGBE_SUCCESS) goto err_read_i2c_eeprom; status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g); if (status != IXGBE_SUCCESS) goto err_read_i2c_eeprom; status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_CABLE_TECHNOLOGY, &cable_tech); if (status != IXGBE_SUCCESS) goto err_read_i2c_eeprom; /* ID Module * ========= * 0 SFP_DA_CU * 1 SFP_SR * 2 SFP_LR * 3 SFP_DA_CORE0 - 82599-specific * 4 SFP_DA_CORE1 - 82599-specific * 5 SFP_SR/LR_CORE0 - 82599-specific * 6 SFP_SR/LR_CORE1 - 82599-specific * 7 SFP_act_lmt_DA_CORE0 - 82599-specific * 8 SFP_act_lmt_DA_CORE1 - 82599-specific * 9 SFP_1g_cu_CORE0 - 82599-specific * 10 SFP_1g_cu_CORE1 - 82599-specific * 11 SFP_1g_sx_CORE0 - 82599-specific * 12 SFP_1g_sx_CORE1 - 82599-specific */ if (hw->mac.type == ixgbe_mac_82598EB) { if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) hw->phy.sfp_type = ixgbe_sfp_type_da_cu; else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) hw->phy.sfp_type = ixgbe_sfp_type_sr; else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) hw->phy.sfp_type = ixgbe_sfp_type_lr; else hw->phy.sfp_type = ixgbe_sfp_type_unknown; } else { if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) { if (hw->bus.lan_id == 0) hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core0; else hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core1; } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) { hw->phy.ops.read_i2c_eeprom( hw, IXGBE_SFF_CABLE_SPEC_COMP, &cable_spec); if (cable_spec & IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) { if (hw->bus.lan_id == 0) hw->phy.sfp_type = ixgbe_sfp_type_da_act_lmt_core0; else hw->phy.sfp_type = 
ixgbe_sfp_type_da_act_lmt_core1; } else { hw->phy.sfp_type = ixgbe_sfp_type_unknown; } } else if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE | IXGBE_SFF_10GBASELR_CAPABLE)) { if (hw->bus.lan_id == 0) hw->phy.sfp_type = ixgbe_sfp_type_srlr_core0; else hw->phy.sfp_type = ixgbe_sfp_type_srlr_core1; } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) { if (hw->bus.lan_id == 0) hw->phy.sfp_type = ixgbe_sfp_type_1g_cu_core0; else hw->phy.sfp_type = ixgbe_sfp_type_1g_cu_core1; } else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) { if (hw->bus.lan_id == 0) hw->phy.sfp_type = ixgbe_sfp_type_1g_sx_core0; else hw->phy.sfp_type = ixgbe_sfp_type_1g_sx_core1; } else if (comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) { if (hw->bus.lan_id == 0) hw->phy.sfp_type = ixgbe_sfp_type_1g_lx_core0; else hw->phy.sfp_type = ixgbe_sfp_type_1g_lx_core1; } else { hw->phy.sfp_type = ixgbe_sfp_type_unknown; } } if (hw->phy.sfp_type != stored_sfp_type) - hw->phy.sfp_setup_needed = TRUE; + hw->phy.sfp_setup_needed = true; /* Determine if the SFP+ PHY is dual speed or not. */ - hw->phy.multispeed_fiber = FALSE; + hw->phy.multispeed_fiber = false; if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) && (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) || ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) && (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE))) - hw->phy.multispeed_fiber = TRUE; + hw->phy.multispeed_fiber = true; /* Determine PHY vendor */ if (hw->phy.type != ixgbe_phy_nl) { hw->phy.id = identifier; status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_VENDOR_OUI_BYTE0, &oui_bytes[0]); if (status != IXGBE_SUCCESS) goto err_read_i2c_eeprom; status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_VENDOR_OUI_BYTE1, &oui_bytes[1]); if (status != IXGBE_SUCCESS) goto err_read_i2c_eeprom; status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_VENDOR_OUI_BYTE2, &oui_bytes[2]); if (status != IXGBE_SUCCESS) goto err_read_i2c_eeprom; vendor_oui = ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT)); switch (vendor_oui) { case IXGBE_SFF_VENDOR_OUI_TYCO: if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) hw->phy.type = ixgbe_phy_sfp_passive_tyco; break; case IXGBE_SFF_VENDOR_OUI_FTL: if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) hw->phy.type = ixgbe_phy_sfp_ftl_active; else hw->phy.type = ixgbe_phy_sfp_ftl; break; case IXGBE_SFF_VENDOR_OUI_AVAGO: hw->phy.type = ixgbe_phy_sfp_avago; break; case IXGBE_SFF_VENDOR_OUI_INTEL: hw->phy.type = ixgbe_phy_sfp_intel; break; default: hw->phy.type = ixgbe_phy_sfp_unknown; break; } } /* Allow any DA cable vendor */ if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE | IXGBE_SFF_DA_ACTIVE_CABLE)) { if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) hw->phy.type = ixgbe_phy_sfp_passive_unknown; else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) hw->phy.type = ixgbe_phy_sfp_active_unknown; status = IXGBE_SUCCESS; goto out; } /* Verify supported 1G SFP modules */ if (comp_codes_10g == 0 && !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) { hw->phy.type = ixgbe_phy_sfp_unsupported; status = IXGBE_ERR_SFP_NOT_SUPPORTED; goto out; } /* Anything else 82598-based is supported */ if (hw->mac.type == ixgbe_mac_82598EB) { status = IXGBE_SUCCESS; goto out; } ixgbe_get_device_caps(hw, 
&enforce_sfp); if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) && !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) { /* Make sure we're a supported PHY type */ if (hw->phy.type == ixgbe_phy_sfp_intel) { status = IXGBE_SUCCESS; } else { - if (hw->allow_unsupported_sfp == TRUE) { + if (hw->allow_unsupported_sfp == true) { EWARN(hw, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n"); status = IXGBE_SUCCESS; } else { DEBUGOUT("SFP+ module not supported\n"); hw->phy.type = ixgbe_phy_sfp_unsupported; status = IXGBE_ERR_SFP_NOT_SUPPORTED; } } } else { status = IXGBE_SUCCESS; } } out: return status; err_read_i2c_eeprom: hw->phy.sfp_type = ixgbe_sfp_type_not_present; if (hw->phy.type != ixgbe_phy_nl) { hw->phy.id = 0; hw->phy.type = ixgbe_phy_unknown; } return IXGBE_ERR_SFP_NOT_PRESENT; } /** * ixgbe_get_supported_phy_sfp_layer_generic - Returns physical layer type * @hw: pointer to hardware structure * * Determines physical layer capabilities of the current SFP. */ u64 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw) { u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; u8 comp_codes_10g = 0; u8 comp_codes_1g = 0; DEBUGFUNC("ixgbe_get_supported_phy_sfp_layer_generic"); hw->phy.ops.identify_sfp(hw); if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) return physical_layer; switch (hw->phy.type) { case ixgbe_phy_sfp_passive_tyco: case ixgbe_phy_sfp_passive_unknown: case ixgbe_phy_qsfp_passive_unknown: physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; break; case ixgbe_phy_sfp_ftl_active: case ixgbe_phy_sfp_active_unknown: case ixgbe_phy_qsfp_active_unknown: physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA; break; case ixgbe_phy_sfp_avago: case ixgbe_phy_sfp_ftl: case ixgbe_phy_sfp_intel: case ixgbe_phy_sfp_unknown: hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g); hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g); if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T; else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX; break; case ixgbe_phy_qsfp_intel: case ixgbe_phy_qsfp_unknown: hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP, &comp_codes_10g); if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; break; default: break; } return physical_layer; } /** * ixgbe_identify_qsfp_module_generic - Identifies QSFP modules * @hw: pointer to hardware structure * * Searches for and identifies the QSFP module and assigns appropriate PHY type **/ s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) { s32 status = IXGBE_ERR_PHY_ADDR_INVALID; u32 vendor_oui = 0; enum ixgbe_sfp_type 
stored_sfp_type = hw->phy.sfp_type; u8 identifier = 0; u8 comp_codes_1g = 0; u8 comp_codes_10g = 0; u8 oui_bytes[3] = {0, 0, 0}; u16 enforce_sfp = 0; u8 connector = 0; u8 cable_length = 0; u8 device_tech = 0; - bool active_cable = FALSE; + bool active_cable = false; DEBUGFUNC("ixgbe_identify_qsfp_module_generic"); if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) { hw->phy.sfp_type = ixgbe_sfp_type_not_present; status = IXGBE_ERR_SFP_NOT_PRESENT; goto out; } /* LAN ID is needed for I2C access */ hw->mac.ops.set_lan_id(hw); status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER, &identifier); if (status != IXGBE_SUCCESS) goto err_read_i2c_eeprom; if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) { hw->phy.type = ixgbe_phy_sfp_unsupported; status = IXGBE_ERR_SFP_NOT_SUPPORTED; goto out; } hw->phy.id = identifier; status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP, &comp_codes_10g); if (status != IXGBE_SUCCESS) goto err_read_i2c_eeprom; status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_1GBE_COMP, &comp_codes_1g); if (status != IXGBE_SUCCESS) goto err_read_i2c_eeprom; if (comp_codes_10g & IXGBE_SFF_QSFP_DA_PASSIVE_CABLE) { hw->phy.type = ixgbe_phy_qsfp_passive_unknown; if (hw->bus.lan_id == 0) hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core0; else hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core1; } else if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE | IXGBE_SFF_10GBASELR_CAPABLE)) { if (hw->bus.lan_id == 0) hw->phy.sfp_type = ixgbe_sfp_type_srlr_core0; else hw->phy.sfp_type = ixgbe_sfp_type_srlr_core1; } else { if (comp_codes_10g & IXGBE_SFF_QSFP_DA_ACTIVE_CABLE) - active_cable = TRUE; + active_cable = true; if (!active_cable) { /* check for active DA cables that pre-date * SFF-8436 v3.6 */ hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_CONNECTOR, &connector); hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_CABLE_LENGTH, &cable_length); hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_DEVICE_TECH, &device_tech); if ((connector == IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE) && (cable_length > 0) && ((device_tech >> 4) == IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL)) - active_cable = TRUE; + active_cable = true; } if (active_cable) { hw->phy.type = ixgbe_phy_qsfp_active_unknown; if (hw->bus.lan_id == 0) hw->phy.sfp_type = ixgbe_sfp_type_da_act_lmt_core0; else hw->phy.sfp_type = ixgbe_sfp_type_da_act_lmt_core1; } else { /* unsupported module type */ hw->phy.type = ixgbe_phy_sfp_unsupported; status = IXGBE_ERR_SFP_NOT_SUPPORTED; goto out; } } if (hw->phy.sfp_type != stored_sfp_type) - hw->phy.sfp_setup_needed = TRUE; + hw->phy.sfp_setup_needed = true; /* Determine if the QSFP+ PHY is dual speed or not. 
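 * (As the test below shows, a module is treated as dual speed when it
 * advertises a 1G compliance code together with the matching 10G code:
 * 1000BASE-SX with 10GBASE-SR, or 1000BASE-LX with 10GBASE-LR.)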
*/ - hw->phy.multispeed_fiber = FALSE; + hw->phy.multispeed_fiber = false; if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) && (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) || ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) && (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE))) - hw->phy.multispeed_fiber = TRUE; + hw->phy.multispeed_fiber = true; /* Determine PHY vendor for optical modules */ if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE | IXGBE_SFF_10GBASELR_CAPABLE)) { status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0, &oui_bytes[0]); if (status != IXGBE_SUCCESS) goto err_read_i2c_eeprom; status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1, &oui_bytes[1]); if (status != IXGBE_SUCCESS) goto err_read_i2c_eeprom; status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2, &oui_bytes[2]); if (status != IXGBE_SUCCESS) goto err_read_i2c_eeprom; vendor_oui = ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT)); if (vendor_oui == IXGBE_SFF_VENDOR_OUI_INTEL) hw->phy.type = ixgbe_phy_qsfp_intel; else hw->phy.type = ixgbe_phy_qsfp_unknown; ixgbe_get_device_caps(hw, &enforce_sfp); if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) { /* Make sure we're a supported PHY type */ if (hw->phy.type == ixgbe_phy_qsfp_intel) { status = IXGBE_SUCCESS; } else { - if (hw->allow_unsupported_sfp == TRUE) { + if (hw->allow_unsupported_sfp == true) { EWARN(hw, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n"); status = IXGBE_SUCCESS; } else { DEBUGOUT("QSFP module not supported\n"); hw->phy.type = ixgbe_phy_sfp_unsupported; status = IXGBE_ERR_SFP_NOT_SUPPORTED; } } } else { status = IXGBE_SUCCESS; } } out: return status; err_read_i2c_eeprom: hw->phy.sfp_type = ixgbe_sfp_type_not_present; hw->phy.id = 0; hw->phy.type = ixgbe_phy_unknown; return IXGBE_ERR_SFP_NOT_PRESENT; } /** * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence * @hw: pointer to hardware structure * @list_offset: offset to the SFP ID list * @data_offset: offset to the SFP data block * * Checks the MAC's EEPROM to see if it supports a given SFP+ module type, if * so it returns the offsets to the phy init sequence block. 
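 *
 * Rough EEPROM layout implied by the walk below (illustrative, not
 * normative):
 *
 *     EEPROM[IXGBE_PHY_INIT_OFFSET_NL]  ->  list_offset
 *     list_offset + 1:  sfp_id[0], data_offset[0],
 *                       sfp_id[1], data_offset[1], ...
 *                       terminated by IXGBE_PHY_INIT_END_NL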
**/ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, u16 *list_offset, u16 *data_offset) { u16 sfp_id; u16 sfp_type = hw->phy.sfp_type; DEBUGFUNC("ixgbe_get_sfp_init_sequence_offsets"); if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) return IXGBE_ERR_SFP_NOT_SUPPORTED; if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) return IXGBE_ERR_SFP_NOT_PRESENT; if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) && (hw->phy.sfp_type == ixgbe_sfp_type_da_cu)) return IXGBE_ERR_SFP_NOT_SUPPORTED; /* * Limiting active cables and 1G Phys must be initialized as * SR modules */ if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 || sfp_type == ixgbe_sfp_type_1g_lx_core0 || sfp_type == ixgbe_sfp_type_1g_cu_core0 || sfp_type == ixgbe_sfp_type_1g_sx_core0) sfp_type = ixgbe_sfp_type_srlr_core0; else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 || sfp_type == ixgbe_sfp_type_1g_lx_core1 || sfp_type == ixgbe_sfp_type_1g_cu_core1 || sfp_type == ixgbe_sfp_type_1g_sx_core1) sfp_type = ixgbe_sfp_type_srlr_core1; /* Read offset to PHY init contents */ if (hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset)) { ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, "eeprom read at offset %d failed", IXGBE_PHY_INIT_OFFSET_NL); return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT; } if ((!*list_offset) || (*list_offset == 0xFFFF)) return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT; /* Shift offset to first ID word */ (*list_offset)++; /* * Find the matching SFP ID in the EEPROM * and program the init sequence */ if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id)) goto err_phy; while (sfp_id != IXGBE_PHY_INIT_END_NL) { if (sfp_id == sfp_type) { (*list_offset)++; if (hw->eeprom.ops.read(hw, *list_offset, data_offset)) goto err_phy; if ((!*data_offset) || (*data_offset == 0xFFFF)) { DEBUGOUT("SFP+ module not supported\n"); return IXGBE_ERR_SFP_NOT_SUPPORTED; } else { break; } } else { (*list_offset) += 2; if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id)) goto err_phy; } } if (sfp_id == IXGBE_PHY_INIT_END_NL) { DEBUGOUT("No matching SFP+ module found\n"); return IXGBE_ERR_SFP_NOT_SUPPORTED; } return IXGBE_SUCCESS; err_phy: ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, "eeprom read at offset %d failed", *list_offset); return IXGBE_ERR_PHY; } /** * ixgbe_read_i2c_eeprom_generic - Reads 8 bit EEPROM word over I2C interface * @hw: pointer to hardware structure * @byte_offset: EEPROM byte offset to read * @eeprom_data: value read * * Performs byte read operation to SFP module's EEPROM over I2C interface. **/ s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data) { DEBUGFUNC("ixgbe_read_i2c_eeprom_generic"); return hw->phy.ops.read_i2c_byte(hw, byte_offset, IXGBE_I2C_EEPROM_DEV_ADDR, eeprom_data); } /** * ixgbe_read_i2c_sff8472_generic - Reads 8 bit word over I2C interface * @hw: pointer to hardware structure * @byte_offset: byte offset at address 0xA2 * @sff8472_data: value read * * Performs byte read operation to SFP module's SFF-8472 data over I2C **/ static s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 *sff8472_data) { return hw->phy.ops.read_i2c_byte(hw, byte_offset, IXGBE_I2C_EEPROM_DEV_ADDR2, sff8472_data); } /** * ixgbe_write_i2c_eeprom_generic - Writes 8 bit EEPROM word over I2C interface * @hw: pointer to hardware structure * @byte_offset: EEPROM byte offset to write * @eeprom_data: value to write * * Performs byte write operation to SFP module's EEPROM over I2C interface. 
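 *
 * Sketch (hypothetical offset/value, not part of this change): a call such
 * as ixgbe_write_i2c_eeprom_generic(hw, 0x5A, 0xFF) simply forwards to
 * phy.ops.write_i2c_byte at the module EEPROM bus address
 * IXGBE_I2C_EEPROM_DEV_ADDR.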
**/ s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data) { DEBUGFUNC("ixgbe_write_i2c_eeprom_generic"); return hw->phy.ops.write_i2c_byte(hw, byte_offset, IXGBE_I2C_EEPROM_DEV_ADDR, eeprom_data); } /** - * ixgbe_is_sfp_probe - Returns TRUE if SFP is being detected + * ixgbe_is_sfp_probe - Returns true if SFP is being detected * @hw: pointer to hardware structure * @offset: eeprom offset to be read * @addr: I2C address to be read */ static bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr) { if (addr == IXGBE_I2C_EEPROM_DEV_ADDR && offset == IXGBE_SFF_IDENTIFIER && hw->phy.sfp_type == ixgbe_sfp_type_not_present) - return TRUE; - return FALSE; + return true; + return false; } /** * ixgbe_read_i2c_byte_generic_int - Reads 8 bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to read * @dev_addr: address to read from * @data: value read - * @lock: TRUE if to take and release semaphore + * @lock: true if to take and release semaphore * * Performs byte read operation to SFP module's EEPROM over I2C interface at * a specified device address. **/ static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data, bool lock) { s32 status; u32 max_retry = 10; u32 retry = 0; u32 swfw_mask = hw->phy.phy_semaphore_mask; bool nack = 1; *data = 0; DEBUGFUNC("ixgbe_read_i2c_byte_generic"); if (hw->mac.type >= ixgbe_mac_X550) max_retry = 3; if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr)) max_retry = IXGBE_SFP_DETECT_RETRIES; do { if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) return IXGBE_ERR_SWFW_SYNC; ixgbe_i2c_start(hw); /* Device Address and write indication */ status = ixgbe_clock_out_i2c_byte(hw, dev_addr); if (status != IXGBE_SUCCESS) goto fail; status = ixgbe_get_i2c_ack(hw); if (status != IXGBE_SUCCESS) goto fail; status = ixgbe_clock_out_i2c_byte(hw, byte_offset); if (status != IXGBE_SUCCESS) goto fail; status = ixgbe_get_i2c_ack(hw); if (status != IXGBE_SUCCESS) goto fail; ixgbe_i2c_start(hw); /* Device Address and read indication */ status = ixgbe_clock_out_i2c_byte(hw, (dev_addr | 0x1)); if (status != IXGBE_SUCCESS) goto fail; status = ixgbe_get_i2c_ack(hw); if (status != IXGBE_SUCCESS) goto fail; status = ixgbe_clock_in_i2c_byte(hw, data); if (status != IXGBE_SUCCESS) goto fail; status = ixgbe_clock_out_i2c_bit(hw, nack); if (status != IXGBE_SUCCESS) goto fail; ixgbe_i2c_stop(hw); if (lock) hw->mac.ops.release_swfw_sync(hw, swfw_mask); return IXGBE_SUCCESS; fail: ixgbe_i2c_bus_clear(hw); if (lock) { hw->mac.ops.release_swfw_sync(hw, swfw_mask); msec_delay(100); } retry++; if (retry < max_retry) DEBUGOUT("I2C byte read error - Retrying.\n"); else DEBUGOUT("I2C byte read error.\n"); } while (retry < max_retry); return status; } /** * ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to read * @dev_addr: address to read from * @data: value read * * Performs byte read operation to SFP module's EEPROM over I2C interface at * a specified device address. 
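 *
 * Illustrative usage (mirrors what ixgbe_read_i2c_eeprom_generic above does
 * for the module identifier):
 *
 *     u8 id;
 *     s32 ret = ixgbe_read_i2c_byte_generic(hw, IXGBE_SFF_IDENTIFIER,
 *                                           IXGBE_I2C_EEPROM_DEV_ADDR, &id);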
**/ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data) { return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr, - data, TRUE); + data, true); } /** * ixgbe_read_i2c_byte_generic_unlocked - Reads 8 bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to read * @dev_addr: address to read from * @data: value read * * Performs byte read operation to SFP module's EEPROM over I2C interface at * a specified device address. **/ s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data) { return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr, - data, FALSE); + data, false); } /** * ixgbe_write_i2c_byte_generic_int - Writes 8 bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to write * @dev_addr: address to write to * @data: value to write - * @lock: TRUE if to take and release semaphore + * @lock: true if to take and release semaphore * * Performs byte write operation to SFP module's EEPROM over I2C interface at * a specified device address. **/ static s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data, bool lock) { s32 status; u32 max_retry = 1; u32 retry = 0; u32 swfw_mask = hw->phy.phy_semaphore_mask; DEBUGFUNC("ixgbe_write_i2c_byte_generic"); if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != IXGBE_SUCCESS) return IXGBE_ERR_SWFW_SYNC; do { ixgbe_i2c_start(hw); status = ixgbe_clock_out_i2c_byte(hw, dev_addr); if (status != IXGBE_SUCCESS) goto fail; status = ixgbe_get_i2c_ack(hw); if (status != IXGBE_SUCCESS) goto fail; status = ixgbe_clock_out_i2c_byte(hw, byte_offset); if (status != IXGBE_SUCCESS) goto fail; status = ixgbe_get_i2c_ack(hw); if (status != IXGBE_SUCCESS) goto fail; status = ixgbe_clock_out_i2c_byte(hw, data); if (status != IXGBE_SUCCESS) goto fail; status = ixgbe_get_i2c_ack(hw); if (status != IXGBE_SUCCESS) goto fail; ixgbe_i2c_stop(hw); if (lock) hw->mac.ops.release_swfw_sync(hw, swfw_mask); return IXGBE_SUCCESS; fail: ixgbe_i2c_bus_clear(hw); retry++; if (retry < max_retry) DEBUGOUT("I2C byte write error - Retrying.\n"); else DEBUGOUT("I2C byte write error.\n"); } while (retry < max_retry); if (lock) hw->mac.ops.release_swfw_sync(hw, swfw_mask); return status; } /** * ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to write * @dev_addr: address to write to * @data: value to write * * Performs byte write operation to SFP module's EEPROM over I2C interface at * a specified device address. **/ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data) { return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr, - data, TRUE); + data, true); } /** * ixgbe_write_i2c_byte_generic_unlocked - Writes 8 bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to write * @dev_addr: address to write to * @data: value to write * * Performs byte write operation to SFP module's EEPROM over I2C interface at * a specified device address. **/ s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data) { return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr, - data, FALSE); + data, false); } /** * ixgbe_i2c_start - Sets I2C start condition * @hw: pointer to hardware structure * * Sets I2C start condition (High -> Low on SDA while SCL is High) * Set bit-bang mode on X550 hardware. 
**/ static void ixgbe_i2c_start(struct ixgbe_hw *hw) { u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); DEBUGFUNC("ixgbe_i2c_start"); i2cctl |= IXGBE_I2C_BB_EN_BY_MAC(hw); /* Start condition must begin with data and clock high */ ixgbe_set_i2c_data(hw, &i2cctl, 1); ixgbe_raise_i2c_clk(hw, &i2cctl); /* Setup time for start condition (4.7us) */ usec_delay(IXGBE_I2C_T_SU_STA); ixgbe_set_i2c_data(hw, &i2cctl, 0); /* Hold time for start condition (4us) */ usec_delay(IXGBE_I2C_T_HD_STA); ixgbe_lower_i2c_clk(hw, &i2cctl); /* Minimum low period of clock is 4.7 us */ usec_delay(IXGBE_I2C_T_LOW); } /** * ixgbe_i2c_stop - Sets I2C stop condition * @hw: pointer to hardware structure * * Sets I2C stop condition (Low -> High on SDA while SCL is High) * Disables bit-bang mode and negates data output enable on X550 * hardware. **/ static void ixgbe_i2c_stop(struct ixgbe_hw *hw) { u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw); u32 bb_en_bit = IXGBE_I2C_BB_EN_BY_MAC(hw); DEBUGFUNC("ixgbe_i2c_stop"); /* Stop condition must begin with data low and clock high */ ixgbe_set_i2c_data(hw, &i2cctl, 0); ixgbe_raise_i2c_clk(hw, &i2cctl); /* Setup time for stop condition (4us) */ usec_delay(IXGBE_I2C_T_SU_STO); ixgbe_set_i2c_data(hw, &i2cctl, 1); /* bus free time between stop and start (4.7us)*/ usec_delay(IXGBE_I2C_T_BUF); if (bb_en_bit || data_oe_bit || clk_oe_bit) { i2cctl &= ~bb_en_bit; i2cctl |= data_oe_bit | clk_oe_bit; IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); IXGBE_WRITE_FLUSH(hw); } } /** * ixgbe_clock_in_i2c_byte - Clocks in one byte via I2C * @hw: pointer to hardware structure * @data: data byte to clock in * * Clocks in one byte data via I2C data/clock **/ static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data) { s32 i; bool bit = 0; DEBUGFUNC("ixgbe_clock_in_i2c_byte"); *data = 0; for (i = 7; i >= 0; i--) { ixgbe_clock_in_i2c_bit(hw, &bit); *data |= bit << i; } return IXGBE_SUCCESS; } /** * ixgbe_clock_out_i2c_byte - Clocks out one byte via I2C * @hw: pointer to hardware structure * @data: data byte clocked out * * Clocks out one byte data via I2C data/clock **/ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) { s32 status = IXGBE_SUCCESS; s32 i; u32 i2cctl; bool bit; DEBUGFUNC("ixgbe_clock_out_i2c_byte"); for (i = 7; i >= 0; i--) { bit = (data >> i) & 0x1; status = ixgbe_clock_out_i2c_bit(hw, bit); if (status != IXGBE_SUCCESS) break; } /* Release SDA line (set high) */ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); i2cctl |= IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); IXGBE_WRITE_FLUSH(hw); return status; } /** * ixgbe_get_i2c_ack - Polls for I2C ACK * @hw: pointer to hardware structure * * Clocks in/out one bit via I2C data/clock **/ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) { u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); s32 status = IXGBE_SUCCESS; u32 i = 0; u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); u32 timeout = 10; bool ack = 1; DEBUGFUNC("ixgbe_get_i2c_ack"); if (data_oe_bit) { i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); i2cctl |= data_oe_bit; IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); IXGBE_WRITE_FLUSH(hw); } ixgbe_raise_i2c_clk(hw, &i2cctl); /* Minimum high period of clock is 4us */ usec_delay(IXGBE_I2C_T_HIGH); /* Poll for ACK. 
Note that ACK in I2C spec is * transition from 1 to 0 */ for (i = 0; i < timeout; i++) { i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); ack = ixgbe_get_i2c_data(hw, &i2cctl); usec_delay(1); if (!ack) break; } if (ack) { DEBUGOUT("I2C ack was not received.\n"); status = IXGBE_ERR_I2C; } ixgbe_lower_i2c_clk(hw, &i2cctl); /* Minimum low period of clock is 4.7 us */ usec_delay(IXGBE_I2C_T_LOW); return status; } /** * ixgbe_clock_in_i2c_bit - Clocks in one bit via I2C data/clock * @hw: pointer to hardware structure * @data: read data value * * Clocks in one bit via I2C data/clock **/ static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) { u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); DEBUGFUNC("ixgbe_clock_in_i2c_bit"); if (data_oe_bit) { i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); i2cctl |= data_oe_bit; IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); IXGBE_WRITE_FLUSH(hw); } ixgbe_raise_i2c_clk(hw, &i2cctl); /* Minimum high period of clock is 4us */ usec_delay(IXGBE_I2C_T_HIGH); i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); *data = ixgbe_get_i2c_data(hw, &i2cctl); ixgbe_lower_i2c_clk(hw, &i2cctl); /* Minimum low period of clock is 4.7 us */ usec_delay(IXGBE_I2C_T_LOW); return IXGBE_SUCCESS; } /** * ixgbe_clock_out_i2c_bit - Clocks in/out one bit via I2C data/clock * @hw: pointer to hardware structure * @data: data value to write * * Clocks out one bit via I2C data/clock **/ static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data) { s32 status; u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); DEBUGFUNC("ixgbe_clock_out_i2c_bit"); status = ixgbe_set_i2c_data(hw, &i2cctl, data); if (status == IXGBE_SUCCESS) { ixgbe_raise_i2c_clk(hw, &i2cctl); /* Minimum high period of clock is 4us */ usec_delay(IXGBE_I2C_T_HIGH); ixgbe_lower_i2c_clk(hw, &i2cctl); /* Minimum low period of clock is 4.7 us. * This also takes care of the data hold time. */ usec_delay(IXGBE_I2C_T_LOW); } else { status = IXGBE_ERR_I2C; ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, "I2C data was not set to %X\n", data); } return status; } /** * ixgbe_raise_i2c_clk - Raises the I2C SCL clock * @hw: pointer to hardware structure * @i2cctl: Current value of I2CCTL register * * Raises the I2C clock line '0'->'1' * Negates the I2C clock output enable on X550 hardware. **/ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) { u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw); u32 i = 0; u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT; u32 i2cctl_r = 0; DEBUGFUNC("ixgbe_raise_i2c_clk"); if (clk_oe_bit) { *i2cctl |= clk_oe_bit; IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); } for (i = 0; i < timeout; i++) { *i2cctl |= IXGBE_I2C_CLK_OUT_BY_MAC(hw); IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); /* SCL rise time (1000ns) */ usec_delay(IXGBE_I2C_T_RISE); i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); if (i2cctl_r & IXGBE_I2C_CLK_IN_BY_MAC(hw)) break; } } /** * ixgbe_lower_i2c_clk - Lowers the I2C SCL clock * @hw: pointer to hardware structure * @i2cctl: Current value of I2CCTL register * * Lowers the I2C clock line '1'->'0' * Asserts the I2C clock output enable on X550 hardware. 
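Both byte helpers shift most-significant-bit first: ixgbe_clock_out_i2c_byte() drives bit 7 down to bit 0 onto the wire, and ixgbe_clock_in_i2c_byte() reassembles with the mirror-image shift. A tiny self-contained check of that arithmetic:

#include <stdint.h>
#include <stdio.h>

/* MSB-first (de)serialization, exactly as in the clock_out/clock_in
 * byte helpers above: bit 7 travels first. */
int main(void)
{
    uint8_t out = 0xA5, in = 0, bit;
    for (int i = 7; i >= 0; i--) {
        bit = (out >> i) & 0x1;   /* what clock_out_i2c_bit would drive */
        in |= bit << i;           /* what clock_in_i2c_bit reassembles */
    }
    printf("0x%02X -> 0x%02X\n", out, in);   /* 0xA5 -> 0xA5 */
    return 0;
}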
**/ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) { DEBUGFUNC("ixgbe_lower_i2c_clk"); *i2cctl &= ~(IXGBE_I2C_CLK_OUT_BY_MAC(hw)); *i2cctl &= ~IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw); IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); /* SCL fall time (300ns) */ usec_delay(IXGBE_I2C_T_FALL); } /** * ixgbe_set_i2c_data - Sets the I2C data bit * @hw: pointer to hardware structure * @i2cctl: Current value of I2CCTL register * @data: I2C data value (0 or 1) to set * * Sets the I2C data bit * Asserts the I2C data output enable on X550 hardware. **/ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) { u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); s32 status = IXGBE_SUCCESS; DEBUGFUNC("ixgbe_set_i2c_data"); if (data) *i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); else *i2cctl &= ~(IXGBE_I2C_DATA_OUT_BY_MAC(hw)); *i2cctl &= ~data_oe_bit; IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ usec_delay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA); if (!data) /* Can't verify data in this case */ return IXGBE_SUCCESS; if (data_oe_bit) { *i2cctl |= data_oe_bit; IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); } /* Verify data was set correctly */ *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); if (data != ixgbe_get_i2c_data(hw, i2cctl)) { status = IXGBE_ERR_I2C; ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, "Error - I2C data was not set to %X.\n", data); } return status; } /** * ixgbe_get_i2c_data - Reads the I2C SDA data bit * @hw: pointer to hardware structure * @i2cctl: Current value of I2CCTL register * * Returns the I2C data bit value * Negates the I2C data output enable on X550 hardware. **/ static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl) { u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); bool data; UNREFERENCED_1PARAMETER(hw); DEBUGFUNC("ixgbe_get_i2c_data"); if (data_oe_bit) { *i2cctl |= data_oe_bit; IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); usec_delay(IXGBE_I2C_T_FALL); } if (*i2cctl & IXGBE_I2C_DATA_IN_BY_MAC(hw)) data = 1; else data = 0; return data; } /** * ixgbe_i2c_bus_clear - Clears the I2C bus * @hw: pointer to hardware structure * * Clears the I2C bus by sending nine clock pulses. * Used when data line is stuck low. **/ void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw) { u32 i2cctl; u32 i; DEBUGFUNC("ixgbe_i2c_bus_clear"); ixgbe_i2c_start(hw); i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); ixgbe_set_i2c_data(hw, &i2cctl, 1); for (i = 0; i < 9; i++) { ixgbe_raise_i2c_clk(hw, &i2cctl); /* Min high period of clock is 4us */ usec_delay(IXGBE_I2C_T_HIGH); ixgbe_lower_i2c_clk(hw, &i2cctl); /* Min low period of clock is 4.7us*/ usec_delay(IXGBE_I2C_T_LOW); } ixgbe_i2c_start(hw); /* Put the i2c bus back to default state */ ixgbe_i2c_stop(hw); } /** * ixgbe_tn_check_overtemp - Checks if an overtemp occurred. 
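ixgbe_i2c_bus_clear() above sends nine clock pulses with SDA released. The reasoning: a slave wedged mid-transfer can be holding at most eight data bits plus an ACK slot, so nine clocks are always enough for it to finish shifting and release the data line. A toy model of that recovery, assuming a slave stuck five bits into a byte:

#include <stdio.h>

/* Model of the 9-pulse recovery: the slave shifts one bit per SCL
 * pulse until it has nothing left, then releases SDA. */
int main(void)
{
    int slave_bits_left = 5;          /* e.g. wedged mid-byte */
    for (int pulse = 1; pulse <= 9; pulse++) {
        if (slave_bits_left > 0)
            slave_bits_left--;
        printf("pulse %d: SDA %s\n", pulse,
            slave_bits_left ? "held low" : "released");
    }
    return 0;
}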
* @hw: pointer to hardware structure * * Checks if the LASI temp alarm status was triggered due to overtemp **/ s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw) { s32 status = IXGBE_SUCCESS; u16 phy_data = 0; DEBUGFUNC("ixgbe_tn_check_overtemp"); if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM) goto out; /* Check that the LASI temp alarm status was triggered */ hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG, IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_data); if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM)) goto out; status = IXGBE_ERR_OVERTEMP; ERROR_REPORT1(IXGBE_ERROR_CAUTION, "Device over temperature"); out: return status; } /** * ixgbe_set_copper_phy_power - Control power for copper phy * @hw: pointer to hardware structure - * @on: TRUE for on, FALSE for off + * @on: true for on, false for off */ s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on) { u32 status; u16 reg; if (!on && ixgbe_mng_present(hw)) return 0; status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®); if (status) return status; if (on) { reg &= ~IXGBE_MDIO_PHY_SET_LOW_POWER_MODE; } else { if (ixgbe_check_reset_blocked(hw)) return 0; reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE; } status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, reg); return status; } diff --git a/sys/dev/ixgbe/ixgbe_vf.c b/sys/dev/ixgbe/ixgbe_vf.c index e692f12ebb0d..a125bcbb8386 100644 --- a/sys/dev/ixgbe/ixgbe_vf.c +++ b/sys/dev/ixgbe/ixgbe_vf.c @@ -1,784 +1,784 @@ /****************************************************************************** SPDX-License-Identifier: BSD-3-Clause Copyright (c) 2001-2017, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/ /*$FreeBSD$*/ #include "ixgbe.h" #ifndef IXGBE_VFWRITE_REG #define IXGBE_VFWRITE_REG IXGBE_WRITE_REG #endif #ifndef IXGBE_VFREAD_REG #define IXGBE_VFREAD_REG IXGBE_READ_REG #endif /** * ixgbe_init_ops_vf - Initialize the pointers for vf * @hw: pointer to hardware structure * * This will assign function pointers, adapter-specific functions can * override the assignment of generic function pointers by assigning * their own adapter-specific function pointers. * Does not touch the hardware. **/ s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw) { /* MAC */ hw->mac.ops.init_hw = ixgbe_init_hw_vf; hw->mac.ops.reset_hw = ixgbe_reset_hw_vf; hw->mac.ops.start_hw = ixgbe_start_hw_vf; /* Cannot clear stats on VF */ hw->mac.ops.clear_hw_cntrs = NULL; hw->mac.ops.get_media_type = NULL; hw->mac.ops.get_mac_addr = ixgbe_get_mac_addr_vf; hw->mac.ops.stop_adapter = ixgbe_stop_adapter_vf; hw->mac.ops.get_bus_info = NULL; hw->mac.ops.negotiate_api_version = ixgbevf_negotiate_api_version; /* Link */ hw->mac.ops.setup_link = ixgbe_setup_mac_link_vf; hw->mac.ops.check_link = ixgbe_check_mac_link_vf; hw->mac.ops.get_link_capabilities = NULL; /* RAR, Multicast, VLAN */ hw->mac.ops.set_rar = ixgbe_set_rar_vf; hw->mac.ops.set_uc_addr = ixgbevf_set_uc_addr_vf; hw->mac.ops.init_rx_addrs = NULL; hw->mac.ops.update_mc_addr_list = ixgbe_update_mc_addr_list_vf; hw->mac.ops.update_xcast_mode = ixgbevf_update_xcast_mode; hw->mac.ops.enable_mc = NULL; hw->mac.ops.disable_mc = NULL; hw->mac.ops.clear_vfta = NULL; hw->mac.ops.set_vfta = ixgbe_set_vfta_vf; hw->mac.ops.set_rlpml = ixgbevf_rlpml_set_vf; hw->mac.max_tx_queues = 1; hw->mac.max_rx_queues = 1; hw->mbx.ops.init_params = ixgbe_init_mbx_params_vf; return IXGBE_SUCCESS; } /* ixgbe_virt_clr_reg - Set register to default (power on) state. 
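ixgbe_init_ops_vf() below is the shared-code ops-table idiom: a struct of function pointers is filled in with generic handlers, and each variant overrides entries or NULLs out what it cannot do (a VF cannot clear hardware counters, for instance). The same pattern in miniature, with illustrative names rather than driver API:

#include <stdio.h>

/* Minimal ops table: NULL means "not supported on this variant". */
struct mac_ops {
    int (*init_hw)(void);
    int (*clear_hw_cntrs)(void);
};

static int vf_init_hw(void) { return 0; }

int main(void)
{
    struct mac_ops ops = { 0 };
    ops.init_hw = vf_init_hw;      /* variant override */
    ops.clear_hw_cntrs = NULL;     /* VF cannot clear stats */

    if (ops.init_hw)
        printf("init_hw -> %d\n", ops.init_hw());
    if (!ops.clear_hw_cntrs)
        printf("clear_hw_cntrs unsupported on this variant\n");
    return 0;
}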
* @hw: pointer to hardware structure */ static void ixgbe_virt_clr_reg(struct ixgbe_hw *hw) { int i; u32 vfsrrctl; u32 vfdca_rxctrl; u32 vfdca_txctrl; /* VRSRRCTL default values (BSIZEPACKET = 2048, BSIZEHEADER = 256) */ vfsrrctl = 0x100 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; vfsrrctl |= 0x800 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; /* DCA_RXCTRL default value */ vfdca_rxctrl = IXGBE_DCA_RXCTRL_DESC_RRO_EN | IXGBE_DCA_RXCTRL_DATA_WRO_EN | IXGBE_DCA_RXCTRL_HEAD_WRO_EN; /* DCA_TXCTRL default value */ vfdca_txctrl = IXGBE_DCA_TXCTRL_DESC_RRO_EN | IXGBE_DCA_TXCTRL_DESC_WRO_EN | IXGBE_DCA_TXCTRL_DATA_RRO_EN; IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0); for (i = 0; i < 8; i++) { IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0); IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0); IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), 0); IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), vfsrrctl); IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0); IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0); IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), 0); IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(i), 0); IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(i), 0); IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(i), vfdca_rxctrl); IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), vfdca_txctrl); } IXGBE_WRITE_FLUSH(hw); }
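The VFSRRCTL default built in ixgbe_virt_clr_reg() above can be checked by hand, assuming the usual shift values from ixgbe_type.h (BSIZEPKT_SHIFT = 10, BSIZEHDRSIZE_SHIFT = 2; verify against the header before relying on the numbers): 0x800 >> 10 = 2 encodes a 2 KB packet buffer and 0x100 << 2 = 0x400 encodes a 256-byte header buffer, matching the BSIZEPACKET/BSIZEHEADER comment.

#include <stdio.h>

/* Reconstructing the VFSRRCTL default, with assumed shift values. */
#define SRRCTL_BSIZEHDRSIZE_SHIFT 2    /* assumed, check ixgbe_type.h */
#define SRRCTL_BSIZEPKT_SHIFT     10   /* assumed, check ixgbe_type.h */

int main(void)
{
    unsigned int srrctl = 0x100 << SRRCTL_BSIZEHDRSIZE_SHIFT; /* 256 B header */
    srrctl |= 0x800 >> SRRCTL_BSIZEPKT_SHIFT;                 /* 2 x 1 KB packet */
    printf("VFSRRCTL default = 0x%08X\n", srrctl);            /* 0x00000402 */
    return 0;
}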
/** * ixgbe_start_hw_vf - Prepare hardware for Tx/Rx * @hw: pointer to hardware structure * * Starts the hardware by filling the bus info structure and media type, clears * all on chip counters, initializes receive address registers, multicast * table, VLAN filter table, calls routine to set up link and flow control * settings, and leaves transmit and receive units disabled and uninitialized **/ s32 ixgbe_start_hw_vf(struct ixgbe_hw *hw) { /* Clear adapter stopped flag */ - hw->adapter_stopped = FALSE; + hw->adapter_stopped = false; return IXGBE_SUCCESS; } /** * ixgbe_init_hw_vf - virtual function hardware initialization * @hw: pointer to hardware structure * * Initialize the hardware by resetting the hardware and then starting * the hardware **/ s32 ixgbe_init_hw_vf(struct ixgbe_hw *hw) { s32 status = hw->mac.ops.start_hw(hw); hw->mac.ops.get_mac_addr(hw, hw->mac.addr); return status; } /** * ixgbe_reset_hw_vf - Performs hardware reset * @hw: pointer to hardware structure * * Resets the hardware by resetting the transmit and receive units, masks and * clears all interrupts. **/ s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw) { struct ixgbe_mbx_info *mbx = &hw->mbx; u32 timeout = IXGBE_VF_INIT_TIMEOUT; s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR; u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN]; u8 *addr = (u8 *)(&msgbuf[1]); DEBUGFUNC("ixgbevf_reset_hw_vf"); /* Call adapter stop to disable tx/rx and clear interrupts */ hw->mac.ops.stop_adapter(hw); /* reset the api version */ hw->api_version = ixgbe_mbox_api_10; DEBUGOUT("Issuing a function level reset to MAC\n"); IXGBE_VFWRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST); IXGBE_WRITE_FLUSH(hw); msec_delay(50); /* we cannot reset while the RSTI / RSTD bits are asserted */ while (!mbx->ops.check_for_rst(hw, 0) && timeout) { timeout--; usec_delay(5); } if (!timeout) return IXGBE_ERR_RESET_FAILED; /* Reset VF registers to initial values */ ixgbe_virt_clr_reg(hw); /* mailbox timeout can now become active */ mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT; msgbuf[0] = IXGBE_VF_RESET; mbx->ops.write_posted(hw, msgbuf, 1, 0); msec_delay(10); /* * set our "perm_addr" based on info provided by PF * also set up the mc_filter_type which is piggybacked * on the mac address in word 3 */ ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, 0); if (ret_val) return ret_val; if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) && msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK)) return IXGBE_ERR_INVALID_MAC_ADDR; if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK)) memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS); hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD]; return ret_val; } /** * ixgbe_stop_adapter_vf - Generic stop Tx/Rx units * @hw: pointer to hardware structure * * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, * disables transmit and receive units. The adapter_stopped flag is used by * the shared code and drivers to determine if the adapter is in a stopped * state and should not touch the hardware. **/ s32 ixgbe_stop_adapter_vf(struct ixgbe_hw *hw) { u32 reg_val; u16 i; /* * Set the adapter_stopped flag so other driver functions stop touching * the hardware */ - hw->adapter_stopped = TRUE; + hw->adapter_stopped = true; /* Clear interrupt mask to stop interrupts from being generated */ IXGBE_VFWRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK); /* Clear any pending interrupts, flush previous writes */ IXGBE_VFREAD_REG(hw, IXGBE_VTEICR); /* Disable the transmit unit. Each queue must be disabled. */ for (i = 0; i < hw->mac.max_tx_queues; i++) IXGBE_VFWRITE_REG(hw, IXGBE_VFTXDCTL(i), IXGBE_TXDCTL_SWFLSH); /* Disable the receive unit by stopping each queue */ for (i = 0; i < hw->mac.max_rx_queues; i++) { reg_val = IXGBE_VFREAD_REG(hw, IXGBE_VFRXDCTL(i)); reg_val &= ~IXGBE_RXDCTL_ENABLE; IXGBE_VFWRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val); } /* Clear packet split and pool config */ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0); /* flush all queue disables */ IXGBE_WRITE_FLUSH(hw); msec_delay(2); return IXGBE_SUCCESS; } /** * ixgbe_mta_vector - Determines bit-vector in multicast table to set * @hw: pointer to hardware structure * @mc_addr: the multicast address * * Extracts the 12 bits from a multicast address that determine which * bit-vector to set in the multicast table. The hardware uses 12 bits from * incoming rx multicast addresses to determine the bit-vector to check in * the MTA. Which of the 4 combinations of 12 bits the hardware uses is set * by the MO field of the MCSTCTRL. The MO field is set during initialization * to mc_filter_type.
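The reset handshake in ixgbe_reset_hw_vf() above is pure message layout: word 0 carries IXGBE_VF_RESET plus an ACK/NACK flag from the PF, words 1-2 carry the permanent MAC, and word 3 piggybacks mc_filter_type. A host-side model of the unpacking, with placeholder flag values; only the word layout is taken from the code:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define VF_RESET    0x01u        /* placeholder for IXGBE_VF_RESET */
#define MSGTYPE_ACK 0x80000000u  /* placeholder for IXGBE_VT_MSGTYPE_ACK */

int main(void)
{
    uint32_t msg[4] = { VF_RESET | MSGTYPE_ACK, 0, 0, 0 };
    uint8_t mac[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

    memcpy(&msg[1], mac, sizeof(mac));   /* PF packs the perm MAC here */
    msg[3] = 1;                          /* and piggybacks mc_filter_type */

    if ((msg[0] & ~MSGTYPE_ACK) == VF_RESET && (msg[0] & MSGTYPE_ACK)) {
        uint8_t perm[6];
        memcpy(perm, &msg[1], sizeof(perm));
        printf("perm_addr %02x:%02x:..., filter type %u\n",
            perm[0], perm[1], (unsigned)msg[3]);
    }
    return 0;
}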
**/ static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) { u32 vector = 0; switch (hw->mac.mc_filter_type) { case 0: /* use bits [47:36] of the address */ vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); break; case 1: /* use bits [46:35] of the address */ vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); break; case 2: /* use bits [45:34] of the address */ vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); break; case 3: /* use bits [43:32] of the address */ vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); break; default: /* Invalid mc_filter_type */ DEBUGOUT("MC filter type param set incorrectly\n"); ASSERT(0); break; } /* vector can only be 12-bits or boundary will be exceeded */ vector &= 0xFFF; return vector; } static s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg, u32 *retmsg, u16 size) { struct ixgbe_mbx_info *mbx = &hw->mbx; s32 retval = mbx->ops.write_posted(hw, msg, size, 0); if (retval) return retval; return mbx->ops.read_posted(hw, retmsg, size, 0); } /** * ixgbe_set_rar_vf - set device MAC address * @hw: pointer to hardware structure * @index: Receive address register to write * @addr: Address to put into receive address register * @vmdq: VMDq "set" or "pool" index * @enable_addr: set flag that address is active **/ s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, u32 enable_addr) { u32 msgbuf[3]; u8 *msg_addr = (u8 *)(&msgbuf[1]); s32 ret_val; UNREFERENCED_3PARAMETER(vmdq, enable_addr, index); memset(msgbuf, 0, 12); msgbuf[0] = IXGBE_VF_SET_MAC_ADDR; memcpy(msg_addr, addr, 6); ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 3); msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; /* if nacked the address was rejected, use "perm_addr" */ if (!ret_val && (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) { ixgbe_get_mac_addr_vf(hw, hw->mac.addr); return IXGBE_ERR_MBX; } return ret_val; } /** * ixgbe_update_mc_addr_list_vf - Update Multicast addresses * @hw: pointer to the HW structure * @mc_addr_list: array of multicast addresses to program * @mc_addr_count: number of multicast addresses to program * @next: caller supplied function to return next address in list * @clear: unused * * Updates the Multicast Table Array. **/ s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list, u32 mc_addr_count, ixgbe_mc_addr_itr next, bool clear) { struct ixgbe_mbx_info *mbx = &hw->mbx; u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; u16 *vector_list = (u16 *)&msgbuf[1]; u32 vector; u32 cnt, i; u32 vmdq; UNREFERENCED_1PARAMETER(clear); DEBUGFUNC("ixgbe_update_mc_addr_list_vf"); /* Each entry in the list uses 1 16 bit word. We have 30 * 16 bit words available in our HW msg buffer (minus 1 for the * msg type). That's 30 hash values if we pack 'em right. If * there are more than 30 MC addresses to add then punt the * extras for now and then add code to handle more than 30 later. * It would be unusual for a server to request that many multi-cast * addresses except for in large enterprise network environments. */ DEBUGOUT1("MC Addr Count = %d\n", mc_addr_count); cnt = (mc_addr_count > 30) ? 
30 : mc_addr_count; msgbuf[0] = IXGBE_VF_SET_MULTICAST; msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT; for (i = 0; i < cnt; i++) { vector = ixgbe_mta_vector(hw, next(hw, &mc_addr_list, &vmdq)); DEBUGOUT1("Hash value = 0x%03X\n", vector); vector_list[i] = (u16)vector; } return mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE, 0); } /** * ixgbevf_update_xcast_mode - Update Multicast mode * @hw: pointer to the HW structure * @xcast_mode: new multicast mode * * Updates the Multicast Mode of VF. **/ s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) { u32 msgbuf[2]; s32 err; switch (hw->api_version) { case ixgbe_mbox_api_12: /* New modes were introduced in 1.3 version */ if (xcast_mode > IXGBEVF_XCAST_MODE_ALLMULTI) return IXGBE_ERR_FEATURE_NOT_SUPPORTED; /* Fall through */ case ixgbe_mbox_api_13: break; default: return IXGBE_ERR_FEATURE_NOT_SUPPORTED; } msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE; msgbuf[1] = xcast_mode; err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2); if (err) return err; msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK)) return IXGBE_ERR_FEATURE_NOT_SUPPORTED; return IXGBE_SUCCESS; } /** * ixgbe_set_vfta_vf - Set/Unset vlan filter table address * @hw: pointer to the HW structure * @vlan: 12 bit VLAN ID * @vind: unused by VF drivers - * @vlan_on: if TRUE then set bit, else clear bit + * @vlan_on: if true then set bit, else clear bit * @vlvf_bypass: boolean flag indicating updating default pool is okay * * Turn on/off specified VLAN in the VLAN filter table. **/ s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on, bool vlvf_bypass) { u32 msgbuf[2]; s32 ret_val; UNREFERENCED_2PARAMETER(vind, vlvf_bypass); msgbuf[0] = IXGBE_VF_SET_VLAN; msgbuf[1] = vlan; - /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */ + /* Setting the 8 bit field MSG INFO to true indicates "add" */ msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT; ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2); if (!ret_val && (msgbuf[0] & IXGBE_VT_MSGTYPE_ACK)) return IXGBE_SUCCESS; return ret_val | (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK); } /** * ixgbe_get_num_of_tx_queues_vf - Get number of TX queues * @hw: pointer to hardware structure * * Returns the number of transmit queues for the given adapter. **/ u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw) { UNREFERENCED_1PARAMETER(hw); return IXGBE_VF_MAX_TX_QUEUES; } /** * ixgbe_get_num_of_rx_queues_vf - Get number of RX queues * @hw: pointer to hardware structure * * Returns the number of receive queues for the given adapter. **/ u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw) { UNREFERENCED_1PARAMETER(hw); return IXGBE_VF_MAX_RX_QUEUES; } /** * ixgbe_get_mac_addr_vf - Read device MAC address * @hw: pointer to the HW structure * @mac_addr: the MAC address **/ s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr) { int i; for (i = 0; i < IXGBE_ETH_LENGTH_OF_ADDRESS; i++) mac_addr[i] = hw->mac.perm_addr[i]; return IXGBE_SUCCESS; } s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) { u32 msgbuf[3], msgbuf_chk; u8 *msg_addr = (u8 *)(&msgbuf[1]); s32 ret_val; memset(msgbuf, 0, sizeof(msgbuf)); /* * If index is one then this is the start of a new list and needs * indication to the PF so it can do its own list management. * If it is zero then that tells the PF to just clear all of * this VF's macvlans and there is no new list.
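The 12-bit hash selection in ixgbe_mta_vector() above is easy to lift out and test in isolation. This standalone copy prints the table index a sample multicast address maps to under each of the four MO settings:

#include <stdint.h>
#include <stdio.h>

/* Standalone copy of the ixgbe_mta_vector() selection: which 12 address
 * bits feed the multicast table depends on mc_filter_type (MCSTCTRL.MO). */
static uint32_t mta_vector(int mc_filter_type, const uint8_t *mc_addr)
{
    uint32_t vector = 0;

    switch (mc_filter_type) {
    case 0:  /* bits [47:36] */
        vector = (mc_addr[4] >> 4) | ((uint16_t)mc_addr[5] << 4);
        break;
    case 1:  /* bits [46:35] */
        vector = (mc_addr[4] >> 3) | ((uint16_t)mc_addr[5] << 5);
        break;
    case 2:  /* bits [45:34] */
        vector = (mc_addr[4] >> 2) | ((uint16_t)mc_addr[5] << 6);
        break;
    case 3:  /* bits [43:32] */
        vector = mc_addr[4] | ((uint16_t)mc_addr[5] << 8);
        break;
    }
    return vector & 0xFFF;   /* table index is 12 bits wide */
}

int main(void)
{
    const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x01, 0x81 };
    for (int mo = 0; mo < 4; mo++)
        printf("MO=%d -> 0x%03X\n", mo, (unsigned)mta_vector(mo, mc));
    return 0;
}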
*/ msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT; msgbuf[0] |= IXGBE_VF_SET_MACVLAN; msgbuf_chk = msgbuf[0]; if (addr) memcpy(msg_addr, addr, 6); ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 3); if (!ret_val) { msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_NACK)) return IXGBE_ERR_OUT_OF_MEM; } return ret_val; } /** * ixgbe_setup_mac_link_vf - Setup MAC link settings * @hw: pointer to hardware structure * @speed: new link speed - * @autoneg_wait_to_complete: TRUE when waiting for completion is needed + * @autoneg_wait_to_complete: true when waiting for completion is needed * * Set the link speed in the AUTOC register and restarts link. **/ s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { UNREFERENCED_3PARAMETER(hw, speed, autoneg_wait_to_complete); return IXGBE_SUCCESS; } /** * ixgbe_check_mac_link_vf - Get link/speed status * @hw: pointer to hardware structure * @speed: pointer to link speed - * @link_up: TRUE is link is up, FALSE otherwise - * @autoneg_wait_to_complete: TRUE when waiting for completion is needed + * @link_up: true if link is up, false otherwise + * @autoneg_wait_to_complete: true when waiting for completion is needed * * Reads the links register to determine if link is up and the current speed **/ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up, bool autoneg_wait_to_complete) { struct ixgbe_mbx_info *mbx = &hw->mbx; struct ixgbe_mac_info *mac = &hw->mac; s32 ret_val = IXGBE_SUCCESS; u32 links_reg; u32 in_msg = 0; UNREFERENCED_1PARAMETER(autoneg_wait_to_complete); /* If we were hit with a reset, drop the link */ if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout) - mac->get_link_status = TRUE; + mac->get_link_status = true; if (!mac->get_link_status) goto out; /* if link status is down no point in checking to see if pf is up */ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); if (!(links_reg & IXGBE_LINKS_UP)) goto out; /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs * before the link status is correct */ if (mac->type == ixgbe_mac_82599_vf) { int i; for (i = 0; i < 5; i++) { usec_delay(100); links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); if (!(links_reg & IXGBE_LINKS_UP)) goto out; } } switch (links_reg & IXGBE_LINKS_SPEED_82599) { case IXGBE_LINKS_SPEED_10G_82599: *speed = IXGBE_LINK_SPEED_10GB_FULL; if (hw->mac.type >= ixgbe_mac_X550) { if (links_reg & IXGBE_LINKS_SPEED_NON_STD) *speed = IXGBE_LINK_SPEED_2_5GB_FULL; } break; case IXGBE_LINKS_SPEED_1G_82599: *speed = IXGBE_LINK_SPEED_1GB_FULL; break; case IXGBE_LINKS_SPEED_100_82599: *speed = IXGBE_LINK_SPEED_100_FULL; if (hw->mac.type == ixgbe_mac_X550) { if (links_reg & IXGBE_LINKS_SPEED_NON_STD) *speed = IXGBE_LINK_SPEED_5GB_FULL; } break; case IXGBE_LINKS_SPEED_10_X550EM_A: *speed = IXGBE_LINK_SPEED_UNKNOWN; /* Reserved in older MACs */ if (hw->mac.type >= ixgbe_mac_X550) *speed = IXGBE_LINK_SPEED_10_FULL; break; default: *speed = IXGBE_LINK_SPEED_UNKNOWN; } /* if the read failed it could just be a mailbox collision, best wait * until we are called again and don't report an error */ if (mbx->ops.read(hw, &in_msg, 1, 0)) goto out; if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) { /* msg is not CTS and is NACK; we must have lost CTS status */ if (in_msg & IXGBE_VT_MSGTYPE_NACK) ret_val = -1; goto out; } /* the pf is talking, if we timed out in the past we reinit */ if (!mbx->timeout) { ret_val = -1; goto out; } /* if we passed all the tests above then the link
is up and we no * longer need to check for link */ - mac->get_link_status = FALSE; + mac->get_link_status = false; out: *link_up = !mac->get_link_status; return ret_val; } /** * ixgbevf_rlpml_set_vf - Set the maximum receive packet length * @hw: pointer to the HW structure * @max_size: value to assign to max frame size **/ s32 ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size) { u32 msgbuf[2]; s32 retval; msgbuf[0] = IXGBE_VF_SET_LPE; msgbuf[1] = max_size; retval = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2); if (retval) return retval; if ((msgbuf[0] & IXGBE_VF_SET_LPE) && (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK)) return IXGBE_ERR_MBX; return 0; } /** * ixgbevf_negotiate_api_version - Negotiate supported API version * @hw: pointer to the HW structure * @api: integer containing requested API version **/ int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api) { int err; u32 msg[3]; /* Negotiate the mailbox API version */ msg[0] = IXGBE_VF_API_NEGOTIATE; msg[1] = api; msg[2] = 0; err = ixgbevf_write_msg_read_ack(hw, msg, msg, 3); if (!err) { msg[0] &= ~IXGBE_VT_MSGTYPE_CTS; /* Store value and return 0 on success */ if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) { hw->api_version = api; return 0; } err = IXGBE_ERR_INVALID_ARGUMENT; } return err; } int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, unsigned int *default_tc) { int err; u32 msg[5]; /* do nothing if API doesn't support ixgbevf_get_queues */ switch (hw->api_version) { case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: break; default: return 0; } /* Fetch queue configuration from the PF */ msg[0] = IXGBE_VF_GET_QUEUES; msg[1] = msg[2] = msg[3] = msg[4] = 0; err = ixgbevf_write_msg_read_ack(hw, msg, msg, 5); if (!err) { msg[0] &= ~IXGBE_VT_MSGTYPE_CTS; /* * if we didn't get an ACK there must have been * some sort of mailbox error so we should treat it * as such */ if (msg[0] != (IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK)) return IXGBE_ERR_MBX; /* record and validate values from message */ hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES]; if (hw->mac.max_tx_queues == 0 || hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES) hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES; hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES]; if (hw->mac.max_rx_queues == 0 || hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES) hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES; *num_tcs = msg[IXGBE_VF_TRANS_VLAN]; /* in case of unknown state assume we cannot tag frames */ if (*num_tcs > hw->mac.max_rx_queues) *num_tcs = 1; *default_tc = msg[IXGBE_VF_DEF_QUEUE]; /* default to queue 0 on out-of-bounds queue number */ if (*default_tc >= hw->mac.max_tx_queues) *default_tc = 0; } return err; } diff --git a/sys/dev/ixgbe/ixgbe_x540.c b/sys/dev/ixgbe/ixgbe_x540.c index a7f470d7942a..db2d90e1230e 100644 --- a/sys/dev/ixgbe/ixgbe_x540.c +++ b/sys/dev/ixgbe/ixgbe_x540.c @@ -1,1069 +1,1069 @@ /****************************************************************************** SPDX-License-Identifier: BSD-3-Clause Copyright (c) 2001-2017, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #include "ixgbe_x540.h" #include "ixgbe_type.h" #include "ixgbe_api.h" #include "ixgbe_common.h" #include "ixgbe_phy.h" #define IXGBE_X540_MAX_TX_QUEUES 128 #define IXGBE_X540_MAX_RX_QUEUES 128 #define IXGBE_X540_RAR_ENTRIES 128 #define IXGBE_X540_MC_TBL_SIZE 128 #define IXGBE_X540_VFT_TBL_SIZE 128 #define IXGBE_X540_RX_PB_SIZE 384 static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw); static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw); static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw); /** * ixgbe_init_ops_X540 - Inits func ptrs and MAC type * @hw: pointer to hardware structure * * Initialize the function pointers and assign the MAC type for X540. * Does not touch the hardware. 
**/ s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; struct ixgbe_phy_info *phy = &hw->phy; struct ixgbe_eeprom_info *eeprom = &hw->eeprom; s32 ret_val; DEBUGFUNC("ixgbe_init_ops_X540"); ret_val = ixgbe_init_phy_ops_generic(hw); ret_val = ixgbe_init_ops_generic(hw); /* EEPROM */ eeprom->ops.init_params = ixgbe_init_eeprom_params_X540; eeprom->ops.read = ixgbe_read_eerd_X540; eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_X540; eeprom->ops.write = ixgbe_write_eewr_X540; eeprom->ops.write_buffer = ixgbe_write_eewr_buffer_X540; eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X540; eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X540; eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X540; /* PHY */ phy->ops.init = ixgbe_init_phy_ops_generic; phy->ops.reset = NULL; phy->ops.set_phy_power = ixgbe_set_copper_phy_power; /* MAC */ mac->ops.reset_hw = ixgbe_reset_hw_X540; mac->ops.enable_relaxed_ordering = ixgbe_enable_relaxed_ordering_gen2; mac->ops.get_media_type = ixgbe_get_media_type_X540; mac->ops.get_supported_physical_layer = ixgbe_get_supported_physical_layer_X540; mac->ops.read_analog_reg8 = NULL; mac->ops.write_analog_reg8 = NULL; mac->ops.start_hw = ixgbe_start_hw_X540; mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic; mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic; mac->ops.get_device_caps = ixgbe_get_device_caps_generic; mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic; mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic; mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X540; mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X540; mac->ops.init_swfw_sync = ixgbe_init_swfw_sync_X540; mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic; mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic; /* RAR, Multicast, VLAN */ mac->ops.set_vmdq = ixgbe_set_vmdq_generic; mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic; mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic; mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic; mac->rar_highwater = 1; mac->ops.set_vfta = ixgbe_set_vfta_generic; mac->ops.set_vlvf = ixgbe_set_vlvf_generic; mac->ops.clear_vfta = ixgbe_clear_vfta_generic; mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic; mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing; mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing; /* Link */ mac->ops.get_link_capabilities = ixgbe_get_copper_link_capabilities_generic; mac->ops.setup_link = ixgbe_setup_mac_link_X540; mac->ops.setup_rxpba = ixgbe_set_rxpba_generic; mac->ops.check_link = ixgbe_check_mac_link_generic; mac->ops.bypass_rw = ixgbe_bypass_rw_generic; mac->ops.bypass_valid_rd = ixgbe_bypass_valid_rd_generic; mac->ops.bypass_set = ixgbe_bypass_set_generic; mac->ops.bypass_rd_eep = ixgbe_bypass_rd_eep_generic; mac->mcft_size = IXGBE_X540_MC_TBL_SIZE; mac->vft_size = IXGBE_X540_VFT_TBL_SIZE; mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES; mac->rx_pb_size = IXGBE_X540_RX_PB_SIZE; mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES; mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES; mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); /* * FWSM register * ARC supported; valid only if manageability features are * enabled. 
*/ mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)) & IXGBE_FWSM_MODE_MASK); hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf; /* LEDs */ mac->ops.blink_led_start = ixgbe_blink_led_start_X540; mac->ops.blink_led_stop = ixgbe_blink_led_stop_X540; /* Manageability interface */ mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic; mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic; return ret_val; } /** * ixgbe_get_link_capabilities_X540 - Determines link capabilities * @hw: pointer to hardware structure * @speed: pointer to link speed - * @autoneg: TRUE when autoneg or autotry is enabled + * @autoneg: true when autoneg or autotry is enabled * * Determines the link capabilities by reading the AUTOC register. **/ s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *autoneg) { ixgbe_get_copper_link_capabilities_generic(hw, speed, autoneg); return IXGBE_SUCCESS; } /** * ixgbe_get_media_type_X540 - Get media type * @hw: pointer to hardware structure * * Returns the media type (fiber, copper, backplane) **/ enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw) { UNREFERENCED_1PARAMETER(hw); return ixgbe_media_type_copper; } /** * ixgbe_setup_mac_link_X540 - Sets the auto advertised capabilities * @hw: pointer to hardware structure * @speed: new link speed - * @autoneg_wait_to_complete: TRUE when waiting for completion is needed + * @autoneg_wait_to_complete: true when waiting for completion is needed **/ s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { DEBUGFUNC("ixgbe_setup_mac_link_X540"); return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete); } /** * ixgbe_reset_hw_X540 - Perform hardware reset * @hw: pointer to hardware structure * * Resets the hardware by resetting the transmit and receive units, masks * and clears all interrupts, and perform a reset. **/ s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) { s32 status; u32 ctrl, i; u32 swfw_mask = hw->phy.phy_semaphore_mask; DEBUGFUNC("ixgbe_reset_hw_X540"); /* Call adapter stop to disable tx/rx and clear interrupts */ status = hw->mac.ops.stop_adapter(hw); if (status != IXGBE_SUCCESS) goto reset_hw_out; /* flush pending Tx transactions */ ixgbe_clear_tx_pending(hw); mac_reset_top: status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); if (status != IXGBE_SUCCESS) { ERROR_REPORT2(IXGBE_ERROR_CAUTION, "semaphore failed with %d", status); return IXGBE_ERR_SWFW_SYNC; } ctrl = IXGBE_CTRL_RST; ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); IXGBE_WRITE_FLUSH(hw); hw->mac.ops.release_swfw_sync(hw, swfw_mask); /* Poll for reset bit to self-clear indicating reset is complete */ for (i = 0; i < 10; i++) { usec_delay(1); ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); if (!(ctrl & IXGBE_CTRL_RST_MASK)) break; } if (ctrl & IXGBE_CTRL_RST_MASK) { status = IXGBE_ERR_RESET_FAILED; ERROR_REPORT1(IXGBE_ERROR_POLLING, "Reset polling failed to complete.\n"); } msec_delay(100); /* * Double resets are required for recovery from certain error * conditions. Between resets, it is necessary to stall to allow time * for any pending HW events to complete. */ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; goto mac_reset_top; } /* Set the Rx packet buffer size. 
*/ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT); /* Store the permanent mac address */ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); /* * Store MAC address from RAR0, clear receive address registers, and * clear the multicast table. Also reset num_rar_entries to 128, * since we modify this value when programming the SAN MAC address. */ hw->mac.num_rar_entries = 128; hw->mac.ops.init_rx_addrs(hw); /* Store the permanent SAN mac address */ hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); /* Add the SAN MAC address to the RAR only if it's a valid address */ if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { /* Save the SAN MAC RAR index */ hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index, hw->mac.san_addr, 0, IXGBE_RAH_AV); /* clear VMDq pool/queue selection for this RAR */ hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index, IXGBE_CLEAR_VMDQ_ALL); /* Reserve the last RAR for the SAN MAC address */ hw->mac.num_rar_entries--; } /* Store the alternative WWNN/WWPN prefix */ hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, &hw->mac.wwpn_prefix); reset_hw_out: return status; } /** * ixgbe_start_hw_X540 - Prepare hardware for Tx/Rx * @hw: pointer to hardware structure * * Starts the hardware using the generic start_hw function * and the generation start_hw function. * Then performs revision-specific operations, if any. **/ s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw) { s32 ret_val = IXGBE_SUCCESS; DEBUGFUNC("ixgbe_start_hw_X540"); ret_val = ixgbe_start_hw_generic(hw); if (ret_val != IXGBE_SUCCESS) goto out; ret_val = ixgbe_start_hw_gen2(hw); out: return ret_val; } /** * ixgbe_get_supported_physical_layer_X540 - Returns physical layer type * @hw: pointer to hardware structure * * Determines physical layer capabilities of the current configuration. **/ u64 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw) { u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; u16 ext_ability = 0; DEBUGFUNC("ixgbe_get_supported_physical_layer_X540"); hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY) physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; return physical_layer; } /** * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params * @hw: pointer to hardware structure * * Initializes the EEPROM parameters ixgbe_eeprom_info within the * ixgbe_hw struct in order to set up EEPROM access. **/ s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) { struct ixgbe_eeprom_info *eeprom = &hw->eeprom; u32 eec; u16 eeprom_size; DEBUGFUNC("ixgbe_init_eeprom_params_X540"); if (eeprom->type == ixgbe_eeprom_uninitialized) { eeprom->semaphore_delay = 10; eeprom->type = ixgbe_flash; eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> IXGBE_EEC_SIZE_SHIFT); eeprom->word_size = 1 << (eeprom_size + IXGBE_EEPROM_WORD_SIZE_SHIFT); DEBUGOUT2("Eeprom params: type = %d, size = %d\n", eeprom->type, eeprom->word_size); } return IXGBE_SUCCESS; } /** * ixgbe_read_eerd_X540- Read EEPROM word using EERD * @hw: pointer to hardware structure * @offset: offset of word in the EEPROM to read * @data: word read from the EEPROM * * Reads a 16 bit word from the EEPROM using the EERD register. 
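The word_size computation in ixgbe_init_eeprom_params_X540() above expands the EEC.SIZE field exponentially: word_size = 1 << (SIZE + IXGBE_EEPROM_WORD_SIZE_SHIFT). A quick table of the resulting sizes, assuming the shift is 6 (check ixgbe_type.h before relying on the exact figures):

#include <stdio.h>

/* EEC.SIZE -> EEPROM word count, with an assumed WORD_SIZE_SHIFT of 6. */
int main(void)
{
    const unsigned shift = 6;  /* assumed IXGBE_EEPROM_WORD_SIZE_SHIFT */
    for (unsigned size = 0; size <= 4; size++)
        printf("EEC.SIZE=%u -> %u words\n", size, 1u << (size + shift));
    /* e.g. SIZE=4 -> 1024 16-bit words = 2 KB of shadow RAM */
    return 0;
}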
**/ s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data) { s32 status = IXGBE_SUCCESS; DEBUGFUNC("ixgbe_read_eerd_X540"); if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == IXGBE_SUCCESS) { status = ixgbe_read_eerd_generic(hw, offset, data); hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); } else { status = IXGBE_ERR_SWFW_SYNC; } return status; } /** * ixgbe_read_eerd_buffer_X540- Read EEPROM word(s) using EERD * @hw: pointer to hardware structure * @offset: offset of word in the EEPROM to read * @words: number of words * @data: word(s) read from the EEPROM * * Reads a 16 bit word(s) from the EEPROM using the EERD register. **/ s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { s32 status = IXGBE_SUCCESS; DEBUGFUNC("ixgbe_read_eerd_buffer_X540"); if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == IXGBE_SUCCESS) { status = ixgbe_read_eerd_buffer_generic(hw, offset, words, data); hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); } else { status = IXGBE_ERR_SWFW_SYNC; } return status; } /** * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR * @hw: pointer to hardware structure * @offset: offset of word in the EEPROM to write * @data: word write to the EEPROM * * Write a 16 bit word to the EEPROM using the EEWR register. **/ s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data) { s32 status = IXGBE_SUCCESS; DEBUGFUNC("ixgbe_write_eewr_X540"); if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == IXGBE_SUCCESS) { status = ixgbe_write_eewr_generic(hw, offset, data); hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); } else { status = IXGBE_ERR_SWFW_SYNC; } return status; } /** * ixgbe_write_eewr_buffer_X540 - Write EEPROM word(s) using EEWR * @hw: pointer to hardware structure * @offset: offset of word in the EEPROM to write * @words: number of words * @data: word(s) write to the EEPROM * * Write a 16 bit word(s) to the EEPROM using the EEWR register. **/ s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { s32 status = IXGBE_SUCCESS; DEBUGFUNC("ixgbe_write_eewr_buffer_X540"); if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == IXGBE_SUCCESS) { status = ixgbe_write_eewr_buffer_generic(hw, offset, words, data); hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); } else { status = IXGBE_ERR_SWFW_SYNC; } return status; } /** * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum * * This function does not use synchronization for EERD and EEWR. It can * be used internally by function which utilize ixgbe_acquire_swfw_sync_X540. * * @hw: pointer to hardware structure * * Returns a negative error code on error, or the 16-bit checksum **/ s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) { u16 i, j; u16 checksum = 0; u16 length = 0; u16 pointer = 0; u16 word = 0; u16 ptr_start = IXGBE_PCIE_ANALOG_PTR; /* Do not use hw->eeprom.ops.read because we do not want to take * the synchronization semaphores here. Instead use * ixgbe_read_eerd_generic */ DEBUGFUNC("ixgbe_calc_eeprom_checksum_X540"); /* Include 0x0 up to IXGBE_EEPROM_CHECKSUM; do not include the * checksum itself */ for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { if (ixgbe_read_eerd_generic(hw, i, &word)) { DEBUGOUT("EEPROM read failed\n"); return IXGBE_ERR_EEPROM; } checksum += word; } /* Include all data from pointers 0x3, 0x6-0xE. This excludes the * FW, PHY module, and PCIe Expansion/Option ROM pointers. 
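The checksum scheme used by the calc/validate/update routines here is a simple complement-to-a-constant: the stored checksum word is chosen so that the 16-bit sum of all covered words plus the checksum equals IXGBE_EEPROM_SUM. A worked example; 0xBABA is the constant as I recall it from the headers, so treat that value as an assumption:

#include <stdint.h>
#include <stdio.h>

#define EEPROM_SUM 0xBABAu   /* assumed value of IXGBE_EEPROM_SUM */

int main(void)
{
    uint16_t words[] = { 0x1234, 0xABCD, 0x0042 };   /* covered content */
    uint16_t sum = 0;

    for (unsigned i = 0; i < sizeof(words) / sizeof(words[0]); i++)
        sum += words[i];                 /* 16-bit wraparound, as in the driver */

    uint16_t checksum = (uint16_t)(EEPROM_SUM - sum);        /* stored word */
    printf("stored checksum = 0x%04X\n", checksum);          /* 0xFC77 */
    printf("verify: 0x%04X\n", (uint16_t)(sum + checksum));  /* 0xBABA */
    return 0;
}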
*/ for (i = ptr_start; i < IXGBE_FW_PTR; i++) { if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) continue; if (ixgbe_read_eerd_generic(hw, i, &pointer)) { DEBUGOUT("EEPROM read failed\n"); return IXGBE_ERR_EEPROM; } /* Skip pointer section if the pointer is invalid. */ if (pointer == 0xFFFF || pointer == 0 || pointer >= hw->eeprom.word_size) continue; if (ixgbe_read_eerd_generic(hw, pointer, &length)) { DEBUGOUT("EEPROM read failed\n"); return IXGBE_ERR_EEPROM; } /* Skip pointer section if length is invalid. */ if (length == 0xFFFF || length == 0 || (pointer + length) >= hw->eeprom.word_size) continue; for (j = pointer + 1; j <= pointer + length; j++) { if (ixgbe_read_eerd_generic(hw, j, &word)) { DEBUGOUT("EEPROM read failed\n"); return IXGBE_ERR_EEPROM; } checksum += word; } } checksum = (u16)IXGBE_EEPROM_SUM - checksum; return (s32)checksum; } /** * ixgbe_validate_eeprom_checksum_X540 - Validate EEPROM checksum * @hw: pointer to hardware structure * @checksum_val: calculated checksum * * Performs checksum calculation and validates the EEPROM checksum. If the * caller does not need checksum_val, the value can be NULL. **/ s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, u16 *checksum_val) { s32 status; u16 checksum; u16 read_checksum = 0; DEBUGFUNC("ixgbe_validate_eeprom_checksum_X540"); /* Read the first word from the EEPROM. If this times out or fails, do * not continue or we could be in for a very long wait while every * EEPROM read fails */ status = hw->eeprom.ops.read(hw, 0, &checksum); if (status) { DEBUGOUT("EEPROM read failed\n"); return status; } if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) return IXGBE_ERR_SWFW_SYNC; status = hw->eeprom.ops.calc_checksum(hw); if (status < 0) goto out; checksum = (u16)(status & 0xffff); /* Do not use hw->eeprom.ops.read because we do not want to take * the synchronization semaphores twice here. */ status = ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); if (status) goto out; /* Verify read checksum from EEPROM is the same as * calculated checksum */ if (read_checksum != checksum) { ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE, "Invalid EEPROM checksum"); status = IXGBE_ERR_EEPROM_CHECKSUM; } /* If the user cares, return the calculated checksum */ if (checksum_val) *checksum_val = checksum; out: hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); return status; } /** * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash * @hw: pointer to hardware structure * * After writing EEPROM to shadow RAM using EEWR register, software calculates * checksum and updates the EEPROM and instructs the hardware to update * the flash. **/ s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw) { s32 status; u16 checksum; DEBUGFUNC("ixgbe_update_eeprom_checksum_X540"); /* Read the first word from the EEPROM. If this times out or fails, do * not continue or we could be in for a very long wait while every * EEPROM read fails */ status = hw->eeprom.ops.read(hw, 0, &checksum); if (status) { DEBUGOUT("EEPROM read failed\n"); return status; } if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) return IXGBE_ERR_SWFW_SYNC; status = hw->eeprom.ops.calc_checksum(hw); if (status < 0) goto out; checksum = (u16)(status & 0xffff); /* Do not use hw->eeprom.ops.write because we do not want to * take the synchronization semaphores twice here. 
*/ status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, checksum); if (status) goto out; status = ixgbe_update_flash_X540(hw); out: hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); return status; } /** * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device * @hw: pointer to hardware structure * * Set FLUP (bit 23) of the EEC register to instruct Hardware to copy * EEPROM from shadow RAM to the flash device. **/ s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw) { u32 flup; s32 status; DEBUGFUNC("ixgbe_update_flash_X540"); status = ixgbe_poll_flash_update_done_X540(hw); if (status == IXGBE_ERR_EEPROM) { DEBUGOUT("Flash update time out\n"); goto out; } flup = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)) | IXGBE_EEC_FLUP; IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), flup); status = ixgbe_poll_flash_update_done_X540(hw); if (status == IXGBE_SUCCESS) DEBUGOUT("Flash update complete\n"); else DEBUGOUT("Flash update time out\n"); if (hw->mac.type == ixgbe_mac_X540 && hw->revision_id == 0) { flup = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); if (flup & IXGBE_EEC_SEC1VAL) { flup |= IXGBE_EEC_FLUP; IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), flup); } status = ixgbe_poll_flash_update_done_X540(hw); if (status == IXGBE_SUCCESS) DEBUGOUT("Flash update complete\n"); else DEBUGOUT("Flash update time out\n"); } out: return status; } /** * ixgbe_poll_flash_update_done_X540 - Poll flash update status * @hw: pointer to hardware structure * * Polls the FLUDONE (bit 26) of the EEC Register to determine when the * flash update is done. **/ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw) { u32 i; u32 reg; s32 status = IXGBE_ERR_EEPROM; DEBUGFUNC("ixgbe_poll_flash_update_done_X540"); for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) { reg = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); if (reg & IXGBE_EEC_FLUDONE) { status = IXGBE_SUCCESS; break; } msec_delay(5); } if (i == IXGBE_FLUDONE_ATTEMPTS) ERROR_REPORT1(IXGBE_ERROR_POLLING, "Flash update status polling timed out"); return status; } /** * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore * @hw: pointer to hardware structure * @mask: Mask to specify which semaphore to acquire * * Acquires the SWFW semaphore through the SW_FW_SYNC register for * the specified function (CSR, PHY0, PHY1, NVM, Flash) **/ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) { u32 swmask = mask & IXGBE_GSSR_NVM_PHY_MASK; u32 fwmask = swmask << 5; u32 swi2c_mask = mask & IXGBE_GSSR_I2C_MASK; u32 timeout = 200; u32 hwmask = 0; u32 swfw_sync; u32 i; DEBUGFUNC("ixgbe_acquire_swfw_sync_X540"); if (swmask & IXGBE_GSSR_EEP_SM) hwmask |= IXGBE_GSSR_FLASH_SM; /* SW only mask doesn't have FW bit pair */ if (mask & IXGBE_GSSR_SW_MNG_SM) swmask |= IXGBE_GSSR_SW_MNG_SM; swmask |= swi2c_mask; fwmask |= swi2c_mask << 2; if (hw->mac.type >= ixgbe_mac_X550) timeout = 1000; for (i = 0; i < timeout; i++) { /* SW NVM semaphore bit is used for access to all * SW_FW_SYNC bits (not just NVM) */ if (ixgbe_get_swfw_sync_semaphore(hw)) { DEBUGOUT("Failed to get NVM access and register semaphore, returning IXGBE_ERR_SWFW_SYNC\n"); return IXGBE_ERR_SWFW_SYNC; } swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); if (!(swfw_sync & (fwmask | swmask | hwmask))) { swfw_sync |= swmask; IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swfw_sync); ixgbe_release_swfw_sync_semaphore(hw); return IXGBE_SUCCESS; } /* Firmware currently using resource (fwmask), hardware * currently using resource (hwmask), or other software * thread currently using resource (swmask) */
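The acquire path above has three stages: poll SW_FW_SYNC until no FW, HW, or SW owner holds the requested bits; after the timeout, claim the SW bits anyway if only FW/HW appear wedged; and finally, if another SW owner is stuck, strip all SW flags and fail so the caller can retry. A skeleton of the first two stages, with a plain integer standing in for the register:

#include <stdbool.h>
#include <stdio.h>

/* Pretend FW holds one bit, so the clean path never succeeds. */
static unsigned swfw_sync = 0x0020;

static bool try_acquire(unsigned swmask, unsigned fwmask, unsigned hwmask)
{
    if (swfw_sync & (fwmask | swmask | hwmask))
        return false;                  /* someone owns it, retry */
    swfw_sync |= swmask;               /* claim our SW bit(s) */
    return true;
}

int main(void)
{
    const unsigned swmask = 0x0001, fwmask = 0x0020, hwmask = 0;

    for (int i = 0; i < 3; i++) {      /* stands in for the 200/1000 polls */
        if (try_acquire(swmask, fwmask, hwmask)) {
            printf("acquired cleanly\n");
            return 0;
        }
    }
    /* timeout: assume FW is wedged and take the SW bits anyway */
    swfw_sync |= swmask;
    printf("forced acquire, swfw_sync=0x%04X\n", swfw_sync);
    return 0;
}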
ixgbe_release_swfw_sync_semaphore(hw); msec_delay(5); } /* If the resource is not released by the FW/HW the SW can assume that * the FW/HW malfunctions. In that case the SW should set the SW bit(s) * of the requested resource(s) while ignoring the corresponding FW/HW * bits in the SW_FW_SYNC register. */ if (ixgbe_get_swfw_sync_semaphore(hw)) { DEBUGOUT("Failed to get NVM semaphore and register semaphore while forcefully ignoring FW semaphore bit(s) and setting SW semaphore bit(s), returning IXGBE_ERR_SWFW_SYNC\n"); return IXGBE_ERR_SWFW_SYNC; } swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); if (swfw_sync & (fwmask | hwmask)) { swfw_sync |= swmask; IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swfw_sync); ixgbe_release_swfw_sync_semaphore(hw); msec_delay(5); return IXGBE_SUCCESS; } /* If the resource is not released by other SW the SW can assume that * the other SW malfunctions. In that case the SW should clear all SW * flags that it does not own and then repeat the whole process once * again. */ if (swfw_sync & swmask) { u32 rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM | IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM; if (swi2c_mask) rmask |= IXGBE_GSSR_I2C_MASK; ixgbe_release_swfw_sync_X540(hw, rmask); ixgbe_release_swfw_sync_semaphore(hw); DEBUGOUT("Resource not released by other SW, returning IXGBE_ERR_SWFW_SYNC\n"); return IXGBE_ERR_SWFW_SYNC; } ixgbe_release_swfw_sync_semaphore(hw); DEBUGOUT("Returning error IXGBE_ERR_SWFW_SYNC\n"); return IXGBE_ERR_SWFW_SYNC; } /** * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore * @hw: pointer to hardware structure * @mask: Mask to specify which semaphore to release * * Releases the SWFW semaphore through the SW_FW_SYNC register * for the specified function (CSR, PHY0, PHY1, NVM, Flash) **/ void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) { u32 swmask = mask & (IXGBE_GSSR_NVM_PHY_MASK | IXGBE_GSSR_SW_MNG_SM); u32 swfw_sync; DEBUGFUNC("ixgbe_release_swfw_sync_X540"); if (mask & IXGBE_GSSR_I2C_MASK) swmask |= mask & IXGBE_GSSR_I2C_MASK; ixgbe_get_swfw_sync_semaphore(hw); swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); swfw_sync &= ~swmask; IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swfw_sync); ixgbe_release_swfw_sync_semaphore(hw); msec_delay(2); } /** * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore * @hw: pointer to hardware structure * * Sets the hardware semaphores so SW/FW can gain control of shared resources **/ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw) { s32 status = IXGBE_ERR_EEPROM; u32 timeout = 2000; u32 i; u32 swsm; DEBUGFUNC("ixgbe_get_swfw_sync_semaphore"); /* Get SMBI software semaphore between device drivers first */ for (i = 0; i < timeout; i++) { /* * If the SMBI bit is 0 when we read it, then the bit will be * set and we have the semaphore */ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); if (!(swsm & IXGBE_SWSM_SMBI)) { status = IXGBE_SUCCESS; break; } usec_delay(50); } /* Now get the semaphore between SW/FW through the REGSMP bit */ if (status == IXGBE_SUCCESS) { for (i = 0; i < timeout; i++) { swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); if (!(swsm & IXGBE_SWFW_REGSMP)) break; usec_delay(50); } /* * Release semaphores and return error if SW NVM semaphore * was not granted because we don't have access to the EEPROM */ if (i >= timeout) { ERROR_REPORT1(IXGBE_ERROR_POLLING, "REGSMP Software NVM semaphore not granted.\n"); ixgbe_release_swfw_sync_semaphore(hw); status = IXGBE_ERR_EEPROM; } } else {
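/* Reaching this branch means the SMBI bit never read clear within
 * timeout polls (2000 * 50 usec = 100 ms): the other port's driver
 * instance still owns the inter-driver semaphore. */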
ERROR_REPORT1(IXGBE_ERROR_POLLING, "Software semaphore SMBI between device drivers " "not granted.\n"); } return status; } /** * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore * @hw: pointer to hardware structure * * This function clears hardware semaphore bits. **/ static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw) { u32 swsm; DEBUGFUNC("ixgbe_release_swfw_sync_semaphore"); /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */ swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); swsm &= ~IXGBE_SWFW_REGSMP; IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swsm); swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); swsm &= ~IXGBE_SWSM_SMBI; IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm); IXGBE_WRITE_FLUSH(hw); } /** * ixgbe_init_swfw_sync_X540 - Reset hardware semaphore state * @hw: pointer to hardware structure * * This function resets hardware semaphore bits for a semaphore that may * have been left locked due to a catastrophic failure. **/ void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw) { u32 rmask; /* First try to grab the semaphore but we don't need to bother * looking to see whether we got the lock or not since we do * the same thing regardless of whether we got the lock or not. * We got the lock - we release it. * We time out trying to get the lock - we force its release. */ ixgbe_get_swfw_sync_semaphore(hw); ixgbe_release_swfw_sync_semaphore(hw); /* Acquire and release all software resources. */ rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM | IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM | IXGBE_GSSR_SW_MNG_SM; rmask |= IXGBE_GSSR_I2C_MASK; ixgbe_acquire_swfw_sync_X540(hw, rmask); ixgbe_release_swfw_sync_X540(hw, rmask); } /** * ixgbe_blink_led_start_X540 - Blink LED based on index. * @hw: pointer to hardware structure * @index: led number to blink * * Devices that implement the version 2 interface: * X540 **/ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) { u32 macc_reg; u32 ledctl_reg; ixgbe_link_speed speed; bool link_up; DEBUGFUNC("ixgbe_blink_led_start_X540"); if (index > 3) return IXGBE_ERR_PARAM; /* * Link should be up in order for the blink bit in the LED control * register to work. Force link and speed in the MAC if link is down. * This will be reversed when we stop the blinking. */ - hw->mac.ops.check_link(hw, &speed, &link_up, FALSE); - if (link_up == FALSE) { + hw->mac.ops.check_link(hw, &speed, &link_up, false); + if (link_up == false) { macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC); macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS; IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); } /* Set the LED to LINK_UP + BLINK. */ ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); ledctl_reg |= IXGBE_LED_BLINK(index); IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg); IXGBE_WRITE_FLUSH(hw); return IXGBE_SUCCESS; } /** * ixgbe_blink_led_stop_X540 - Stop blinking LED based on index. * @hw: pointer to hardware structure * @index: led number to stop blinking * * Devices that implement the version 2 interface: * X540 **/ s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) { u32 macc_reg; u32 ledctl_reg; if (index > 3) return IXGBE_ERR_PARAM; DEBUGFUNC("ixgbe_blink_led_stop_X540"); /* Restore the LED to its default value.
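 * "Default" here means link-active mode (IXGBE_LED_LINK_ACTIVE) with
 * the per-index blink bit cleared, matching the LEDCTL writes below.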
*/ ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); ledctl_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); ledctl_reg &= ~IXGBE_LED_BLINK(index); IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg); /* Unforce link and speed in the MAC. */ macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC); macc_reg &= ~(IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS); IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); IXGBE_WRITE_FLUSH(hw); return IXGBE_SUCCESS; } diff --git a/sys/dev/ixgbe/ixgbe_x550.c b/sys/dev/ixgbe/ixgbe_x550.c index aa91c4da7019..bcfbe14b30d2 100644 --- a/sys/dev/ixgbe/ixgbe_x550.c +++ b/sys/dev/ixgbe/ixgbe_x550.c @@ -1,4694 +1,4694 @@ /****************************************************************************** Copyright (c) 2001-2017, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #include "ixgbe_x550.h" #include "ixgbe_x540.h" #include "ixgbe_type.h" #include "ixgbe_api.h" #include "ixgbe_common.h" #include "ixgbe_phy.h" static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed); static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *, u32 mask); static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *, u32 mask); static s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw); /** * ixgbe_init_ops_X550 - Inits func ptrs and MAC type * @hw: pointer to hardware structure * * Initialize the function pointers and assign the MAC type for X550. * Does not touch the hardware. 
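 * Builds on ixgbe_init_ops_X540() and then overrides the EEPROM,
 * DMA coalescing (DMAC) and malicious driver detection (MDD)
 * operations with the X550-specific handlers assigned below.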
**/ s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; struct ixgbe_eeprom_info *eeprom = &hw->eeprom; s32 ret_val; DEBUGFUNC("ixgbe_init_ops_X550"); ret_val = ixgbe_init_ops_X540(hw); mac->ops.dmac_config = ixgbe_dmac_config_X550; mac->ops.dmac_config_tcs = ixgbe_dmac_config_tcs_X550; mac->ops.dmac_update_tcs = ixgbe_dmac_update_tcs_X550; mac->ops.setup_eee = NULL; mac->ops.set_source_address_pruning = ixgbe_set_source_address_pruning_X550; mac->ops.set_ethertype_anti_spoofing = ixgbe_set_ethertype_anti_spoofing_X550; mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic; eeprom->ops.init_params = ixgbe_init_eeprom_params_X550; eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550; eeprom->ops.read = ixgbe_read_ee_hostif_X550; eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550; eeprom->ops.write = ixgbe_write_ee_hostif_X550; eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550; eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550; eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550; mac->ops.disable_mdd = ixgbe_disable_mdd_X550; mac->ops.enable_mdd = ixgbe_enable_mdd_X550; mac->ops.mdd_event = ixgbe_mdd_event_X550; mac->ops.restore_mdd_vf = ixgbe_restore_mdd_vf_X550; mac->ops.fw_recovery_mode = ixgbe_fw_recovery_mode_X550; mac->ops.disable_rx = ixgbe_disable_rx_x550; /* Manageability interface */ mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550; switch (hw->device_id) { case IXGBE_DEV_ID_X550EM_X_1G_T: hw->mac.ops.led_on = NULL; hw->mac.ops.led_off = NULL; break; case IXGBE_DEV_ID_X550EM_X_10G_T: case IXGBE_DEV_ID_X550EM_A_10G_T: hw->mac.ops.led_on = ixgbe_led_on_t_X550em; hw->mac.ops.led_off = ixgbe_led_off_t_X550em; break; default: break; } return ret_val; } /** * ixgbe_read_cs4227 - Read CS4227 register * @hw: pointer to hardware structure * @reg: register number to read * @value: pointer to receive value read * * Returns status code **/ static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value) { return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value); } /** * ixgbe_write_cs4227 - Write CS4227 register * @hw: pointer to hardware structure * @reg: register number to write * @value: value to write to register * * Returns status code **/ static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value) { return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value); } /** * ixgbe_read_pe - Read register from port expander * @hw: pointer to hardware structure * @reg: register number to read * @value: pointer to receive read value * * Returns status code **/ static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value) { s32 status; status = ixgbe_read_i2c_byte_unlocked(hw, reg, IXGBE_PE, value); if (status != IXGBE_SUCCESS) ERROR_REPORT2(IXGBE_ERROR_CAUTION, "port expander access failed with %d\n", status); return status; } /** * ixgbe_write_pe - Write register to port expander * @hw: pointer to hardware structure * @reg: register number to write * @value: value to write * * Returns status code **/ static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value) { s32 status; status = ixgbe_write_i2c_byte_unlocked(hw, reg, IXGBE_PE, value); if (status != IXGBE_SUCCESS) ERROR_REPORT2(IXGBE_ERROR_CAUTION, "port expander access failed with %d\n", status); return status; } /** * ixgbe_reset_cs4227 - Reset CS4227 using port expander * @hw: pointer to hardware structure * * This function assumes that the caller has acquired the proper semaphore.
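 * The sequence below pulses IXGBE_PE_BIT1 on the port expander to
 * hard-reset the CS4227 and then polls the EFUSE/EEPROM status words
 * until the device reports IXGBE_CS4227_EEPROM_LOAD_OK.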
* Returns error code **/ static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw) { s32 status; u32 retry; u16 value; u8 reg; /* Trigger hard reset. */ status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, ®); if (status != IXGBE_SUCCESS) return status; reg |= IXGBE_PE_BIT1; status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg); if (status != IXGBE_SUCCESS) return status; status = ixgbe_read_pe(hw, IXGBE_PE_CONFIG, ®); if (status != IXGBE_SUCCESS) return status; reg &= ~IXGBE_PE_BIT1; status = ixgbe_write_pe(hw, IXGBE_PE_CONFIG, reg); if (status != IXGBE_SUCCESS) return status; status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, ®); if (status != IXGBE_SUCCESS) return status; reg &= ~IXGBE_PE_BIT1; status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg); if (status != IXGBE_SUCCESS) return status; usec_delay(IXGBE_CS4227_RESET_HOLD); status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, ®); if (status != IXGBE_SUCCESS) return status; reg |= IXGBE_PE_BIT1; status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg); if (status != IXGBE_SUCCESS) return status; /* Wait for the reset to complete. */ msec_delay(IXGBE_CS4227_RESET_DELAY); for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) { status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EFUSE_STATUS, &value); if (status == IXGBE_SUCCESS && value == IXGBE_CS4227_EEPROM_LOAD_OK) break; msec_delay(IXGBE_CS4227_CHECK_DELAY); } if (retry == IXGBE_CS4227_RETRIES) { ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE, "CS4227 reset did not complete."); return IXGBE_ERR_PHY; } status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value); if (status != IXGBE_SUCCESS || !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) { ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE, "CS4227 EEPROM did not load successfully."); return IXGBE_ERR_PHY; } return IXGBE_SUCCESS; } /** * ixgbe_check_cs4227 - Check CS4227 and reset as needed * @hw: pointer to hardware structure **/ static void ixgbe_check_cs4227(struct ixgbe_hw *hw) { s32 status = IXGBE_SUCCESS; u32 swfw_mask = hw->phy.phy_semaphore_mask; u16 value = 0; u8 retry; for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) { status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); if (status != IXGBE_SUCCESS) { ERROR_REPORT2(IXGBE_ERROR_CAUTION, "semaphore failed with %d", status); msec_delay(IXGBE_CS4227_CHECK_DELAY); continue; } /* Get status of reset flow. */ status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value); if (status == IXGBE_SUCCESS && value == IXGBE_CS4227_RESET_COMPLETE) goto out; if (status != IXGBE_SUCCESS || value != IXGBE_CS4227_RESET_PENDING) break; /* Reset is pending. Wait and check again. */ hw->mac.ops.release_swfw_sync(hw, swfw_mask); msec_delay(IXGBE_CS4227_CHECK_DELAY); } /* If still pending, assume other instance failed. */ if (retry == IXGBE_CS4227_RETRIES) { status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); if (status != IXGBE_SUCCESS) { ERROR_REPORT2(IXGBE_ERROR_CAUTION, "semaphore failed with %d", status); return; } } /* Reset the CS4227. */ status = ixgbe_reset_cs4227(hw); if (status != IXGBE_SUCCESS) { ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, "CS4227 reset failed: %d", status); goto out; } /* Reset takes so long, temporarily release semaphore in case the * other driver instance is waiting for the reset indication. */ ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH, IXGBE_CS4227_RESET_PENDING); hw->mac.ops.release_swfw_sync(hw, swfw_mask); msec_delay(10); status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); if (status != IXGBE_SUCCESS) { ERROR_REPORT2(IXGBE_ERROR_CAUTION, "semaphore failed with %d", status); return; } /* Record completion for next time. 
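 * The scratch register is left at IXGBE_CS4227_RESET_COMPLETE so the
 * driver instance on the other port can skip its own reset attempt.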
*/ status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH, IXGBE_CS4227_RESET_COMPLETE); out: hw->mac.ops.release_swfw_sync(hw, swfw_mask); msec_delay(hw->eeprom.semaphore_delay); } /** * ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control * @hw: pointer to hardware structure **/ static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw) { u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); if (hw->bus.lan_id) { esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1); esdp |= IXGBE_ESDP_SDP1_DIR; } esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR); IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); IXGBE_WRITE_FLUSH(hw); } /** * ixgbe_identify_phy_x550em - Get PHY type based on device id * @hw: pointer to hardware structure * * Returns error code */ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) { hw->mac.ops.set_lan_id(hw); ixgbe_read_mng_if_sel_x550em(hw); switch (hw->device_id) { case IXGBE_DEV_ID_X550EM_A_SFP: return ixgbe_identify_module_generic(hw); case IXGBE_DEV_ID_X550EM_X_SFP: /* set up for CS4227 usage */ ixgbe_setup_mux_ctl(hw); ixgbe_check_cs4227(hw); /* Fallthrough */ case IXGBE_DEV_ID_X550EM_A_SFP_N: return ixgbe_identify_module_generic(hw); break; case IXGBE_DEV_ID_X550EM_X_KX4: hw->phy.type = ixgbe_phy_x550em_kx4; break; case IXGBE_DEV_ID_X550EM_X_XFI: hw->phy.type = ixgbe_phy_x550em_xfi; break; case IXGBE_DEV_ID_X550EM_X_KR: case IXGBE_DEV_ID_X550EM_A_KR: case IXGBE_DEV_ID_X550EM_A_KR_L: hw->phy.type = ixgbe_phy_x550em_kr; break; case IXGBE_DEV_ID_X550EM_A_10G_T: case IXGBE_DEV_ID_X550EM_X_10G_T: return ixgbe_identify_phy_generic(hw); case IXGBE_DEV_ID_X550EM_X_1G_T: hw->phy.type = ixgbe_phy_ext_1g_t; break; case IXGBE_DEV_ID_X550EM_A_1G_T: case IXGBE_DEV_ID_X550EM_A_1G_T_L: hw->phy.type = ixgbe_phy_fw; if (hw->bus.lan_id) hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM; else hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM; break; default: break; } return IXGBE_SUCCESS; } /** * ixgbe_fw_phy_activity - Perform an activity on a PHY * @hw: pointer to hardware structure * @activity: activity to perform * @data: Pointer to 4 32-bit words of data */ s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity, u32 (*data)[FW_PHY_ACT_DATA_COUNT]) { union { struct ixgbe_hic_phy_activity_req cmd; struct ixgbe_hic_phy_activity_resp rsp; } hic; u16 retries = FW_PHY_ACT_RETRIES; s32 rc; u16 i; do { memset(&hic, 0, sizeof(hic)); hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD; hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN; hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; hic.cmd.port_number = hw->bus.lan_id; hic.cmd.activity_id = IXGBE_CPU_TO_LE16(activity); for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i) hic.cmd.data[i] = IXGBE_CPU_TO_BE32((*data)[i]); rc = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd, sizeof(hic.cmd), IXGBE_HI_COMMAND_TIMEOUT, - TRUE); + true); if (rc != IXGBE_SUCCESS) return rc; if (hic.rsp.hdr.cmd_or_resp.ret_status == FW_CEM_RESP_STATUS_SUCCESS) { for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i) (*data)[i] = IXGBE_BE32_TO_CPU(hic.rsp.data[i]); return IXGBE_SUCCESS; } usec_delay(20); --retries; } while (retries > 0); return IXGBE_ERR_HOST_INTERFACE_COMMAND; } static const struct { u16 fw_speed; ixgbe_link_speed phy_speed; } ixgbe_fw_map[] = { { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL }, { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL }, { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL }, { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL }, { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL }, { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL }, }; /** * 
ixgbe_get_phy_id_fw - Get the phy ID via firmware command * @hw: pointer to hardware structure * * Returns error code */ static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw) { u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; u16 phy_speeds; u16 phy_id_lo; s32 rc; u16 i; rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info); if (rc) return rc; hw->phy.speeds_supported = 0; phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK; for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) { if (phy_speeds & ixgbe_fw_map[i].fw_speed) hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed; } if (!hw->phy.autoneg_advertised) hw->phy.autoneg_advertised = hw->phy.speeds_supported; hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK; phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK; hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK; hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK; if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK) return IXGBE_ERR_PHY_ADDR_INVALID; return IXGBE_SUCCESS; } /** * ixgbe_identify_phy_fw - Get PHY type based on firmware command * @hw: pointer to hardware structure * * Returns error code */ static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw) { if (hw->bus.lan_id) hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; else hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; hw->phy.type = ixgbe_phy_fw; hw->phy.ops.read_reg = NULL; hw->phy.ops.write_reg = NULL; return ixgbe_get_phy_id_fw(hw); } /** * ixgbe_shutdown_fw_phy - Shutdown a firmware-controlled PHY * @hw: pointer to hardware structure * * Returns error code */ s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw) { u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 }; setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF; return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup); } static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 *phy_data) { UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, *phy_data); return IXGBE_NOT_IMPLEMENTED; } static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 phy_data) { UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, phy_data); return IXGBE_NOT_IMPLEMENTED; } /** * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation * @hw: pointer to the hardware structure * @addr: I2C bus address to read from * @reg: I2C device register to read from * @val: pointer to location to receive read value * * Returns an error code on error. **/ static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val) { - return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, TRUE); + return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true); } /** * ixgbe_read_i2c_combined_generic_unlocked - Do I2C read combined operation * @hw: pointer to the hardware structure * @addr: I2C bus address to read from * @reg: I2C device register to read from * @val: pointer to location to receive read value * * Returns an error code on error. **/ static s32 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val) { - return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, FALSE); + return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, false); } /** * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation * @hw: pointer to the hardware structure * @addr: I2C bus address to write to * @reg: I2C device register to write to * @val: value to write * * Returns an error code on error. 
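 * The locked variant takes the semaphore internally; the _unlocked
 * variant below expects the caller to hold it (the final bool passed
 * to the _int helper selects between the two behaviors).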
**/ static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val) { - return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, TRUE); + return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true); } /** * ixgbe_write_i2c_combined_generic_unlocked - Do I2C write combined operation * @hw: pointer to the hardware structure * @addr: I2C bus address to write to * @reg: I2C device register to write to * @val: value to write * * Returns an error code on error. **/ static s32 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val) { - return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, FALSE); + return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false); } /** * ixgbe_init_ops_X550EM - Inits func ptrs and MAC type * @hw: pointer to hardware structure * * Initialize the function pointers and assign the MAC type for X550EM. * Does not touch the hardware. **/ s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; struct ixgbe_eeprom_info *eeprom = &hw->eeprom; struct ixgbe_phy_info *phy = &hw->phy; s32 ret_val; DEBUGFUNC("ixgbe_init_ops_X550EM"); /* Similar to X550 so start there. */ ret_val = ixgbe_init_ops_X550(hw); /* Since this function eventually calls * ixgbe_init_ops_X540 by design, we are setting * the pointers to NULL explicitly here to overwrite * the values being set in the x540 function. */ /* Bypass not supported in x550EM */ mac->ops.bypass_rw = NULL; mac->ops.bypass_valid_rd = NULL; mac->ops.bypass_set = NULL; mac->ops.bypass_rd_eep = NULL; /* FCOE not supported in x550EM */ mac->ops.get_san_mac_addr = NULL; mac->ops.set_san_mac_addr = NULL; mac->ops.get_wwn_prefix = NULL; mac->ops.get_fcoe_boot_status = NULL; /* IPsec not supported in x550EM */ mac->ops.disable_sec_rx_path = NULL; mac->ops.enable_sec_rx_path = NULL; /* AUTOC register is not present in x550EM.
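 * so the prot_autoc_read/prot_autoc_write handlers are cleared below.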
*/ mac->ops.prot_autoc_read = NULL; mac->ops.prot_autoc_write = NULL; /* X550EM bus type is internal */ hw->bus.type = ixgbe_bus_type_internal; mac->ops.get_bus_info = ixgbe_get_bus_info_X550em; mac->ops.get_media_type = ixgbe_get_media_type_X550em; mac->ops.setup_sfp = ixgbe_setup_sfp_modules_X550em; mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_X550em; mac->ops.reset_hw = ixgbe_reset_hw_X550em; mac->ops.get_supported_physical_layer = ixgbe_get_supported_physical_layer_X550em; if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) mac->ops.setup_fc = ixgbe_setup_fc_generic; else mac->ops.setup_fc = ixgbe_setup_fc_X550em; /* PHY */ phy->ops.init = ixgbe_init_phy_ops_X550em; switch (hw->device_id) { case IXGBE_DEV_ID_X550EM_A_1G_T: case IXGBE_DEV_ID_X550EM_A_1G_T_L: mac->ops.setup_fc = NULL; phy->ops.identify = ixgbe_identify_phy_fw; phy->ops.set_phy_power = NULL; phy->ops.get_firmware_version = NULL; break; case IXGBE_DEV_ID_X550EM_X_1G_T: mac->ops.setup_fc = NULL; phy->ops.identify = ixgbe_identify_phy_x550em; phy->ops.set_phy_power = NULL; break; default: phy->ops.identify = ixgbe_identify_phy_x550em; } if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper) phy->ops.set_phy_power = NULL; /* EEPROM */ eeprom->ops.init_params = ixgbe_init_eeprom_params_X540; eeprom->ops.read = ixgbe_read_ee_hostif_X550; eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550; eeprom->ops.write = ixgbe_write_ee_hostif_X550; eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550; eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550; eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550; eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550; return ret_val; } /** * ixgbe_setup_fw_link - Setup firmware-controlled PHYs * @hw: pointer to hardware structure */ static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw) { u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 }; s32 rc; u16 i; if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw)) return 0; if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); return IXGBE_ERR_INVALID_LINK_SETTINGS; } switch (hw->fc.requested_mode) { case ixgbe_fc_full: setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX << FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; break; case ixgbe_fc_rx_pause: setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX << FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; break; case ixgbe_fc_tx_pause: setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX << FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; break; default: break; } for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) { if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed) setup[0] |= (u32)(ixgbe_fw_map[i].fw_speed); } setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN; if (hw->phy.eee_speeds_advertised) setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE; rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup); if (rc) return rc; if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN) return IXGBE_ERR_OVERTEMP; return IXGBE_SUCCESS; } /** * ixgbe_fc_autoneg_fw - Set up flow control for FW-controlled PHYs * @hw: pointer to hardware structure * * Called at init time to set up flow control.
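 * Maps ixgbe_fc_default to ixgbe_fc_full and then defers the actual
 * pause-frame negotiation to ixgbe_setup_fw_link().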
*/ static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw) { if (hw->fc.requested_mode == ixgbe_fc_default) hw->fc.requested_mode = ixgbe_fc_full; return ixgbe_setup_fw_link(hw); } /** * ixgbe_setup_eee_fw - Enable/disable EEE support * @hw: pointer to the HW structure * @enable_eee: boolean flag to enable EEE * * Enable/disable EEE based on enable_eee flag. * This function controls EEE for firmware-based PHY implementations. */ static s32 ixgbe_setup_eee_fw(struct ixgbe_hw *hw, bool enable_eee) { if (!!hw->phy.eee_speeds_advertised == enable_eee) return IXGBE_SUCCESS; if (enable_eee) hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported; else hw->phy.eee_speeds_advertised = 0; return hw->phy.ops.setup_link(hw); } /** * ixgbe_init_ops_X550EM_a - Inits func ptrs and MAC type * @hw: pointer to hardware structure * * Initialize the function pointers and assign the MAC type for X550EM_a. * Does not touch the hardware. **/ s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; s32 ret_val; DEBUGFUNC("ixgbe_init_ops_X550EM_a"); /* Start with generic X550EM init */ ret_val = ixgbe_init_ops_X550EM(hw); if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII || hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) { mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550; mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550; } else { mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a; mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a; } mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550a; mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550a; switch (mac->ops.get_media_type(hw)) { case ixgbe_media_type_fiber: mac->ops.setup_fc = NULL; mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a; break; case ixgbe_media_type_backplane: mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a; mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a; break; default: break; } switch (hw->device_id) { case IXGBE_DEV_ID_X550EM_A_1G_T: case IXGBE_DEV_ID_X550EM_A_1G_T_L: mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a; mac->ops.setup_fc = ixgbe_fc_autoneg_fw; mac->ops.setup_eee = ixgbe_setup_eee_fw; hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL | IXGBE_LINK_SPEED_1GB_FULL; hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported; break; default: break; } return ret_val; } /** * ixgbe_init_ops_X550EM_x - Inits func ptrs and MAC type * @hw: pointer to hardware structure * * Initialize the function pointers and assign the MAC type for X550EM_x. * Does not touch the hardware.
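 * Relative to the generic X550EM setup this selects the x550 IOSF
 * side-band accessors, the X550em SWFW sync handlers, and CS4227
 * link register access over I2C (link->addr = IXGBE_CS4227).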
**/ s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; struct ixgbe_link_info *link = &hw->link; s32 ret_val; DEBUGFUNC("ixgbe_init_ops_X550EM_x"); /* Start with generic X550EM init */ ret_val = ixgbe_init_ops_X550EM(hw); mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550; mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550; mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550em; mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550em; link->ops.read_link = ixgbe_read_i2c_combined_generic; link->ops.read_link_unlocked = ixgbe_read_i2c_combined_generic_unlocked; link->ops.write_link = ixgbe_write_i2c_combined_generic; link->ops.write_link_unlocked = ixgbe_write_i2c_combined_generic_unlocked; link->addr = IXGBE_CS4227; if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) { mac->ops.setup_fc = NULL; mac->ops.setup_eee = NULL; mac->ops.init_led_link_act = NULL; } return ret_val; } /** * ixgbe_dmac_config_X550 * @hw: pointer to hardware structure * * Configure DMA coalescing. If enabling dmac, dmac is activated. * When disabling dmac, dmac enable dmac bit is cleared. **/ s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw) { u32 reg, high_pri_tc; DEBUGFUNC("ixgbe_dmac_config_X550"); /* Disable DMA coalescing before configuring */ reg = IXGBE_READ_REG(hw, IXGBE_DMACR); reg &= ~IXGBE_DMACR_DMAC_EN; IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg); /* Disable DMA Coalescing if the watchdog timer is 0 */ if (!hw->mac.dmac_config.watchdog_timer) goto out; ixgbe_dmac_config_tcs_X550(hw); /* Configure DMA Coalescing Control Register */ reg = IXGBE_READ_REG(hw, IXGBE_DMACR); /* Set the watchdog timer in units of 40.96 usec */ reg &= ~IXGBE_DMACR_DMACWT_MASK; reg |= (hw->mac.dmac_config.watchdog_timer * 100) / 4096; reg &= ~IXGBE_DMACR_HIGH_PRI_TC_MASK; /* If fcoe is enabled, set high priority traffic class */ if (hw->mac.dmac_config.fcoe_en) { high_pri_tc = 1 << hw->mac.dmac_config.fcoe_tc; reg |= ((high_pri_tc << IXGBE_DMACR_HIGH_PRI_TC_SHIFT) & IXGBE_DMACR_HIGH_PRI_TC_MASK); } reg |= IXGBE_DMACR_EN_MNG_IND; /* Enable DMA coalescing after configuration */ reg |= IXGBE_DMACR_DMAC_EN; IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg); out: return IXGBE_SUCCESS; } /** * ixgbe_dmac_config_tcs_X550 * @hw: pointer to hardware structure * * Configure DMA coalescing threshold per TC. The dmac enable bit must * be cleared before configuring. **/ s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw) { u32 tc, reg, pb_headroom, rx_pb_size, maxframe_size_kb; DEBUGFUNC("ixgbe_dmac_config_tcs_X550"); /* Configure DMA coalescing enabled */ switch (hw->mac.dmac_config.link_speed) { case IXGBE_LINK_SPEED_10_FULL: case IXGBE_LINK_SPEED_100_FULL: pb_headroom = IXGBE_DMACRXT_100M; break; case IXGBE_LINK_SPEED_1GB_FULL: pb_headroom = IXGBE_DMACRXT_1G; break; default: pb_headroom = IXGBE_DMACRXT_10G; break; } maxframe_size_kb = ((IXGBE_READ_REG(hw, IXGBE_MAXFRS) >> IXGBE_MHADD_MFS_SHIFT) / 1024); /* Set the per Rx packet buffer receive threshold */ for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) { reg = IXGBE_READ_REG(hw, IXGBE_DMCTH(tc)); reg &= ~IXGBE_DMCTH_DMACRXT_MASK; if (tc < hw->mac.dmac_config.num_tcs) { /* Get Rx PB size */ rx_pb_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc)); rx_pb_size = (rx_pb_size & IXGBE_RXPBSIZE_MASK) >> IXGBE_RXPBSIZE_SHIFT; /* Calculate receive buffer threshold in kilobytes */ if (rx_pb_size > pb_headroom) rx_pb_size = rx_pb_size - pb_headroom; else rx_pb_size = 0; /* Minimum of MFS shall be set for DMCTH */ reg |= (rx_pb_size > maxframe_size_kb) ? 
rx_pb_size : maxframe_size_kb; } IXGBE_WRITE_REG(hw, IXGBE_DMCTH(tc), reg); } return IXGBE_SUCCESS; } /** * ixgbe_dmac_update_tcs_X550 * @hw: pointer to hardware structure * * Disables dmac, updates per TC settings, and then enables dmac. **/ s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw) { u32 reg; DEBUGFUNC("ixgbe_dmac_update_tcs_X550"); /* Disable DMA coalescing before configuring */ reg = IXGBE_READ_REG(hw, IXGBE_DMACR); reg &= ~IXGBE_DMACR_DMAC_EN; IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg); ixgbe_dmac_config_tcs_X550(hw); /* Enable DMA coalescing after configuration */ reg = IXGBE_READ_REG(hw, IXGBE_DMACR); reg |= IXGBE_DMACR_DMAC_EN; IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg); return IXGBE_SUCCESS; } /** * ixgbe_init_eeprom_params_X550 - Initialize EEPROM params * @hw: pointer to hardware structure * * Initializes the EEPROM parameters ixgbe_eeprom_info within the * ixgbe_hw struct in order to set up EEPROM access. **/ s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw) { struct ixgbe_eeprom_info *eeprom = &hw->eeprom; u32 eec; u16 eeprom_size; DEBUGFUNC("ixgbe_init_eeprom_params_X550"); if (eeprom->type == ixgbe_eeprom_uninitialized) { eeprom->semaphore_delay = 10; eeprom->type = ixgbe_flash; eec = IXGBE_READ_REG(hw, IXGBE_EEC); eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> IXGBE_EEC_SIZE_SHIFT); eeprom->word_size = 1 << (eeprom_size + IXGBE_EEPROM_WORD_SIZE_SHIFT); DEBUGOUT2("Eeprom params: type = %d, size = %d\n", eeprom->type, eeprom->word_size); } return IXGBE_SUCCESS; } /** * ixgbe_set_source_address_pruning_X550 - Enable/Disable source address pruning * @hw: pointer to hardware structure * @enable: enable or disable source address pruning * @pool: Rx pool to set source address pruning for **/ void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable, unsigned int pool) { u64 pfflp; /* max rx pool is 63 */ if (pool > 63) return; pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL); pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32; if (enable) pfflp |= (1ULL << pool); else pfflp &= ~(1ULL << pool); IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp); IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32)); } /** * ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype anti-spoofing * @hw: pointer to hardware structure * @enable: enable or disable switch for Ethertype anti-spoofing * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing * **/ void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, bool enable, int vf) { int vf_target_reg = vf >> 3; int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT; u32 pfvfspoof; DEBUGFUNC("ixgbe_set_ethertype_anti_spoofing_X550"); pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); if (enable) pfvfspoof |= (1 << vf_target_shift); else pfvfspoof &= ~(1 << vf_target_shift); IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); } /** * ixgbe_iosf_wait - Wait for IOSF command completion * @hw: pointer to hardware structure * @ctrl: pointer to location to receive final IOSF control value * * Returns failing status on timeout * * Note: ctrl can be NULL if the IOSF control register value is not needed **/ static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl) { u32 i, command = 0; /* Check every 10 usec to see if the address cycle completed.
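 * (bounded at IXGBE_MDIO_COMMAND_TIMEOUT polls, so this is a short,
 * bounded busy-wait rather than an indefinite spin).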
* The SB IOSF BUSY bit will clear when the operation is * complete */ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL); if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0) break; usec_delay(10); } if (ctrl) *ctrl = command; if (i == IXGBE_MDIO_COMMAND_TIMEOUT) { ERROR_REPORT1(IXGBE_ERROR_POLLING, "Wait timed out\n"); return IXGBE_ERR_PHY; } return IXGBE_SUCCESS; } /** * ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register * of the IOSF device * @hw: pointer to hardware structure * @reg_addr: 32 bit PHY register to write * @device_type: 3 bit device type * @data: Data to write to the register **/ s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u32 data) { u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM; u32 command, error __unused; s32 ret; ret = ixgbe_acquire_swfw_semaphore(hw, gssr); if (ret != IXGBE_SUCCESS) return ret; ret = ixgbe_iosf_wait(hw, NULL); if (ret != IXGBE_SUCCESS) goto out; command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) | (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT)); /* Write IOSF control register */ IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command); /* Write IOSF data register */ IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data); ret = ixgbe_iosf_wait(hw, &command); if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) { error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >> IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT; ERROR_REPORT2(IXGBE_ERROR_POLLING, "Failed to write, error %x\n", error); ret = IXGBE_ERR_PHY; } out: ixgbe_release_swfw_semaphore(hw, gssr); return ret; } /** * ixgbe_read_iosf_sb_reg_x550 - Reads specified register of the IOSF device * @hw: pointer to hardware structure * @reg_addr: 32 bit PHY register to write * @device_type: 3 bit device type * @data: Pointer to read data from the register **/ s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u32 *data) { u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM; u32 command, error __unused; s32 ret; ret = ixgbe_acquire_swfw_semaphore(hw, gssr); if (ret != IXGBE_SUCCESS) return ret; ret = ixgbe_iosf_wait(hw, NULL); if (ret != IXGBE_SUCCESS) goto out; command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) | (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT)); /* Write IOSF control register */ IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command); ret = ixgbe_iosf_wait(hw, &command); if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) { error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >> IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT; ERROR_REPORT2(IXGBE_ERROR_POLLING, "Failed to read, error %x\n", error); ret = IXGBE_ERR_PHY; } if (ret == IXGBE_SUCCESS) *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA); out: ixgbe_release_swfw_semaphore(hw, gssr); return ret; } /** * ixgbe_get_phy_token - Get the token for shared phy access * @hw: Pointer to hardware structure */ s32 ixgbe_get_phy_token(struct ixgbe_hw *hw) { struct ixgbe_hic_phy_token_req token_cmd; s32 status; token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD; token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN; token_cmd.hdr.cmd_or_resp.cmd_resv = 0; token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; token_cmd.port_number = hw->bus.lan_id; token_cmd.command_type = FW_PHY_TOKEN_REQ; token_cmd.pad = 0; status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd, sizeof(token_cmd), IXGBE_HI_COMMAND_TIMEOUT, - TRUE); + true); if (status) { DEBUGOUT1("Issuing host interface command failed with Status = %d\n", 
status); return status; } if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK) return IXGBE_SUCCESS; if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY) { DEBUGOUT1("Host interface command returned 0x%08x , returning IXGBE_ERR_FW_RESP_INVALID\n", token_cmd.hdr.cmd_or_resp.ret_status); return IXGBE_ERR_FW_RESP_INVALID; } DEBUGOUT("Returning IXGBE_ERR_TOKEN_RETRY\n"); return IXGBE_ERR_TOKEN_RETRY; } /** * ixgbe_put_phy_token - Put the token for shared phy access * @hw: Pointer to hardware structure */ s32 ixgbe_put_phy_token(struct ixgbe_hw *hw) { struct ixgbe_hic_phy_token_req token_cmd; s32 status; token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD; token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN; token_cmd.hdr.cmd_or_resp.cmd_resv = 0; token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; token_cmd.port_number = hw->bus.lan_id; token_cmd.command_type = FW_PHY_TOKEN_REL; token_cmd.pad = 0; status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd, sizeof(token_cmd), IXGBE_HI_COMMAND_TIMEOUT, - TRUE); + true); if (status) return status; if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK) return IXGBE_SUCCESS; DEBUGOUT("Put PHY Token host interface command failed"); return IXGBE_ERR_FW_RESP_INVALID; } /** * ixgbe_write_iosf_sb_reg_x550a - Writes a value to specified register * of the IOSF device * @hw: pointer to hardware structure * @reg_addr: 32 bit PHY register to write * @device_type: 3 bit device type * @data: Data to write to the register **/ s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u32 data) { struct ixgbe_hic_internal_phy_req write_cmd; s32 status; UNREFERENCED_1PARAMETER(device_type); memset(&write_cmd, 0, sizeof(write_cmd)); write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD; write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN; write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; write_cmd.port_number = hw->bus.lan_id; write_cmd.command_type = FW_INT_PHY_REQ_WRITE; write_cmd.address = IXGBE_CPU_TO_BE16(reg_addr); write_cmd.write_data = IXGBE_CPU_TO_BE32(data); status = ixgbe_host_interface_command(hw, (u32 *)&write_cmd, sizeof(write_cmd), - IXGBE_HI_COMMAND_TIMEOUT, FALSE); + IXGBE_HI_COMMAND_TIMEOUT, false); return status; } /** * ixgbe_read_iosf_sb_reg_x550a - Reads specified register of the IOSF device * @hw: pointer to hardware structure * @reg_addr: 32 bit PHY register to write * @device_type: 3 bit device type * @data: Pointer to read data from the register **/ s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u32 *data) { union { struct ixgbe_hic_internal_phy_req cmd; struct ixgbe_hic_internal_phy_resp rsp; } hic; s32 status; UNREFERENCED_1PARAMETER(device_type); memset(&hic, 0, sizeof(hic)); hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD; hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN; hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; hic.cmd.port_number = hw->bus.lan_id; hic.cmd.command_type = FW_INT_PHY_REQ_READ; hic.cmd.address = IXGBE_CPU_TO_BE16(reg_addr); status = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd, sizeof(hic.cmd), - IXGBE_HI_COMMAND_TIMEOUT, TRUE); + IXGBE_HI_COMMAND_TIMEOUT, true); /* Extract the register value from the response. 
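 * The firmware returns read_data in big-endian byte order, hence the
 * IXGBE_BE32_TO_CPU conversion below.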
*/ *data = IXGBE_BE32_TO_CPU(hic.rsp.read_data); return status; } /** * ixgbe_disable_mdd_X550 * @hw: pointer to hardware structure * * Disable malicious driver detection **/ void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw) { u32 reg; DEBUGFUNC("ixgbe_disable_mdd_X550"); /* Disable MDD for TX DMA and interrupt */ reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); reg &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN); IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); /* Disable MDD for RX and interrupt */ reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); reg &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN); IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg); } /** * ixgbe_enable_mdd_X550 * @hw: pointer to hardware structure * * Enable malicious driver detection **/ void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw) { u32 reg; DEBUGFUNC("ixgbe_enable_mdd_X550"); /* Enable MDD for TX DMA and interrupt */ reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); reg |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN); IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); /* Enable MDD for RX and interrupt */ reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); reg |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN); IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg); } /** * ixgbe_restore_mdd_vf_X550 * @hw: pointer to hardware structure * @vf: vf index * * Restore VF that was disabled during malicious driver detection event **/ void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf) { u32 idx, reg, num_qs, start_q, bitmask; DEBUGFUNC("ixgbe_restore_mdd_vf_X550"); /* Map VF to queues */ reg = IXGBE_READ_REG(hw, IXGBE_MRQC); switch (reg & IXGBE_MRQC_MRQE_MASK) { case IXGBE_MRQC_VMDQRT8TCEN: num_qs = 8; /* 16 VFs / pools */ bitmask = 0x000000FF; break; case IXGBE_MRQC_VMDQRSS32EN: case IXGBE_MRQC_VMDQRT4TCEN: num_qs = 4; /* 32 VFs / pools */ bitmask = 0x0000000F; break; default: /* 64 VFs / pools */ num_qs = 2; bitmask = 0x00000003; break; } start_q = vf * num_qs; /* Release vf's queues by clearing WQBR_TX and WQBR_RX (RW1C) */ idx = start_q / 32; reg = 0; reg |= (bitmask << (start_q % 32)); IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), reg); IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), reg); } /** * ixgbe_mdd_event_X550 * @hw: pointer to hardware structure * @vf_bitmap: vf bitmap of malicious vfs * * Handle malicious driver detection event. 
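 * Queues map back to VFs as vf = q >> shift; e.g. in the default
 * 64-pool mode (shift = 1, two queues per pool) a flagged queue 10
 * implicates VF 5.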
**/ void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap) { u32 wqbr; u32 i, j, reg, q, shift, vf, idx; DEBUGFUNC("ixgbe_mdd_event_X550"); /* figure out pool size for mapping to vf's */ reg = IXGBE_READ_REG(hw, IXGBE_MRQC); switch (reg & IXGBE_MRQC_MRQE_MASK) { case IXGBE_MRQC_VMDQRT8TCEN: shift = 3; /* 16 VFs / pools */ break; case IXGBE_MRQC_VMDQRSS32EN: case IXGBE_MRQC_VMDQRT4TCEN: shift = 2; /* 32 VFs / pools */ break; default: shift = 1; /* 64 VFs / pools */ break; } /* Read WQBR_TX and WQBR_RX and check for malicious queues */ for (i = 0; i < 4; i++) { wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i)); wqbr |= IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i)); if (!wqbr) continue; /* Get malicious queue */ for (j = 0; j < 32 && wqbr; j++) { if (!(wqbr & (1 << j))) continue; /* Get queue from bitmask */ q = j + (i * 32); /* Map queue to vf */ vf = (q >> shift); /* Set vf bit in vf_bitmap */ idx = vf / 32; vf_bitmap[idx] |= (1 << (vf % 32)); wqbr &= ~(1 << j); } } } /** * ixgbe_get_media_type_X550em - Get media type * @hw: pointer to hardware structure * * Returns the media type (fiber, copper, backplane) */ enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) { enum ixgbe_media_type media_type; DEBUGFUNC("ixgbe_get_media_type_X550em"); /* Detect if there is a copper PHY attached. */ switch (hw->device_id) { case IXGBE_DEV_ID_X550EM_X_KR: case IXGBE_DEV_ID_X550EM_X_KX4: case IXGBE_DEV_ID_X550EM_X_XFI: case IXGBE_DEV_ID_X550EM_A_KR: case IXGBE_DEV_ID_X550EM_A_KR_L: media_type = ixgbe_media_type_backplane; break; case IXGBE_DEV_ID_X550EM_X_SFP: case IXGBE_DEV_ID_X550EM_A_SFP: case IXGBE_DEV_ID_X550EM_A_SFP_N: case IXGBE_DEV_ID_X550EM_A_QSFP: case IXGBE_DEV_ID_X550EM_A_QSFP_N: media_type = ixgbe_media_type_fiber; break; case IXGBE_DEV_ID_X550EM_X_1G_T: case IXGBE_DEV_ID_X550EM_X_10G_T: case IXGBE_DEV_ID_X550EM_A_10G_T: media_type = ixgbe_media_type_copper; break; case IXGBE_DEV_ID_X550EM_A_SGMII: case IXGBE_DEV_ID_X550EM_A_SGMII_L: media_type = ixgbe_media_type_backplane; hw->phy.type = ixgbe_phy_sgmii; break; case IXGBE_DEV_ID_X550EM_A_1G_T: case IXGBE_DEV_ID_X550EM_A_1G_T_L: media_type = ixgbe_media_type_copper; break; default: media_type = ixgbe_media_type_unknown; break; } return media_type; } /** * ixgbe_supported_sfp_modules_X550em - Check if SFP module type is supported * @hw: pointer to hardware structure - * @linear: TRUE if SFP module is linear + * @linear: true if SFP module is linear */ static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear) { DEBUGFUNC("ixgbe_supported_sfp_modules_X550em"); switch (hw->phy.sfp_type) { case ixgbe_sfp_type_not_present: return IXGBE_ERR_SFP_NOT_PRESENT; case ixgbe_sfp_type_da_cu_core0: case ixgbe_sfp_type_da_cu_core1: - *linear = TRUE; + *linear = true; break; case ixgbe_sfp_type_srlr_core0: case ixgbe_sfp_type_srlr_core1: case ixgbe_sfp_type_da_act_lmt_core0: case ixgbe_sfp_type_da_act_lmt_core1: case ixgbe_sfp_type_1g_sx_core0: case ixgbe_sfp_type_1g_sx_core1: case ixgbe_sfp_type_1g_lx_core0: case ixgbe_sfp_type_1g_lx_core1: - *linear = FALSE; + *linear = false; break; case ixgbe_sfp_type_unknown: case ixgbe_sfp_type_1g_cu_core0: case ixgbe_sfp_type_1g_cu_core1: default: return IXGBE_ERR_SFP_NOT_SUPPORTED; } return IXGBE_SUCCESS; } /** * ixgbe_identify_sfp_module_X550em - Identifies SFP modules * @hw: pointer to hardware structure * * Searches for and identifies the SFP module and assigns appropriate PHY type. 
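 * Identification itself is delegated to ixgbe_identify_module_generic();
 * the result is then screened by ixgbe_supported_sfp_modules_X550em().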
**/ s32 ixgbe_identify_sfp_module_X550em(struct ixgbe_hw *hw) { s32 status; bool linear; DEBUGFUNC("ixgbe_identify_sfp_module_X550em"); status = ixgbe_identify_module_generic(hw); if (status != IXGBE_SUCCESS) return status; /* Check if SFP module is supported */ status = ixgbe_supported_sfp_modules_X550em(hw, &linear); return status; } /** * ixgbe_setup_sfp_modules_X550em - Setup MAC link ops * @hw: pointer to hardware structure */ s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw) { s32 status; bool linear; DEBUGFUNC("ixgbe_setup_sfp_modules_X550em"); /* Check if SFP module is supported */ status = ixgbe_supported_sfp_modules_X550em(hw, &linear); if (status != IXGBE_SUCCESS) return status; ixgbe_init_mac_link_ops_X550em(hw); hw->phy.ops.reset = NULL; return IXGBE_SUCCESS; } /** * ixgbe_restart_an_internal_phy_x550em - restart autonegotiation for the * internal PHY * @hw: pointer to hardware structure **/ static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw) { s32 status; u32 link_ctrl; /* Restart auto-negotiation. */ status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl); if (status) { DEBUGOUT("Auto-negotiation did not complete\n"); return status; } link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; status = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl); if (hw->mac.type == ixgbe_mac_X550EM_a) { u32 flx_mask_st20; /* Indicate to FW that AN restart has been asserted */ status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_mask_st20); if (status) { DEBUGOUT("Auto-negotiation did not complete\n"); return status; } flx_mask_st20 |= IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART; status = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, flx_mask_st20); } return status; } /** * ixgbe_setup_sgmii - Set up link for sgmii * @hw: pointer to hardware structure * @speed: new link speed - * @autoneg_wait: TRUE when waiting for completion is needed + * @autoneg_wait: true when waiting for completion is needed */ static s32 ixgbe_setup_sgmii(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait) { struct ixgbe_mac_info *mac = &hw->mac; u32 lval, sval, flx_val; s32 rc; rc = mac->ops.read_iosf_sb_reg(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &lval); if (rc) return rc; lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN; lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN; lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; rc = mac->ops.write_iosf_sb_reg(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, lval); if (rc) return rc; rc = mac->ops.read_iosf_sb_reg(hw, IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &sval); if (rc) return rc; sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D; sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D; rc = mac->ops.write_iosf_sb_reg(hw, IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, sval); if (rc) return rc; rc = mac->ops.read_iosf_sb_reg(hw, IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); if (rc) return rc; flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G; flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; flx_val |= 
IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; rc = mac->ops.write_iosf_sb_reg(hw, IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val); if (rc) return rc; rc = ixgbe_restart_an_internal_phy_x550em(hw); if (rc) return rc; return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait); } /** * ixgbe_setup_sgmii_fw - Set up link for internal PHY SGMII auto-negotiation * @hw: pointer to hardware structure * @speed: new link speed - * @autoneg_wait: TRUE when waiting for completion is needed + * @autoneg_wait: true when waiting for completion is needed */ static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait) { struct ixgbe_mac_info *mac = &hw->mac; u32 lval, sval, flx_val; s32 rc; rc = mac->ops.read_iosf_sb_reg(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &lval); if (rc) return rc; lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN; lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN; lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; rc = mac->ops.write_iosf_sb_reg(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, lval); if (rc) return rc; rc = mac->ops.read_iosf_sb_reg(hw, IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &sval); if (rc) return rc; sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D; sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D; rc = mac->ops.write_iosf_sb_reg(hw, IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, sval); if (rc) return rc; rc = mac->ops.write_iosf_sb_reg(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, lval); if (rc) return rc; rc = mac->ops.read_iosf_sb_reg(hw, IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); if (rc) return rc; flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN; flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; rc = mac->ops.write_iosf_sb_reg(hw, IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val); if (rc) return rc; rc = ixgbe_restart_an_internal_phy_x550em(hw); return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait); } /** * ixgbe_init_mac_link_ops_X550em - init mac link function pointers * @hw: pointer to hardware structure */ void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; DEBUGFUNC("ixgbe_init_mac_link_ops_X550em"); switch (hw->mac.ops.get_media_type(hw)) { case ixgbe_media_type_fiber: /* CS4227 does not support autoneg, so disable the laser control * functions for SFP+ fiber */ mac->ops.disable_tx_laser = NULL; mac->ops.enable_tx_laser = NULL; mac->ops.flap_tx_laser = NULL; mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber; mac->ops.set_rate_select_speed = ixgbe_set_soft_rate_select_speed; if ((hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) || (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP)) mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_x550a; else mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_x550em; break; case ixgbe_media_type_copper: if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) break; if (hw->mac.type == ixgbe_mac_X550EM_a) { if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) { mac->ops.setup_link 
= ixgbe_setup_sgmii_fw; mac->ops.check_link = ixgbe_check_mac_link_generic; } else { mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; } } else { mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; mac->ops.check_link = ixgbe_check_link_t_X550em; } break; case ixgbe_media_type_backplane: if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII || hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) mac->ops.setup_link = ixgbe_setup_sgmii; break; default: break; } } /** * ixgbe_get_link_capabilities_X550em - Determines link capabilities * @hw: pointer to hardware structure * @speed: pointer to link speed - * @autoneg: TRUE when autoneg or autotry is enabled + * @autoneg: true when autoneg or autotry is enabled */ s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *autoneg) { DEBUGFUNC("ixgbe_get_link_capabilities_X550em"); if (hw->phy.type == ixgbe_phy_fw) { - *autoneg = TRUE; + *autoneg = true; *speed = hw->phy.speeds_supported; return 0; } /* SFP */ if (hw->phy.media_type == ixgbe_media_type_fiber) { /* CS4227 SFP must not enable auto-negotiation */ - *autoneg = FALSE; + *autoneg = false; /* Check if 1G SFP module. */ if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) { *speed = IXGBE_LINK_SPEED_1GB_FULL; return IXGBE_SUCCESS; } /* Link capabilities are based on SFP */ if (hw->phy.multispeed_fiber) *speed = IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL; else *speed = IXGBE_LINK_SPEED_10GB_FULL; } else { *autoneg = true; switch (hw->phy.type) { case ixgbe_phy_x550em_xfi: *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_10GB_FULL; *autoneg = false; break; case ixgbe_phy_ext_1g_t: case ixgbe_phy_sgmii: *speed = IXGBE_LINK_SPEED_1GB_FULL; break; case ixgbe_phy_x550em_kr: if (hw->mac.type == ixgbe_mac_X550EM_a) { /* check different backplane modes */ if (hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) { *speed = IXGBE_LINK_SPEED_2_5GB_FULL; break; } else if (hw->device_id == IXGBE_DEV_ID_X550EM_A_KR_L) { *speed = IXGBE_LINK_SPEED_1GB_FULL; break; } } /* fall through */ default: *speed = IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL; break; } } return IXGBE_SUCCESS; } /** * ixgbe_get_lasi_ext_t_x550em - Determine external Base T PHY interrupt cause * @hw: pointer to hardware structure * @lsc: pointer to boolean flag which indicates whether external Base T * PHY interrupt is lsc * * Determine if external Base T PHY interrupt cause is high temperature * failure alarm or link status change. * * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature * failure alarm, else return PHY access status.
*/ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) { u32 status; u16 reg; - *lsc = FALSE; + *lsc = false; /* Vendor alarm triggered */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg); if (status != IXGBE_SUCCESS || !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN)) return status; /* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg); if (status != IXGBE_SUCCESS || !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN | IXGBE_MDIO_GLOBAL_ALARM_1_INT))) return status; /* Global alarm triggered */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg); if (status != IXGBE_SUCCESS) return status; /* If high temperature failure, then return over temp error and exit */ if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) { /* power down the PHY in case the PHY FW didn't already */ - ixgbe_set_copper_phy_power(hw, FALSE); + ixgbe_set_copper_phy_power(hw, false); return IXGBE_ERR_OVERTEMP; } else if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) { /* device fault alarm triggered */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg); if (status != IXGBE_SUCCESS) return status; /* if device fault was due to high temp alarm, handle and exit */ if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) { /* power down the PHY in case the PHY FW didn't */ - ixgbe_set_copper_phy_power(hw, FALSE); + ixgbe_set_copper_phy_power(hw, false); return IXGBE_ERR_OVERTEMP; } } /* Vendor alarm 2 triggered */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg); if (status != IXGBE_SUCCESS || !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT)) return status; /* link connect/disconnect event occurred */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg); if (status != IXGBE_SUCCESS) return status; /* Indicate LSC */ if (reg & IXGBE_MDIO_AUTO_NEG_VEN_LSC) - *lsc = TRUE; + *lsc = true; return IXGBE_SUCCESS; } /** * ixgbe_enable_lasi_ext_t_x550em - Enable external Base T PHY interrupts * @hw: pointer to hardware structure * * Enable link status change and temperature failure alarm for the external * Base T PHY * * Returns PHY access status */ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) { u32 status; u16 reg; bool lsc; /* Clear interrupt flags */ status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc); /* Enable link status change alarm */ /* Enable the LASI interrupts on X552 devices to receive notifications * of the link configurations of the external PHY and correspondingly * support the configuration of the internal iXFI link, since iXFI does * not support auto-negotiation. This is not required for X553 devices * with KR support, which perform auto-negotiation and use KR as the * internal link to the external PHY. Hence the check here avoids * enabling LASI interrupts for X553 devices.
*/ if (hw->mac.type != ixgbe_mac_X550EM_a) { status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg); if (status != IXGBE_SUCCESS) return status; reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN; status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg); if (status != IXGBE_SUCCESS) return status; } /* Enable high temperature failure and global fault alarms */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg); if (status != IXGBE_SUCCESS) return status; reg |= (IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN | IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN); status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, reg); if (status != IXGBE_SUCCESS) return status; /* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg); if (status != IXGBE_SUCCESS) return status; reg |= (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN | IXGBE_MDIO_GLOBAL_ALARM_1_INT); status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, reg); if (status != IXGBE_SUCCESS) return status; /* Enable chip-wide vendor alarm */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg); if (status != IXGBE_SUCCESS) return status; reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN; status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, reg); return status; } /** * ixgbe_setup_kr_speed_x550em - Configure the KR PHY for link speed. * @hw: pointer to hardware structure * @speed: link speed * * Configures the integrated KR PHY. **/ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw, ixgbe_link_speed speed) { s32 status; u32 reg_val; status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); if (status) return status; reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR | IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX); /* Advertise 10G support. */ if (speed & IXGBE_LINK_SPEED_10GB_FULL) reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR; /* Advertise 1G support.
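 *
 * A worked sketch of the advertisement (all names as used in this
 * function): calling
 *
 *	ixgbe_setup_kr_speed_x550em(hw,
 *	    IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL);
 *
 * leaves both IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR and
 * IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX set, so backplane (clause 73)
 * auto-negotiation may resolve to either rate.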
*/ if (speed & IXGBE_LINK_SPEED_1GB_FULL) reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX; status = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); if (hw->mac.type == ixgbe_mac_X550EM_a) { /* Set lane mode to KR auto negotiation */ status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); if (status) return status; reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN; reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; status = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); } return ixgbe_restart_an_internal_phy_x550em(hw); } /** * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs * @hw: pointer to hardware structure */ static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw) { u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 }; s32 rc; if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw)) return IXGBE_SUCCESS; rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store); if (rc) return rc; memset(store, 0, sizeof(store)); rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store); if (rc) return rc; return ixgbe_setup_fw_link(hw); } /** * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp * @hw: pointer to hardware structure */ static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw) { u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 }; s32 rc; rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store); if (rc) return rc; if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) { ixgbe_shutdown_fw_phy(hw); return IXGBE_ERR_OVERTEMP; } return IXGBE_SUCCESS; } /** * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register * @hw: pointer to hardware structure * * Read NW_MNG_IF_SEL register and save field values, and check for valid field * values. **/ static s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw) { /* Save NW management interface connected on board. This is used * to determine internal PHY mode. */ hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL); /* If X553 (X550EM_a) and MDIO is connected to external PHY, then set * PHY address. The driver only uses this register field on X550EM_a * parts. */ if (hw->mac.type == ixgbe_mac_X550EM_a && hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) { hw->phy.addr = (hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >> IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT; } return IXGBE_SUCCESS; } /** * ixgbe_init_phy_ops_X550em - PHY/SFP specific init * @hw: pointer to hardware structure * * Initialize any function pointers that were not able to be * set during init_shared_code because the PHY/SFP type was * not known. Perform the SFP init if necessary.
*/ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) { struct ixgbe_phy_info *phy = &hw->phy; s32 ret_val; DEBUGFUNC("ixgbe_init_phy_ops_X550em"); hw->mac.ops.set_lan_id(hw); ixgbe_read_mng_if_sel_x550em(hw); if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) { phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; ixgbe_setup_mux_ctl(hw); phy->ops.identify_sfp = ixgbe_identify_sfp_module_X550em; } switch (hw->device_id) { case IXGBE_DEV_ID_X550EM_A_1G_T: case IXGBE_DEV_ID_X550EM_A_1G_T_L: phy->ops.read_reg_mdi = NULL; phy->ops.write_reg_mdi = NULL; hw->phy.ops.read_reg = NULL; hw->phy.ops.write_reg = NULL; phy->ops.check_overtemp = ixgbe_check_overtemp_fw; if (hw->bus.lan_id) hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM; else hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM; break; case IXGBE_DEV_ID_X550EM_A_10G_T: case IXGBE_DEV_ID_X550EM_A_SFP: hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a; hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a; if (hw->bus.lan_id) hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM; else hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM; break; case IXGBE_DEV_ID_X550EM_X_SFP: /* set up for CS4227 usage */ hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; break; case IXGBE_DEV_ID_X550EM_X_1G_T: phy->ops.read_reg_mdi = NULL; phy->ops.write_reg_mdi = NULL; /* fall through */ default: break; } /* Identify the PHY or SFP module */ ret_val = phy->ops.identify(hw); if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED || ret_val == IXGBE_ERR_PHY_ADDR_INVALID) return ret_val; /* Setup function pointers based on detected hardware */ ixgbe_init_mac_link_ops_X550em(hw); if (phy->sfp_type != ixgbe_sfp_type_unknown) phy->ops.reset = NULL; /* Set function pointers based on phy type */ switch (hw->phy.type) { case ixgbe_phy_x550em_kx4: phy->ops.setup_link = NULL; phy->ops.read_reg = ixgbe_read_phy_reg_x550em; phy->ops.write_reg = ixgbe_write_phy_reg_x550em; break; case ixgbe_phy_x550em_kr: phy->ops.setup_link = ixgbe_setup_kr_x550em; phy->ops.read_reg = ixgbe_read_phy_reg_x550em; phy->ops.write_reg = ixgbe_write_phy_reg_x550em; break; case ixgbe_phy_ext_1g_t: /* link is managed by FW */ phy->ops.setup_link = NULL; phy->ops.reset = NULL; break; case ixgbe_phy_x550em_xfi: /* link is managed by HW */ phy->ops.setup_link = NULL; phy->ops.read_reg = ixgbe_read_phy_reg_x550em; phy->ops.write_reg = ixgbe_write_phy_reg_x550em; break; case ixgbe_phy_x550em_ext_t: /* If internal link mode is XFI, then setup iXFI internal link, * else setup KR now.
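 *
 * (The pointers assigned in this switch are what the rest of the
 *  driver invokes; e.g. a hypothetical caller reacting to an
 *  external-PHY link interrupt would do
 *
 *	if (hw->phy.ops.setup_internal_link)
 *		hw->phy.ops.setup_internal_link(hw);
 *
 *  which for this PHY type runs ixgbe_setup_internal_phy_t_x550em().)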
*/ phy->ops.setup_internal_link = ixgbe_setup_internal_phy_t_x550em; /* setup SW LPLU only for first revision of X550EM_x */ if ((hw->mac.type == ixgbe_mac_X550EM_x) && !(IXGBE_FUSES0_REV_MASK & IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)))) phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em; phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em; phy->ops.reset = ixgbe_reset_phy_t_X550em; break; case ixgbe_phy_sgmii: phy->ops.setup_link = NULL; break; case ixgbe_phy_fw: phy->ops.setup_link = ixgbe_setup_fw_link; phy->ops.reset = ixgbe_reset_phy_fw; break; default: break; } return ret_val; } /** * ixgbe_set_mdio_speed - Set MDIO clock speed * @hw: pointer to hardware structure */ static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw) { u32 hlreg0; switch (hw->device_id) { case IXGBE_DEV_ID_X550EM_X_10G_T: case IXGBE_DEV_ID_X550EM_A_SGMII: case IXGBE_DEV_ID_X550EM_A_SGMII_L: case IXGBE_DEV_ID_X550EM_A_10G_T: case IXGBE_DEV_ID_X550EM_A_SFP: case IXGBE_DEV_ID_X550EM_A_QSFP: /* Config MDIO clock speed before the first MDIO PHY access */ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); hlreg0 &= ~IXGBE_HLREG0_MDCSPD; IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); break; case IXGBE_DEV_ID_X550EM_A_1G_T: case IXGBE_DEV_ID_X550EM_A_1G_T_L: /* Select fast MDIO clock speed for these devices */ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); hlreg0 |= IXGBE_HLREG0_MDCSPD; IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); break; default: break; } } /** * ixgbe_reset_hw_X550em - Perform hardware reset * @hw: pointer to hardware structure * * Resets the hardware by resetting the transmit and receive units, masks * and clears all interrupts, performs a PHY reset, and performs a link (MAC) * reset. */ s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) { ixgbe_link_speed link_speed; s32 status; u32 ctrl = 0; u32 i; - bool link_up = FALSE; + bool link_up = false; u32 swfw_mask = hw->phy.phy_semaphore_mask; DEBUGFUNC("ixgbe_reset_hw_X550em"); /* Call adapter stop to disable Tx/Rx and clear interrupts */ status = hw->mac.ops.stop_adapter(hw); if (status != IXGBE_SUCCESS) { DEBUGOUT1("Failed to stop adapter, STATUS = %d\n", status); return status; } /* flush pending Tx transactions */ ixgbe_clear_tx_pending(hw); ixgbe_set_mdio_speed(hw); /* PHY ops must be identified and initialized prior to reset */ status = hw->phy.ops.init(hw); if (status) DEBUGOUT1("Failed to initialize PHY ops, STATUS = %d\n", status); if (status == IXGBE_ERR_SFP_NOT_SUPPORTED || status == IXGBE_ERR_PHY_ADDR_INVALID) { DEBUGOUT("Returning from reset HW due to PHY init failure\n"); return status; } /* start the external PHY */ if (hw->phy.type == ixgbe_phy_x550em_ext_t) { status = ixgbe_init_ext_t_x550em(hw); if (status) { DEBUGOUT1("Failed to start the external PHY, STATUS = %d\n", status); return status; } } /* Setup SFP module if there is one present. */ if (hw->phy.sfp_setup_needed) { status = hw->mac.ops.setup_sfp(hw); - hw->phy.sfp_setup_needed = FALSE; + hw->phy.sfp_setup_needed = false; } if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) return status; /* Reset PHY */ if (!hw->phy.reset_disable && hw->phy.ops.reset) { if (hw->phy.ops.reset(hw) == IXGBE_ERR_OVERTEMP) return IXGBE_ERR_OVERTEMP; } mac_reset_top: /* Issue global reset to the MAC. Needs to be SW reset if link is up. * If link reset is used when link is up, it might reset the PHY when * mng is using it. If link is down or the flag to force full link * reset is set, then perform link reset.
*/ ctrl = IXGBE_CTRL_LNK_RST; if (!hw->force_full_reset) { - hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE); + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); if (link_up) ctrl = IXGBE_CTRL_RST; } status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); if (status != IXGBE_SUCCESS) { ERROR_REPORT2(IXGBE_ERROR_CAUTION, "semaphore failed with %d", status); return IXGBE_ERR_SWFW_SYNC; } ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); IXGBE_WRITE_FLUSH(hw); hw->mac.ops.release_swfw_sync(hw, swfw_mask); /* Poll for reset bit to self-clear meaning reset is complete */ for (i = 0; i < 10; i++) { usec_delay(1); ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); if (!(ctrl & IXGBE_CTRL_RST_MASK)) break; } if (ctrl & IXGBE_CTRL_RST_MASK) { status = IXGBE_ERR_RESET_FAILED; DEBUGOUT("Reset polling failed to complete.\n"); } msec_delay(50); /* Double resets are required for recovery from certain error * conditions. Between resets, it is necessary to stall to * allow time for any pending HW events to complete. */ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; goto mac_reset_top; } /* Store the permanent mac address */ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); /* Store MAC address from RAR0, clear receive address registers, and * clear the multicast table. Also reset num_rar_entries to 128, * since we modify this value when programming the SAN MAC address. */ hw->mac.num_rar_entries = 128; hw->mac.ops.init_rx_addrs(hw); ixgbe_set_mdio_speed(hw); if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) ixgbe_setup_mux_ctl(hw); if (status != IXGBE_SUCCESS) DEBUGOUT1("Reset HW failed, STATUS = %d\n", status); return status; } /** * ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY. * @hw: pointer to hardware structure */ s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) { u32 status; u16 reg; status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_TX_VENDOR_ALARMS_3, IXGBE_MDIO_PMA_PMD_DEV_TYPE, &reg); if (status != IXGBE_SUCCESS) return status; /* If PHY FW reset completed bit is set then this is the first * SW instance after a power on so the PHY FW must be un-stalled. */ if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) { status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_RES_PR_10, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg); if (status != IXGBE_SUCCESS) return status; reg &= ~IXGBE_MDIO_POWER_UP_STALL; status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_RES_PR_10, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, reg); if (status != IXGBE_SUCCESS) return status; } return status; } /** * ixgbe_setup_kr_x550em - Configure the KR PHY. * @hw: pointer to hardware structure **/ s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw) { /* leave link alone for 2.5G */ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) return IXGBE_SUCCESS; if (ixgbe_check_reset_blocked(hw)) return 0; return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised); } /** * ixgbe_setup_mac_link_sfp_x550em - Set up the internal/external PHY for SFP * @hw: pointer to hardware structure * @speed: new link speed * @autoneg_wait_to_complete: unused * * Configure the external PHY and the integrated KR PHY for SFP support.
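 *
 * A minimal usage sketch (hypothetical direct call; the driver
 * normally reaches this function through mac->ops.setup_mac_link
 * from the multispeed fiber path):
 *
 *	s32 err = ixgbe_setup_mac_link_sfp_x550em(hw,
 *	    IXGBE_LINK_SPEED_10GB_FULL, false);
 *	if (err != IXGBE_SUCCESS)
 *		DEBUGOUT1("SFP link setup failed, STATUS = %d\n", err);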
**/ s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { s32 ret_val; u16 reg_slice, reg_val; - bool setup_linear = FALSE; + bool setup_linear = false; UNREFERENCED_1PARAMETER(autoneg_wait_to_complete); /* Check if SFP module is supported and linear */ ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); /* If no SFP module present, then return success. Return success since * there is no reason to configure CS4227 and SFP not present error is * not expected in the setup MAC link flow. */ if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT) return IXGBE_SUCCESS; if (ret_val != IXGBE_SUCCESS) return ret_val; /* Configure internal PHY for KR/KX. */ ixgbe_setup_kr_speed_x550em(hw, speed); /* Configure CS4227 LINE side to proper mode. */ reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + (hw->bus.lan_id << 12); if (setup_linear) reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1; else reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1; ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice, reg_val); return ret_val; } /** * ixgbe_setup_sfi_x550a - Configure the internal PHY for native SFI mode * @hw: pointer to hardware structure * @speed: the link speed to force * * Configures the integrated PHY for native SFI mode. Used to connect the * internal PHY directly to an SFP cage, without autonegotiation. **/ static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed) { struct ixgbe_mac_info *mac = &hw->mac; s32 status; u32 reg_val; /* Disable all AN and force speed to 10G Serial. */ status = mac->ops.read_iosf_sb_reg(hw, IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); if (status != IXGBE_SUCCESS) return status; reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; /* Select forced link speed for internal PHY. */ switch (*speed) { case IXGBE_LINK_SPEED_10GB_FULL: reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G; break; case IXGBE_LINK_SPEED_1GB_FULL: reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G; break; default: /* Other link speeds are not supported by internal PHY. */ return IXGBE_ERR_LINK_SETUP; } status = mac->ops.write_iosf_sb_reg(hw, IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); /* Toggle port SW reset by AN reset. */ status = ixgbe_restart_an_internal_phy_x550em(hw); return status; } /** * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP * @hw: pointer to hardware structure * @speed: new link speed * @autoneg_wait_to_complete: unused * * Configure the integrated PHY for SFP support. **/ s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { s32 ret_val; u16 reg_phy_ext; - bool setup_linear = FALSE; + bool setup_linear = false; u32 reg_slice, reg_phy_int, slice_offset; UNREFERENCED_1PARAMETER(autoneg_wait_to_complete); /* Check if SFP module is supported and linear */ ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); /* If no SFP module present, then return success. Return success since * SFP not present error is not expected in the setup MAC link flow.
*/ if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT) return IXGBE_SUCCESS; if (ret_val != IXGBE_SUCCESS) return ret_val; if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) { /* Configure internal PHY for native SFI based on module type */ ret_val = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_phy_int); if (ret_val != IXGBE_SUCCESS) return ret_val; reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA; if (!setup_linear) reg_phy_int |= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR; ret_val = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int); if (ret_val != IXGBE_SUCCESS) return ret_val; /* Setup SFI internal link. */ ret_val = ixgbe_setup_sfi_x550a(hw, &speed); } else { /* Configure internal PHY for KR/KX. */ ixgbe_setup_kr_speed_x550em(hw, speed); if (hw->phy.addr == 0x0 || hw->phy.addr == 0xFFFF) { /* Find Address */ DEBUGOUT("Invalid NW_MNG_IF_SEL.MDIO_PHY_ADD value\n"); return IXGBE_ERR_PHY_ADDR_INVALID; } /* Get external PHY SKU id */ ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU, IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext); if (ret_val != IXGBE_SUCCESS) return ret_val; /* When configuring quad port CS4223, the MAC instance is part * of the slice offset. */ if (reg_phy_ext == IXGBE_CS4223_SKU_ID) slice_offset = (hw->bus.lan_id + (hw->bus.instance_id << 1)) << 12; else slice_offset = hw->bus.lan_id << 12; /* Configure CS4227/CS4223 LINE side to proper mode. */ reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset; ret_val = hw->phy.ops.read_reg(hw, reg_slice, IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext); if (ret_val != IXGBE_SUCCESS) return ret_val; reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) | (IXGBE_CS4227_EDC_MODE_SR << 1)); if (setup_linear) reg_phy_ext = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1; else reg_phy_ext = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1; ret_val = hw->phy.ops.write_reg(hw, reg_slice, IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext); /* Flush previous write with a read */ ret_val = hw->phy.ops.read_reg(hw, reg_slice, IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext); } return ret_val; } /** * ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration * @hw: pointer to hardware structure * * iXFI configuration needed for ixgbe_mac_X550EM_x devices. **/ static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; s32 status; u32 reg_val; /* Disable training protocol FSM. */ status = mac->ops.read_iosf_sb_reg(hw, IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); if (status != IXGBE_SUCCESS) return status; reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL; status = mac->ops.write_iosf_sb_reg(hw, IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); if (status != IXGBE_SUCCESS) return status; /* Disable Flex from training TXFFE.
*/ status = mac->ops.read_iosf_sb_reg(hw, IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); if (status != IXGBE_SUCCESS) return status; reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN; reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN; reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN; status = mac->ops.write_iosf_sb_reg(hw, IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); if (status != IXGBE_SUCCESS) return status; status = mac->ops.read_iosf_sb_reg(hw, IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); if (status != IXGBE_SUCCESS) return status; reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN; reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN; reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN; status = mac->ops.write_iosf_sb_reg(hw, IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); if (status != IXGBE_SUCCESS) return status; /* Enable override for coefficients. */ status = mac->ops.read_iosf_sb_reg(hw, IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); if (status != IXGBE_SUCCESS) return status; reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN; reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN; reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN; reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN; status = mac->ops.write_iosf_sb_reg(hw, IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); return status; } /** * ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode. * @hw: pointer to hardware structure * @speed: the link speed to force * * Configures the integrated KR PHY to use iXFI mode. Used to connect an * internal and external PHY at a specific speed, without autonegotiation. **/ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) { struct ixgbe_mac_info *mac = &hw->mac; s32 status; u32 reg_val; /* iXFI is only supported with X552 */ if (mac->type != ixgbe_mac_X550EM_x) return IXGBE_ERR_LINK_SETUP; /* Disable AN and force speed to 10G Serial. */ status = mac->ops.read_iosf_sb_reg(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); if (status != IXGBE_SUCCESS) return status; reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; /* Select forced link speed for internal PHY. */ switch (*speed) { case IXGBE_LINK_SPEED_10GB_FULL: reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G; break; case IXGBE_LINK_SPEED_1GB_FULL: reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; break; default: /* Other link speeds are not supported by internal KR PHY. */ return IXGBE_ERR_LINK_SETUP; } status = mac->ops.write_iosf_sb_reg(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); if (status != IXGBE_SUCCESS) return status; /* Additional configuration needed for x550em_x */ if (hw->mac.type == ixgbe_mac_X550EM_x) { status = ixgbe_setup_ixfi_x550em_x(hw); if (status != IXGBE_SUCCESS) return status; } /* Toggle port SW reset by AN reset. */ status = ixgbe_restart_an_internal_phy_x550em(hw); return status; } /** * ixgbe_ext_phy_t_x550em_get_link - Get ext phy link status * @hw: address of hardware structure * @link_up: address of boolean to indicate link status * * Returns error code if unable to get link status.
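 *
 * The MDIO link status bit is latching (a past link drop is held
 * until read), which is why this function reads
 * IXGBE_MDIO_AUTO_NEG_STATUS twice and only trusts the second value.
 * A sketch of the intended use:
 *
 *	bool link_up;
 *	s32 err = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
 *	if (err == IXGBE_SUCCESS && link_up)
 *		err = ixgbe_setup_internal_phy_t_x550em(hw);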
*/ static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up) { u32 ret; u16 autoneg_status; - *link_up = FALSE; + *link_up = false; /* read this twice back to back to indicate current status */ ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_status); if (ret != IXGBE_SUCCESS) return ret; ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_status); if (ret != IXGBE_SUCCESS) return ret; *link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS); return IXGBE_SUCCESS; } /** * ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link * @hw: pointer to hardware structure * * Configures the link between the integrated KR PHY and the external X557 PHY. * The driver will call this function when it gets a link status change * interrupt from the X557 PHY. This function configures the link speed * between the PHYs to match the link speed of the BASE-T link. * * A return of a non-zero value indicates an error, and the base driver should * not report link up. */ s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw) { ixgbe_link_speed force_speed; bool link_up; u32 status; u16 speed; if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) return IXGBE_ERR_CONFIG; if (hw->mac.type == ixgbe_mac_X550EM_x && !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { /* If link is down, there is no setup necessary so return */ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); if (status != IXGBE_SUCCESS) return status; if (!link_up) return IXGBE_SUCCESS; status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &speed); if (status != IXGBE_SUCCESS) return status; /* If link is still down - no setup is required so return */ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); if (status != IXGBE_SUCCESS) return status; if (!link_up) return IXGBE_SUCCESS; /* clear everything but the speed and duplex bits */ speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK; switch (speed) { case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL: force_speed = IXGBE_LINK_SPEED_10GB_FULL; break; case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL: force_speed = IXGBE_LINK_SPEED_1GB_FULL; break; default: /* Internal PHY does not support anything else */ return IXGBE_ERR_INVALID_LINK_SETTINGS; } return ixgbe_setup_ixfi_x550em(hw, &force_speed); } else { speed = IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL; return ixgbe_setup_kr_speed_x550em(hw, speed); } } /** * ixgbe_setup_phy_loopback_x550em - Configure the KR PHY for loopback. * @hw: pointer to hardware structure * * Configures the integrated KR PHY to use internal loopback mode. **/ s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw) { s32 status; u32 reg_val; /* Disable AN and force speed to 10G Serial. */ status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); if (status != IXGBE_SUCCESS) return status; reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G; status = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); if (status != IXGBE_SUCCESS) return status; /* Set near-end loopback clocks.
*/ status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); if (status != IXGBE_SUCCESS) return status; reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B; reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS; status = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); if (status != IXGBE_SUCCESS) return status; /* Set loopback enable. */ status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); if (status != IXGBE_SUCCESS) return status; reg_val |= IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK; status = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); if (status != IXGBE_SUCCESS) return status; /* Training bypass. */ status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); if (status != IXGBE_SUCCESS) return status; reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS; status = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); return status; } /** * ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command; * the needed semaphore is acquired inside the function. * @hw: pointer to hardware structure * @offset: offset of word in the EEPROM to read * @data: word read from the EEPROM * * Reads a 16 bit word from the EEPROM using the hostif. **/ s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data) { const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM; struct ixgbe_hic_read_shadow_ram buffer; s32 status; DEBUGFUNC("ixgbe_read_ee_hostif_X550"); buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; buffer.hdr.req.buf_lenh = 0; buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; /* convert offset from words to bytes */ buffer.address = IXGBE_CPU_TO_BE32(offset * 2); /* one word */ buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16)); buffer.pad2 = 0; buffer.data = 0; buffer.pad3 = 0; status = hw->mac.ops.acquire_swfw_sync(hw, mask); if (status) return status; status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer), IXGBE_HI_COMMAND_TIMEOUT); if (!status) { *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, FW_NVM_DATA_OFFSET); } hw->mac.ops.release_swfw_sync(hw, mask); return status; } /** * ixgbe_read_ee_hostif_buffer_X550 - Read EEPROM word(s) using hostif * @hw: pointer to hardware structure * @offset: offset of word in the EEPROM to read * @words: number of words * @data: word(s) read from the EEPROM * * Reads 16 bit word(s) from the EEPROM using the hostif. **/ s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM; struct ixgbe_hic_read_shadow_ram buffer; u32 current_word = 0; u16 words_to_read; s32 status; u32 i; DEBUGFUNC("ixgbe_read_ee_hostif_buffer_X550"); /* Take semaphore for the entire operation.
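 *
 * Note while reading the copy loop below: each 32-bit FLEX_MNG
 * register read returns two 16-bit EEPROM words, low half first,
 * which is why the loop advances its index twice per register read.
 * The unpacking reduces to (illustrative only):
 *
 *	u32 value = IXGBE_READ_REG(hw, reg);
 *	data[n] = (u16)(value & 0xffff);
 *	data[n + 1] = (u16)(value >> 16);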
*/ status = hw->mac.ops.acquire_swfw_sync(hw, mask); if (status) { DEBUGOUT("EEPROM read buffer - semaphore failed\n"); return status; } while (words) { if (words > FW_MAX_READ_BUFFER_SIZE / 2) words_to_read = FW_MAX_READ_BUFFER_SIZE / 2; else words_to_read = words; buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; buffer.hdr.req.buf_lenh = 0; buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; /* convert offset from words to bytes */ buffer.address = IXGBE_CPU_TO_BE32((offset + current_word) * 2); buffer.length = IXGBE_CPU_TO_BE16(words_to_read * 2); buffer.pad2 = 0; buffer.data = 0; buffer.pad3 = 0; status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer), IXGBE_HI_COMMAND_TIMEOUT); if (status) { DEBUGOUT("Host interface command failed\n"); goto out; } for (i = 0; i < words_to_read; i++) { u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) + 2 * i; u32 value = IXGBE_READ_REG(hw, reg); data[current_word] = (u16)(value & 0xffff); current_word++; i++; if (i < words_to_read) { value >>= 16; data[current_word] = (u16)(value & 0xffff); current_word++; } } words -= words_to_read; } out: hw->mac.ops.release_swfw_sync(hw, mask); return status; } /** * ixgbe_write_ee_hostif_data_X550 - Write EEPROM word using hostif * @hw: pointer to hardware structure * @offset: offset of word in the EEPROM to write * @data: word to write to the EEPROM * * Write a 16 bit word to the EEPROM using the hostif. **/ s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 data) { s32 status; struct ixgbe_hic_write_shadow_ram buffer; DEBUGFUNC("ixgbe_write_ee_hostif_data_X550"); buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD; buffer.hdr.req.buf_lenh = 0; buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN; buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; /* one word */ buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16)); buffer.data = data; buffer.address = IXGBE_CPU_TO_BE32(offset * 2); status = ixgbe_host_interface_command(hw, (u32 *)&buffer, sizeof(buffer), - IXGBE_HI_COMMAND_TIMEOUT, TRUE); + IXGBE_HI_COMMAND_TIMEOUT, true); if (status != IXGBE_SUCCESS) { DEBUGOUT2("for offset %04x failed with status %d\n", offset, status); return status; } if (buffer.hdr.rsp.buf_lenh_status != FW_CEM_RESP_STATUS_SUCCESS) { DEBUGOUT2("for offset %04x host interface return status %02x\n", offset, buffer.hdr.rsp.buf_lenh_status); return IXGBE_ERR_HOST_INTERFACE_COMMAND; } return status; } /** * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif * @hw: pointer to hardware structure * @offset: offset of word in the EEPROM to write * @data: word to write to the EEPROM * * Write a 16 bit word to the EEPROM using the hostif. **/ s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data) { s32 status = IXGBE_SUCCESS; DEBUGFUNC("ixgbe_write_ee_hostif_X550"); if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == IXGBE_SUCCESS) { status = ixgbe_write_ee_hostif_data_X550(hw, offset, data); hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); } else { DEBUGOUT("write ee hostif failed to get semaphore\n"); status = IXGBE_ERR_SWFW_SYNC; } return status; } /** * ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif * @hw: pointer to hardware structure * @offset: offset of word in the EEPROM to write * @words: number of words * @data: word(s) to write to the EEPROM * * Write 16 bit word(s) to the EEPROM using the hostif.
**/ s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { s32 status = IXGBE_SUCCESS; u32 i = 0; DEBUGFUNC("ixgbe_write_ee_hostif_buffer_X550"); /* Take semaphore for the entire operation. */ status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM); if (status != IXGBE_SUCCESS) { DEBUGOUT("EEPROM write buffer - semaphore failed\n"); goto out; } for (i = 0; i < words; i++) { status = ixgbe_write_ee_hostif_data_X550(hw, offset + i, data[i]); if (status != IXGBE_SUCCESS) { DEBUGOUT("Eeprom buffered write failed\n"); break; } } hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); out: return status; } /** * ixgbe_checksum_ptr_x550 - Checksum one pointer region * @hw: pointer to hardware structure * @ptr: pointer offset in eeprom * @size: size of section pointed by ptr, if 0 first word will be used as size * @csum: address of checksum to update * @buffer: pointer to buffer containing calculated checksum * @buffer_size: size of buffer * * Returns error status for any failure */ static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr, u16 size, u16 *csum, u16 *buffer, u32 buffer_size) { u16 buf[256]; s32 status; u16 length, bufsz, i, start; u16 *local_buffer; bufsz = sizeof(buf) / sizeof(buf[0]); /* Read a chunk at the pointer location */ if (!buffer) { status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf); if (status) { DEBUGOUT("Failed to read EEPROM image\n"); return status; } local_buffer = buf; } else { if (buffer_size < ptr) return IXGBE_ERR_PARAM; local_buffer = &buffer[ptr]; } if (size) { start = 0; length = size; } else { start = 1; length = local_buffer[0]; /* Skip pointer section if length is invalid. */ if (length == 0xFFFF || length == 0 || (ptr + length) >= hw->eeprom.word_size) return IXGBE_SUCCESS; } if (buffer && ((u32)start + (u32)length > buffer_size)) return IXGBE_ERR_PARAM; for (i = start; length; i++, length--) { if (i == bufsz && !buffer) { ptr += bufsz; i = 0; if (length < bufsz) bufsz = length; /* Read a chunk at the pointer location */ status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf); if (status) { DEBUGOUT("Failed to read EEPROM image\n"); return status; } } *csum += local_buffer[i]; } return IXGBE_SUCCESS; } /** * ixgbe_calc_checksum_X550 - Calculates and returns the checksum * @hw: pointer to hardware structure * @buffer: pointer to buffer containing calculated checksum * @buffer_size: size of buffer * * Returns a negative error code on error, or the 16-bit checksum **/ s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size) { u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1]; u16 *local_buffer; s32 status; u16 checksum = 0; u16 pointer, i, size; DEBUGFUNC("ixgbe_calc_eeprom_checksum_X550"); hw->eeprom.ops.init_params(hw); if (!buffer) { /* Read pointer area */ status = ixgbe_read_ee_hostif_buffer_X550(hw, 0, IXGBE_EEPROM_LAST_WORD + 1, eeprom_ptrs); if (status) { DEBUGOUT("Failed to read EEPROM image\n"); return status; } local_buffer = eeprom_ptrs; } else { if (buffer_size < IXGBE_EEPROM_LAST_WORD) return IXGBE_ERR_PARAM; local_buffer = buffer; } /* * For X550 hardware include 0x0-0x41 in the checksum, skip the * checksum word itself */ for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++) if (i != IXGBE_EEPROM_CHECKSUM) checksum += local_buffer[i]; /* * Include all data from pointers 0x3, 0x6-0xE. This excludes the * FW, PHY module, and PCIe Expansion/Option ROM pointers. 
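 *
 * The checksum returned at the end of this function is the value that
 * makes all included words sum to IXGBE_EEPROM_SUM; with S the running
 * sum accumulated below, it is computed as
 *
 *	checksum = (u16)IXGBE_EEPROM_SUM - S;
 *
 * so ixgbe_validate_eeprom_checksum_X550() can simply compare this
 * recomputed value with the checksum word stored in the EEPROM.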
*/ for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) { if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) continue; pointer = local_buffer[i]; /* Skip pointer section if the pointer is invalid. */ if (pointer == 0xFFFF || pointer == 0 || pointer >= hw->eeprom.word_size) continue; switch (i) { case IXGBE_PCIE_GENERAL_PTR: size = IXGBE_IXGBE_PCIE_GENERAL_SIZE; break; case IXGBE_PCIE_CONFIG0_PTR: case IXGBE_PCIE_CONFIG1_PTR: size = IXGBE_PCIE_CONFIG_SIZE; break; default: size = 0; break; } status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum, buffer, buffer_size); if (status) return status; } checksum = (u16)IXGBE_EEPROM_SUM - checksum; return (s32)checksum; } /** * ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum * @hw: pointer to hardware structure * * Returns a negative error code on error, or the 16-bit checksum **/ s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw) { return ixgbe_calc_checksum_X550(hw, NULL, 0); } /** * ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum * @hw: pointer to hardware structure * @checksum_val: calculated checksum * * Performs checksum calculation and validates the EEPROM checksum. If the * caller does not need checksum_val, the value can be NULL. **/ s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val) { s32 status; u16 checksum; u16 read_checksum = 0; DEBUGFUNC("ixgbe_validate_eeprom_checksum_X550"); /* Read the first word from the EEPROM. If this times out or fails, do * not continue or we could be in for a very long wait while every * EEPROM read fails */ status = hw->eeprom.ops.read(hw, 0, &checksum); if (status) { DEBUGOUT("EEPROM read failed\n"); return status; } status = hw->eeprom.ops.calc_checksum(hw); if (status < 0) return status; checksum = (u16)(status & 0xffff); status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); if (status) return status; /* Verify read checksum from EEPROM is the same as * calculated checksum */ if (read_checksum != checksum) { status = IXGBE_ERR_EEPROM_CHECKSUM; ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE, "Invalid EEPROM checksum"); } /* If the user cares, return the calculated checksum */ if (checksum_val) *checksum_val = checksum; return status; } /** * ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash * @hw: pointer to hardware structure * * After writing EEPROM to shadow RAM using EEWR register, software calculates * checksum and updates the EEPROM and instructs the hardware to update * the flash. **/ s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw) { s32 status; u16 checksum = 0; DEBUGFUNC("ixgbe_update_eeprom_checksum_X550"); /* Read the first word from the EEPROM. If this times out or fails, do * not continue or we could be in for a very long wait while every * EEPROM read fails */ status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum); if (status) { DEBUGOUT("EEPROM read failed\n"); return status; } status = ixgbe_calc_eeprom_checksum_X550(hw); if (status < 0) return status; checksum = (u16)(status & 0xffff); status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM, checksum); if (status) return status; status = ixgbe_update_flash_X550(hw); return status; } /** * ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device * @hw: pointer to hardware structure * * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash. 
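 *
 * A typical caller-side sequence (sketch; it mirrors
 * ixgbe_update_eeprom_checksum_X550() above, and offset/value are
 * illustrative):
 *
 *	status = ixgbe_write_ee_hostif_X550(hw, offset, value);
 *	if (status == IXGBE_SUCCESS)
 *		status = ixgbe_update_eeprom_checksum_X550(hw);
 *
 * where the checksum update ends by calling this function to commit
 * the shadow RAM contents to flash.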
**/ s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw) { s32 status = IXGBE_SUCCESS; union ixgbe_hic_hdr2 buffer; DEBUGFUNC("ixgbe_update_flash_X550"); buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD; buffer.req.buf_lenh = 0; buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN; buffer.req.checksum = FW_DEFAULT_CHECKSUM; status = ixgbe_host_interface_command(hw, (u32 *)&buffer, sizeof(buffer), - IXGBE_HI_COMMAND_TIMEOUT, FALSE); + IXGBE_HI_COMMAND_TIMEOUT, false); return status; } /** * ixgbe_get_supported_physical_layer_X550em - Returns physical layer type * @hw: pointer to hardware structure * * Determines physical layer capabilities of the current configuration. **/ u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw) { u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; u16 ext_ability = 0; DEBUGFUNC("ixgbe_get_supported_physical_layer_X550em"); hw->phy.ops.identify(hw); switch (hw->phy.type) { case ixgbe_phy_x550em_kr: if (hw->mac.type == ixgbe_mac_X550EM_a) { if (hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) { physical_layer = IXGBE_PHYSICAL_LAYER_2500BASE_KX; break; } else if (hw->device_id == IXGBE_DEV_ID_X550EM_A_KR_L) { physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX; break; } } /* fall through */ case ixgbe_phy_x550em_xfi: physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR | IXGBE_PHYSICAL_LAYER_1000BASE_KX; break; case ixgbe_phy_x550em_kx4: physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4 | IXGBE_PHYSICAL_LAYER_1000BASE_KX; break; case ixgbe_phy_x550em_ext_t: hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; break; case ixgbe_phy_fw: if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_1GB_FULL) physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_100_FULL) physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_10_FULL) physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T; break; case ixgbe_phy_sgmii: physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX; break; case ixgbe_phy_ext_1g_t: physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T; break; default: break; } if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw); return physical_layer; } /** * ixgbe_get_bus_info_x550em - Set PCI bus info * @hw: pointer to hardware structure * * Sets bus link width and speed to unknown because X550em is * not a PCI device. 
**/ s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw) { DEBUGFUNC("ixgbe_get_bus_info_x550em"); hw->bus.width = ixgbe_bus_width_unknown; hw->bus.speed = ixgbe_bus_speed_unknown; hw->mac.ops.set_lan_id(hw); return IXGBE_SUCCESS; } /** * ixgbe_disable_rx_x550 - Disable RX unit * @hw: pointer to hardware structure * * Disables the Rx DMA unit for x550 **/ void ixgbe_disable_rx_x550(struct ixgbe_hw *hw) { u32 rxctrl, pfdtxgswc; s32 status; struct ixgbe_hic_disable_rxen fw_cmd; DEBUGFUNC("ixgbe_disable_rx_x550"); rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); if (rxctrl & IXGBE_RXCTRL_RXEN) { pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) { pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN; IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); - hw->mac.set_lben = TRUE; + hw->mac.set_lben = true; } else { - hw->mac.set_lben = FALSE; + hw->mac.set_lben = false; } fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD; fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN; fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; fw_cmd.port_number = (u8)hw->bus.lan_id; status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, sizeof(struct ixgbe_hic_disable_rxen), - IXGBE_HI_COMMAND_TIMEOUT, TRUE); + IXGBE_HI_COMMAND_TIMEOUT, true); /* If we fail - disable RX using register write */ if (status) { rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); if (rxctrl & IXGBE_RXCTRL_RXEN) { rxctrl &= ~IXGBE_RXCTRL_RXEN; IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl); } } } } /** * ixgbe_enter_lplu_t_x550em - Transition to low power states * @hw: pointer to hardware structure * * Configures Low Power Link Up on transition to low power states * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the * X557 PHY immediately prior to entering LPLU. **/ s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) { u16 an_10g_cntl_reg, autoneg_reg, speed; s32 status; ixgbe_link_speed lcd_speed; u32 save_autoneg; bool link_up; /* SW LPLU not required on later HW revisions. */ if ((hw->mac.type == ixgbe_mac_X550EM_x) && (IXGBE_FUSES0_REV_MASK & IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)))) return IXGBE_SUCCESS; /* If blocked by MNG FW, then don't restart AN */ if (ixgbe_check_reset_blocked(hw)) return IXGBE_SUCCESS; status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); if (status != IXGBE_SUCCESS) return status; status = ixgbe_read_eeprom(hw, NVM_INIT_CTRL_3, &hw->eeprom.ctrl_word_3); if (status != IXGBE_SUCCESS) return status; /* If link is down, LPLU disabled in NVM, WoL disabled, or manageability * disabled, then force link down by entering low power mode. */ if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) || !(hw->wol_enabled || ixgbe_mng_present(hw))) - return ixgbe_set_copper_phy_power(hw, FALSE); + return ixgbe_set_copper_phy_power(hw, false); /* Determine LCD */ status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed); if (status != IXGBE_SUCCESS) return status; /* If no valid LCD link speed, then force link down and exit.
*/ if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN) - return ixgbe_set_copper_phy_power(hw, FALSE); + return ixgbe_set_copper_phy_power(hw, false); status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &speed); if (status != IXGBE_SUCCESS) return status; /* If no link now, speed is invalid so take link down */ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); if (status != IXGBE_SUCCESS) - return ixgbe_set_copper_phy_power(hw, FALSE); + return ixgbe_set_copper_phy_power(hw, false); /* clear everything but the speed bits */ speed &= IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK; /* If current speed is already LCD, then exit. */ if (((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB) && (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) || ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) && (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL))) return status; /* Clear AN completed indication */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); if (status != IXGBE_SUCCESS) return status; status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_10g_cntl_reg); if (status != IXGBE_SUCCESS) return status; status = hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); if (status != IXGBE_SUCCESS) return status; save_autoneg = hw->phy.autoneg_advertised; /* Setup link at lowest common link speed */ - status = hw->mac.ops.setup_link(hw, lcd_speed, FALSE); + status = hw->mac.ops.setup_link(hw, lcd_speed, false); /* restore autoneg from before setting lplu speed */ hw->phy.autoneg_advertised = save_autoneg; return status; } /** * ixgbe_get_lcd_t_x550em - Determine lowest common denominator * @hw: pointer to hardware structure * @lcd_speed: pointer to lowest common link speed * * Determine lowest common link speed with link partner. **/ s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed) { u16 an_lp_status; s32 status; u16 word = hw->eeprom.ctrl_word_3; *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN; status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_lp_status); if (status != IXGBE_SUCCESS) return status; /* If link partner advertised 1G, return 1G */ if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) { *lcd_speed = IXGBE_LINK_SPEED_1GB_FULL; return status; } /* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */ if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) || (word & NVM_INIT_CTRL_3_D10GMP_PORT0)) return status; /* Link partner not capable of lower speeds, return 10G */ *lcd_speed = IXGBE_LINK_SPEED_10GB_FULL; return status; } /** * ixgbe_setup_fc_X550em - Set up flow control * @hw: pointer to hardware structure * * Called at init time to set up flow control. **/ s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw) { s32 ret_val = IXGBE_SUCCESS; u32 pause, asm_dir, reg_val; DEBUGFUNC("ixgbe_setup_fc_X550em"); /* Validate the requested mode */ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; goto out; } /* 10gig parts do not have a word in the EEPROM to determine the * default flow control setting, so we explicitly set it to full. */ if (hw->fc.requested_mode == ixgbe_fc_default) hw->fc.requested_mode = ixgbe_fc_full; /* Determine PAUSE and ASM_DIR bits.
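 *
 * For quick reference, the switch below encodes the requested mode
 * as the standard PAUSE/ASM_DIR advertisement pair (derived directly
 * from the cases that follow):
 *
 *	ixgbe_fc_none:     pause = 0, asm_dir = 0
 *	ixgbe_fc_tx_pause: pause = 0, asm_dir = 1
 *	ixgbe_fc_rx_pause: advertised as full, pause = 1, asm_dir = 1
 *	ixgbe_fc_full:     pause = 1, asm_dir = 1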
*/ switch (hw->fc.requested_mode) { case ixgbe_fc_none: pause = 0; asm_dir = 0; break; case ixgbe_fc_tx_pause: pause = 0; asm_dir = 1; break; case ixgbe_fc_rx_pause: /* Rx Flow control is enabled and Tx Flow control is * disabled by software override. Since there really * isn't a way to advertise that we are capable of RX * Pause ONLY, we will advertise that we support both * symmetric and asymmetric Rx PAUSE, as such we fall * through to the fc_full statement. Later, we will * disable the adapter's ability to send PAUSE frames. */ case ixgbe_fc_full: pause = 1; asm_dir = 1; break; default: ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Flow control param set incorrectly\n"); ret_val = IXGBE_ERR_CONFIG; goto out; } switch (hw->device_id) { case IXGBE_DEV_ID_X550EM_X_KR: case IXGBE_DEV_ID_X550EM_A_KR: case IXGBE_DEV_ID_X550EM_A_KR_L: ret_val = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); if (ret_val != IXGBE_SUCCESS) goto out; reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); if (pause) reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; if (asm_dir) reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; ret_val = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); /* This device does not fully support AN. */ - hw->fc.disable_fc_autoneg = TRUE; + hw->fc.disable_fc_autoneg = true; break; case IXGBE_DEV_ID_X550EM_X_XFI: - hw->fc.disable_fc_autoneg = TRUE; + hw->fc.disable_fc_autoneg = true; break; default: break; } out: return ret_val; } /** * ixgbe_fc_autoneg_backplane_x550em_a - Enable flow control IEEE clause 37 * @hw: pointer to hardware structure * * Enable flow control according to IEEE clause 37. **/ void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw) { u32 link_s1, lp_an_page_low, an_cntl_1; s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED; ixgbe_link_speed speed; bool link_up; /* AN should have completed when the cable was plugged in. * Look for reasons to bail out. Bail out if: * - FC autoneg is disabled, or if * - link is not up.
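 *
 * When neither bail-out below applies, the local and link partner
 * advertisement words are handed to ixgbe_negotiate_fc(), which
 * resolves them along the lines of IEEE 802.3 Annex 28B (sketch):
 *
 *	local SYM and lp SYM            -> full (or rx_pause if requested)
 *	local ASM only, lp SYM and ASM  -> tx_pause
 *	local SYM and ASM, lp ASM only  -> rx_pause
 *	anything else                   -> none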
*/ if (hw->fc.disable_fc_autoneg) { ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, "Flow control autoneg is disabled"); goto out; } - hw->mac.ops.check_link(hw, &speed, &link_up, FALSE); + hw->mac.ops.check_link(hw, &speed, &link_up, false); if (!link_up) { ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down"); goto out; } /* Check if auto-negotiation has completed */ status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KRM_LINK_S1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1); if (status != IXGBE_SUCCESS || (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) { DEBUGOUT("Auto-Negotiation did not complete\n"); status = IXGBE_ERR_FC_NOT_NEGOTIATED; goto out; } /* Read the 10g AN autoc and LP ability registers and resolve * local flow control settings accordingly */ status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1); if (status != IXGBE_SUCCESS) { DEBUGOUT("Auto-Negotiation did not complete\n"); goto out; } status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low); if (status != IXGBE_SUCCESS) { DEBUGOUT("Auto-Negotiation did not complete\n"); goto out; } status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low, IXGBE_KRM_AN_CNTL_1_SYM_PAUSE, IXGBE_KRM_AN_CNTL_1_ASM_PAUSE, IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE, IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE); out: if (status == IXGBE_SUCCESS) { - hw->fc.fc_was_autonegged = TRUE; + hw->fc.fc_was_autonegged = true; } else { - hw->fc.fc_was_autonegged = FALSE; + hw->fc.fc_was_autonegged = false; hw->fc.current_mode = hw->fc.requested_mode; } } /** * ixgbe_fc_autoneg_fiber_x550em_a - passthrough FC settings * @hw: pointer to hardware structure * **/ void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw) { - hw->fc.fc_was_autonegged = FALSE; + hw->fc.fc_was_autonegged = false; hw->fc.current_mode = hw->fc.requested_mode; } /** * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37 * @hw: pointer to hardware structure * * Enable flow control according to IEEE clause 37. **/ void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw) { s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED; u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; ixgbe_link_speed speed; bool link_up; /* AN should have completed when the cable was plugged in. * Look for reasons to bail out. Bail out if: * - FC autoneg is disabled, or if * - link is not up. 
*/ if (hw->fc.disable_fc_autoneg) { ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, "Flow control autoneg is disabled"); goto out; } - hw->mac.ops.check_link(hw, &speed, &link_up, FALSE); + hw->mac.ops.check_link(hw, &speed, &link_up, false); if (!link_up) { ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down"); goto out; } /* Check if auto-negotiation has completed */ status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info); if (status != IXGBE_SUCCESS || !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) { DEBUGOUT("Auto-Negotiation did not complete\n"); status = IXGBE_ERR_FC_NOT_NEGOTIATED; goto out; } /* Negotiate the flow control */ status = ixgbe_negotiate_fc(hw, info[0], info[0], FW_PHY_ACT_GET_LINK_INFO_FC_RX, FW_PHY_ACT_GET_LINK_INFO_FC_TX, FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX, FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX); out: if (status == IXGBE_SUCCESS) { - hw->fc.fc_was_autonegged = TRUE; + hw->fc.fc_was_autonegged = true; } else { - hw->fc.fc_was_autonegged = FALSE; + hw->fc.fc_was_autonegged = false; hw->fc.current_mode = hw->fc.requested_mode; } } /** * ixgbe_setup_fc_backplane_x550em_a - Set up flow control * @hw: pointer to hardware structure * * Called at init time to set up flow control. **/ s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw) { s32 status = IXGBE_SUCCESS; u32 an_cntl = 0; DEBUGFUNC("ixgbe_setup_fc_backplane_x550em_a"); /* Validate the requested mode */ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); return IXGBE_ERR_INVALID_LINK_SETTINGS; } if (hw->fc.requested_mode == ixgbe_fc_default) hw->fc.requested_mode = ixgbe_fc_full; /* Set up the 1G and 10G flow control advertisement registers so the * HW will be able to do FC autoneg once the cable is plugged in. If * we link at 10G, the 1G advertisement is harmless and vice versa. */ status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl); if (status != IXGBE_SUCCESS) { DEBUGOUT("Auto-Negotiation did not complete\n"); return status; } /* The possible values of fc.requested_mode are: * 0: Flow control is completely disabled * 1: Rx flow control is enabled (we can receive pause frames, * but not send pause frames). * 2: Tx flow control is enabled (we can send pause frames but * we do not support receiving pause frames). * 3: Both Rx and Tx flow control (symmetric) are enabled. * other: Invalid. */ switch (hw->fc.requested_mode) { case ixgbe_fc_none: /* Flow control completely disabled by software override. */ an_cntl &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); break; case ixgbe_fc_tx_pause: /* Tx Flow control is enabled, and Rx Flow control is * disabled by software override. */ an_cntl |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; an_cntl &= ~IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; break; case ixgbe_fc_rx_pause: /* Rx Flow control is enabled and Tx Flow control is * disabled by software override. Since there really * isn't a way to advertise that we are capable of RX * Pause ONLY, we will advertise that we support both * symmetric and asymmetric Rx PAUSE, as such we fall * through to the fc_full statement. Later, we will * disable the adapter's ability to send PAUSE frames. */ case ixgbe_fc_full: /* Flow control (both Rx and Tx) is enabled by SW override. 
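 * Advertising both SYM_PAUSE and ASM_PAUSE lets the link partner
 * resolve either symmetric PAUSE or Rx-only PAUSE, matching the
 * fall-through from the rx_pause case above.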
*/ an_cntl |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; break; default: ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Flow control param set incorrectly\n"); return IXGBE_ERR_CONFIG; } status = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, an_cntl); /* Restart auto-negotiation. */ status = ixgbe_restart_an_internal_phy_x550em(hw); return status; } /** * ixgbe_set_mux - Set mux for port 1 access with CS4227 * @hw: pointer to hardware structure * @state: set mux if 1, clear if 0 */ static void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state) { u32 esdp; if (!hw->bus.lan_id) return; esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); if (state) esdp |= IXGBE_ESDP_SDP1; else esdp &= ~IXGBE_ESDP_SDP1; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); IXGBE_WRITE_FLUSH(hw); } /** * ixgbe_acquire_swfw_sync_X550em - Acquire SWFW semaphore * @hw: pointer to hardware structure * @mask: Mask to specify which semaphore to acquire * * Acquires the SWFW semaphore and sets the I2C MUX **/ s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) { s32 status; DEBUGFUNC("ixgbe_acquire_swfw_sync_X550em"); status = ixgbe_acquire_swfw_sync_X540(hw, mask); if (status) return status; if (mask & IXGBE_GSSR_I2C_MASK) ixgbe_set_mux(hw, 1); return IXGBE_SUCCESS; } /** * ixgbe_release_swfw_sync_X550em - Release SWFW semaphore * @hw: pointer to hardware structure * @mask: Mask to specify which semaphore to release * * Releases the SWFW semaphore and clears the I2C MUX **/ void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) { DEBUGFUNC("ixgbe_release_swfw_sync_X550em"); if (mask & IXGBE_GSSR_I2C_MASK) ixgbe_set_mux(hw, 0); ixgbe_release_swfw_sync_X540(hw, mask); } /** * ixgbe_acquire_swfw_sync_X550a - Acquire SWFW semaphore * @hw: pointer to hardware structure * @mask: Mask to specify which semaphore to acquire * * Acquires the SWFW semaphore and gets the shared phy token as needed */ static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask) { u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM; int retries = FW_PHY_TOKEN_RETRIES; s32 status = IXGBE_SUCCESS; DEBUGFUNC("ixgbe_acquire_swfw_sync_X550a"); while (--retries) { status = IXGBE_SUCCESS; if (hmask) status = ixgbe_acquire_swfw_sync_X540(hw, hmask); if (status) { DEBUGOUT1("Could not acquire SWFW semaphore, Status = %d\n", status); return status; } if (!(mask & IXGBE_GSSR_TOKEN_SM)) return IXGBE_SUCCESS; status = ixgbe_get_phy_token(hw); if (status == IXGBE_ERR_TOKEN_RETRY) DEBUGOUT1("Could not acquire PHY token, Status = %d\n", status); if (status == IXGBE_SUCCESS) return IXGBE_SUCCESS; if (hmask) ixgbe_release_swfw_sync_X540(hw, hmask); if (status != IXGBE_ERR_TOKEN_RETRY) { DEBUGOUT1("Unable to retry acquiring the PHY token, Status = %d\n", status); return status; } } DEBUGOUT1("Semaphore acquisition retries failed!: PHY ID = 0x%08X\n", hw->phy.id); return status; } /** * ixgbe_release_swfw_sync_X550a - Release SWFW semaphore * @hw: pointer to hardware structure * @mask: Mask to specify which semaphore to release * * Releases the SWFW semaphore and puts the shared phy token as needed */ static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask) { u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM; DEBUGFUNC("ixgbe_release_swfw_sync_X550a"); if (mask & IXGBE_GSSR_TOKEN_SM) ixgbe_put_phy_token(hw); if (hmask) ixgbe_release_swfw_sync_X540(hw, hmask); } /** * ixgbe_read_phy_reg_x550a - Reads specified PHY register * @hw: pointer to hardware structure * @reg_addr: 32 bit address of PHY register to read 
* @device_type: 5 bit device type * @phy_data: Pointer to read data from PHY register * * Reads a value from a specified PHY register using the SWFW lock and PHY * Token. The PHY Token is needed since the MDIO is shared between two MAC * instances. **/ s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 *phy_data) { s32 status; u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM; DEBUGFUNC("ixgbe_read_phy_reg_x550a"); if (hw->mac.ops.acquire_swfw_sync(hw, mask)) return IXGBE_ERR_SWFW_SYNC; status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data); hw->mac.ops.release_swfw_sync(hw, mask); return status; } /** * ixgbe_write_phy_reg_x550a - Writes specified PHY register * @hw: pointer to hardware structure * @reg_addr: 32 bit PHY register to write * @device_type: 5 bit device type * @phy_data: Data to write to the PHY register * * Writes a value to specified PHY register using the SWFW lock and PHY Token. * The PHY Token is needed since the MDIO is shared between two MAC instances. **/ s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 phy_data) { s32 status; u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM; DEBUGFUNC("ixgbe_write_phy_reg_x550a"); if (hw->mac.ops.acquire_swfw_sync(hw, mask) == IXGBE_SUCCESS) { status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type, phy_data); hw->mac.ops.release_swfw_sync(hw, mask); } else { status = IXGBE_ERR_SWFW_SYNC; } return status; } /** * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt * @hw: pointer to hardware structure * * Handle external Base T PHY interrupt. If high temperature * failure alarm then return error, else if link status change * then setup internal/external PHY link * * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature * failure alarm, else return PHY access status. */ s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw) { bool lsc; u32 status; status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc); if (status != IXGBE_SUCCESS) return status; if (lsc) return ixgbe_setup_internal_phy(hw); return IXGBE_SUCCESS; } /** * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed * @hw: pointer to hardware structure * @speed: new link speed - * @autoneg_wait_to_complete: TRUE when waiting for completion is needed + * @autoneg_wait_to_complete: true when waiting for completion is needed * * Setup internal/external PHY link speed based on link speed, then set * external PHY auto advertised link speed. * * Returns error status for any failure **/ s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { s32 status; ixgbe_link_speed force_speed; u32 i; bool link_up = false; DEBUGFUNC("ixgbe_setup_mac_link_t_X550em"); /* Setup internal/external PHY link speed to iXFI (10G), unless * only 1G is auto advertised then setup KX link. */ if (speed & IXGBE_LINK_SPEED_10GB_FULL) force_speed = IXGBE_LINK_SPEED_10GB_FULL; else force_speed = IXGBE_LINK_SPEED_1GB_FULL; /* If X552 and internal link mode is XFI, then setup XFI internal link. 
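 * The loop below then polls for up to one second (10 x 100 ms) for the
 * internal link to come up before the external PHY link speed is
 * configured.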
*/ if (hw->mac.type == ixgbe_mac_X550EM_x && !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { status = ixgbe_setup_ixfi_x550em(hw, &force_speed); if (status != IXGBE_SUCCESS) return status; /* Wait for the controller to acquire link */ for (i = 0; i < 10; i++) { msec_delay(100); status = ixgbe_check_link(hw, &force_speed, &link_up, false); if (status != IXGBE_SUCCESS) return status; if (link_up) break; } } return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete); } /** * ixgbe_check_link_t_X550em - Determine link and speed status * @hw: pointer to hardware structure * @speed: pointer to link speed - * @link_up: TRUE when link is up + * @link_up: true when link is up * @link_up_wait_to_complete: bool used to wait for link up or not * * Check that both the MAC and X557 external PHY have link. **/ s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up, bool link_up_wait_to_complete) { u32 status; u16 i, autoneg_status = 0; if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) return IXGBE_ERR_CONFIG; status = ixgbe_check_mac_link_generic(hw, speed, link_up, link_up_wait_to_complete); /* If check link fails or MAC link is not up, then return */ if (status != IXGBE_SUCCESS || !(*link_up)) return status; /* MAC link is up, so check the external X557 PHY link. * Link status is latching low, and can only be used to detect * link drop, and not the current status of the link without performing * back-to-back reads. */ for (i = 0; i < 2; i++) { status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_status); if (status != IXGBE_SUCCESS) return status; } /* If external PHY link is not up, then indicate link not up */ if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS)) - *link_up = FALSE; + *link_up = false; return IXGBE_SUCCESS; } /** * ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI * @hw: pointer to hardware structure **/ s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw) { s32 status; status = ixgbe_reset_phy_generic(hw); if (status != IXGBE_SUCCESS) return status; /* Configure Link Status Alarm and Temperature Threshold interrupts */ return ixgbe_enable_lasi_ext_t_x550em(hw); } /** * ixgbe_led_on_t_X550em - Turns on the software controllable LEDs. * @hw: pointer to hardware structure * @led_idx: led number to turn on **/ s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx) { u16 phy_data; DEBUGFUNC("ixgbe_led_on_t_X550em"); if (led_idx >= IXGBE_X557_MAX_LED_INDEX) return IXGBE_ERR_PARAM; /* To turn on the LED, set mode to ON. */ ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data); phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK; ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data); /* Some designs have the LEDs wired to the MAC */ return ixgbe_led_on_generic(hw, led_idx); } /** * ixgbe_led_off_t_X550em - Turns off the software controllable LEDs. * @hw: pointer to hardware structure * @led_idx: led number to turn off **/ s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx) { u16 phy_data; DEBUGFUNC("ixgbe_led_off_t_X550em"); if (led_idx >= IXGBE_X557_MAX_LED_INDEX) return IXGBE_ERR_PARAM; /* To turn off the LED, set mode to OFF. 
*/ ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data); phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK; ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data); /* Some designs have the LEDs wired to the MAC */ return ixgbe_led_off_generic(hw, led_idx); } /** * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware * @hw: pointer to the HW structure * @maj: driver version major number * @min: driver version minor number * @build: driver version build number * @sub: driver version sub build number * @len: length of driver_ver string * @driver_ver: driver string * * Sends driver version number to firmware through the manageability * block. On success returns IXGBE_SUCCESS, * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring the * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when the command fails. **/ s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build, u8 sub, u16 len, const char *driver_ver) { struct ixgbe_hic_drv_info2 fw_cmd; s32 ret_val = IXGBE_SUCCESS; int i; DEBUGFUNC("ixgbe_set_fw_drv_ver_x550"); if ((len == 0) || (driver_ver == NULL) || (len > sizeof(fw_cmd.driver_string))) return IXGBE_ERR_INVALID_ARGUMENT; fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len; fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; fw_cmd.port_num = (u8)hw->bus.func; fw_cmd.ver_maj = maj; fw_cmd.ver_min = min; fw_cmd.ver_build = build; fw_cmd.ver_sub = sub; fw_cmd.hdr.checksum = 0; memcpy(fw_cmd.driver_string, driver_ver, len); fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, sizeof(fw_cmd), IXGBE_HI_COMMAND_TIMEOUT, - TRUE); + true); if (ret_val != IXGBE_SUCCESS) continue; if (fw_cmd.hdr.cmd_or_resp.ret_status == FW_CEM_RESP_STATUS_SUCCESS) ret_val = IXGBE_SUCCESS; else ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; break; } return ret_val; } /** * ixgbe_fw_recovery_mode_X550 - Check FW NVM recovery mode * @hw: pointer to hardware structure * * Returns true if in FW NVM recovery mode. **/ bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw) { u32 fwsm; fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)); return !!(fwsm & IXGBE_FWSM_FW_NVM_RECOVERY_MODE); }
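/*
 * Minimal sketch of the IEEE 802.3 Annex 28B PAUSE resolution that
 * ixgbe_negotiate_fc() applies to the local and link-partner
 * SYM_PAUSE/ASM_PAUSE advertisement bits used in the flow-control
 * functions above. The enum and helper below (fc_res,
 * resolve_pause_annex_28b) are hypothetical illustration, not driver
 * API; the driver additionally downgrades the symmetric result to
 * Rx-only PAUSE when only ixgbe_fc_rx_pause was requested.
 */
#include <stdbool.h> /* for a standalone build of this sketch */

enum fc_res { FC_RES_NONE, FC_RES_RX_PAUSE, FC_RES_TX_PAUSE, FC_RES_FULL };

static enum fc_res
resolve_pause_annex_28b(bool loc_sym, bool loc_asm, bool lp_sym, bool lp_asm)
{
	if (loc_sym && lp_sym)
		return FC_RES_FULL;	/* both ends advertise symmetric PAUSE */
	if (!loc_sym && loc_asm && lp_sym && lp_asm)
		return FC_RES_TX_PAUSE;	/* we send PAUSE; the partner honors it */
	if (loc_sym && loc_asm && !lp_sym && lp_asm)
		return FC_RES_RX_PAUSE;	/* the partner sends PAUSE; we honor it */
	return FC_RES_NONE;	/* no compatible PAUSE configuration */
}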