Index: stable/10/sys/dev/e1000/e1000_80003es2lan.c
===================================================================
--- stable/10/sys/dev/e1000/e1000_80003es2lan.c	(revision 296054)
+++ stable/10/sys/dev/e1000/e1000_80003es2lan.c	(revision 296055)
@@ -1,1513 +1,1526 @@
/******************************************************************************

  Copyright (c) 2001-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
  THE POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
/*$FreeBSD$*/

/* 80003ES2LAN Gigabit Ethernet Controller (Copper)
 * 80003ES2LAN Gigabit Ethernet Controller (Serdes)
 */

#include "e1000_api.h"

static s32  e1000_acquire_phy_80003es2lan(struct e1000_hw *hw);
static void e1000_release_phy_80003es2lan(struct e1000_hw *hw);
static s32  e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw);
static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw);
static s32  e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
						   u32 offset, u16 *data);
static s32  e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
						    u32 offset, u16 data);
static s32  e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset,
					u16 words, u16 *data);
static s32  e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw);
static s32  e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw);
static s32  e1000_get_cable_length_80003es2lan(struct e1000_hw *hw);
static s32  e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
					       u16 *duplex);
static s32  e1000_reset_hw_80003es2lan(struct e1000_hw *hw);
static s32  e1000_init_hw_80003es2lan(struct e1000_hw *hw);
static s32  e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw);
static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw);
static s32  e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask);
static s32  e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex);
static s32  e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw);
static s32  e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw);
static s32  e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
					    u16 *data);
static s32  e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
					     u16 data);
static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw);
static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask);
static s32  e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw);
static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw);

/* A table for the GG82563 cable length where the range is defined
 * with a lower bound at "index" and the upper bound at
 * "index + 5".
 */
static const u16 e1000_gg82563_cable_length_table[] = {
	0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF };

#define GG82563_CABLE_LENGTH_TABLE_SIZE \
	(sizeof(e1000_gg82563_cable_length_table) / \
	 sizeof(e1000_gg82563_cable_length_table[0]))
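/*
 * Illustration (not in the original source): the table is read as a min/max
 * pair by e1000_get_cable_length_80003es2lan() below, e.g. a DSP distance
 * index of 2 yields min_cable_length = table[2] = 115 and
 * max_cable_length = table[2 + 5] = 150 (meters).
 */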
/**
 *  e1000_init_phy_params_80003es2lan - Init ESB2 PHY func ptrs.
 *  @hw: pointer to the HW structure
 **/
static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;

	DEBUGFUNC("e1000_init_phy_params_80003es2lan");

	if (hw->phy.media_type != e1000_media_type_copper) {
		phy->type = e1000_phy_none;
		return E1000_SUCCESS;
	} else {
		phy->ops.power_up = e1000_power_up_phy_copper;
		phy->ops.power_down = e1000_power_down_phy_copper_80003es2lan;
	}

	phy->addr = 1;
	phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
	phy->reset_delay_us = 100;
	phy->type = e1000_phy_gg82563;

	phy->ops.acquire = e1000_acquire_phy_80003es2lan;
	phy->ops.check_polarity = e1000_check_polarity_m88;
	phy->ops.check_reset_block = e1000_check_reset_block_generic;
	phy->ops.commit = e1000_phy_sw_reset_generic;
	phy->ops.get_cfg_done = e1000_get_cfg_done_80003es2lan;
	phy->ops.get_info = e1000_get_phy_info_m88;
	phy->ops.release = e1000_release_phy_80003es2lan;
	phy->ops.reset = e1000_phy_hw_reset_generic;
	phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic;

	phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan;
	phy->ops.get_cable_length = e1000_get_cable_length_80003es2lan;
	phy->ops.read_reg = e1000_read_phy_reg_gg82563_80003es2lan;
	phy->ops.write_reg = e1000_write_phy_reg_gg82563_80003es2lan;

	phy->ops.cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan;

	/* This can only be done after all function pointers are setup. */
	ret_val = e1000_get_phy_id(hw);

	/* Verify phy id */
	if (phy->id != GG82563_E_PHY_ID)
		return -E1000_ERR_PHY;

	return ret_val;
}

/**
 *  e1000_init_nvm_params_80003es2lan - Init ESB2 NVM func ptrs.
 *  @hw: pointer to the HW structure
 **/
static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = E1000_READ_REG(hw, E1000_EECD);
	u16 size;

	DEBUGFUNC("e1000_init_nvm_params_80003es2lan");

	nvm->opcode_bits = 8;
	nvm->delay_usec = 1;
	switch (nvm->override) {
	case e1000_nvm_override_spi_large:
		nvm->page_size = 32;
		nvm->address_bits = 16;
		break;
	case e1000_nvm_override_spi_small:
		nvm->page_size = 8;
		nvm->address_bits = 8;
		break;
	default:
		nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
		nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
		break;
	}

	nvm->type = e1000_nvm_eeprom_spi;

	size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
		     E1000_EECD_SIZE_EX_SHIFT);

	/* Added to a constant, "size" becomes the left-shift value
	 * for setting word_size.
	 */
	size += NVM_WORD_SIZE_BASE_SHIFT;

	/* EEPROM access above 16k is unsupported */
	if (size > 14)
		size = 14;
	nvm->word_size = 1 << size;

	/* Function Pointers */
	nvm->ops.acquire = e1000_acquire_nvm_80003es2lan;
	nvm->ops.read = e1000_read_nvm_eerd;
	nvm->ops.release = e1000_release_nvm_80003es2lan;
	nvm->ops.update = e1000_update_nvm_checksum_generic;
	nvm->ops.valid_led_default = e1000_valid_led_default_generic;
	nvm->ops.validate = e1000_validate_nvm_checksum_generic;
	nvm->ops.write = e1000_write_nvm_80003es2lan;

	return E1000_SUCCESS;
}

/**
 *  e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs.
 *  @hw: pointer to the HW structure
 **/
static s32 e1000_init_mac_params_80003es2lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;

	DEBUGFUNC("e1000_init_mac_params_80003es2lan");

	/* Set media type and media-dependent function pointers */
	switch (hw->device_id) {
	case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
		hw->phy.media_type = e1000_media_type_internal_serdes;
		mac->ops.check_for_link = e1000_check_for_serdes_link_generic;
		mac->ops.setup_physical_interface =
			e1000_setup_fiber_serdes_link_generic;
		break;
	default:
		hw->phy.media_type = e1000_media_type_copper;
		mac->ops.check_for_link = e1000_check_for_copper_link_generic;
		mac->ops.setup_physical_interface =
			e1000_setup_copper_link_80003es2lan;
		break;
	}

	/* Set mta register count */
	mac->mta_reg_count = 128;
	/* Set rar entry count */
	mac->rar_entry_count = E1000_RAR_ENTRIES;
	/* Set if part includes ASF firmware */
	mac->asf_firmware_present = TRUE;
	/* FWSM register */
	mac->has_fwsm = TRUE;
	/* ARC supported; valid only if manageability features are enabled. */
	mac->arc_subsystem_valid = !!(E1000_READ_REG(hw, E1000_FWSM) &
				      E1000_FWSM_MODE_MASK);
	/* Adaptive IFS not supported */
	mac->adaptive_ifs = FALSE;

	/* Function pointers */

	/* bus type/speed/width */
	mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic;
	/* reset */
	mac->ops.reset_hw = e1000_reset_hw_80003es2lan;
	/* hw initialization */
	mac->ops.init_hw = e1000_init_hw_80003es2lan;
	/* link setup */
	mac->ops.setup_link = e1000_setup_link_generic;
	/* check management mode */
	mac->ops.check_mng_mode = e1000_check_mng_mode_generic;
	/* multicast address update */
	mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
	/* writing VFTA */
	mac->ops.write_vfta = e1000_write_vfta_generic;
	/* clearing VFTA */
	mac->ops.clear_vfta = e1000_clear_vfta_generic;
	/* read mac address */
	mac->ops.read_mac_addr = e1000_read_mac_addr_80003es2lan;
	/* ID LED init */
	mac->ops.id_led_init = e1000_id_led_init_generic;
	/* blink LED */
	mac->ops.blink_led = e1000_blink_led_generic;
	/* setup LED */
	mac->ops.setup_led = e1000_setup_led_generic;
	/* cleanup LED */
	mac->ops.cleanup_led = e1000_cleanup_led_generic;
	/* turn on/off LED */
	mac->ops.led_on = e1000_led_on_generic;
	mac->ops.led_off = e1000_led_off_generic;
	/* clear hardware counters */
	mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan;
	/* link info */
	mac->ops.get_link_up_info = e1000_get_link_up_info_80003es2lan;

	/* set lan id for port to determine which phy lock to use */
	hw->mac.ops.set_lan_id(hw);

	return E1000_SUCCESS;
}

/**
 *  e1000_init_function_pointers_80003es2lan - Init ESB2 func ptrs.
 *  @hw: pointer to the HW structure
 *
 *  Called to initialize all function pointers and parameters.
 **/
void e1000_init_function_pointers_80003es2lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_init_function_pointers_80003es2lan");

	hw->mac.ops.init_params = e1000_init_mac_params_80003es2lan;
	hw->nvm.ops.init_params = e1000_init_nvm_params_80003es2lan;
	hw->phy.ops.init_params = e1000_init_phy_params_80003es2lan;
}

/**
 *  e1000_acquire_phy_80003es2lan - Acquire rights to access PHY
 *  @hw: pointer to the HW structure
 *
 *  A wrapper to acquire access rights to the correct PHY.
 **/
static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw)
{
	u16 mask;

	DEBUGFUNC("e1000_acquire_phy_80003es2lan");

	mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
	return e1000_acquire_swfw_sync_80003es2lan(hw, mask);
}

/**
 *  e1000_release_phy_80003es2lan - Release rights to access PHY
 *  @hw: pointer to the HW structure
 *
 *  A wrapper to release access rights to the correct PHY.
 **/
static void e1000_release_phy_80003es2lan(struct e1000_hw *hw)
{
	u16 mask;

	DEBUGFUNC("e1000_release_phy_80003es2lan");

	mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
	e1000_release_swfw_sync_80003es2lan(hw, mask);
}

/**
 *  e1000_acquire_mac_csr_80003es2lan - Acquire right to access Kumeran register
 *  @hw: pointer to the HW structure
 *
 *  Acquire the semaphore to access the Kumeran interface.
 **/
static s32 e1000_acquire_mac_csr_80003es2lan(struct e1000_hw *hw)
{
	u16 mask;

	DEBUGFUNC("e1000_acquire_mac_csr_80003es2lan");

	mask = E1000_SWFW_CSR_SM;

	return e1000_acquire_swfw_sync_80003es2lan(hw, mask);
}

/**
 *  e1000_release_mac_csr_80003es2lan - Release right to access Kumeran Register
 *  @hw: pointer to the HW structure
 *
 *  Release the semaphore used to access the Kumeran interface.
 **/
static void e1000_release_mac_csr_80003es2lan(struct e1000_hw *hw)
{
	u16 mask;

	DEBUGFUNC("e1000_release_mac_csr_80003es2lan");

	mask = E1000_SWFW_CSR_SM;

	e1000_release_swfw_sync_80003es2lan(hw, mask);
}

/**
 *  e1000_acquire_nvm_80003es2lan - Acquire rights to access NVM
 *  @hw: pointer to the HW structure
 *
 *  Acquire the semaphore to access the EEPROM.
 **/
static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw)
{
	s32 ret_val;

	DEBUGFUNC("e1000_acquire_nvm_80003es2lan");

	ret_val = e1000_acquire_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
	if (ret_val)
		return ret_val;

	ret_val = e1000_acquire_nvm_generic(hw);
	if (ret_val)
		e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);

	return ret_val;
}

/**
 *  e1000_release_nvm_80003es2lan - Relinquish rights to access NVM
 *  @hw: pointer to the HW structure
 *
 *  Release the semaphore used to access the EEPROM.
 **/
static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_release_nvm_80003es2lan");

	e1000_release_nvm_generic(hw);
	e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
}

/**
 *  e1000_acquire_swfw_sync_80003es2lan - Acquire SW/FW semaphore
 *  @hw: pointer to the HW structure
 *  @mask: specifies which semaphore to acquire
 *
 *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
 *  will also specify which port we're acquiring the lock for.
 **/
static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	u32 fwmask = mask << 16;
	s32 i = 0;
	s32 timeout = 50;

	DEBUGFUNC("e1000_acquire_swfw_sync_80003es2lan");

	while (i < timeout) {
		if (e1000_get_hw_semaphore_generic(hw))
			return -E1000_ERR_SWFW_SYNC;

		swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/* Firmware currently using resource (fwmask)
		 * or other software thread using resource (swmask)
		 */
		e1000_put_hw_semaphore_generic(hw);
		msec_delay_irq(5);
		i++;
	}

	if (i == timeout) {
		DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
		return -E1000_ERR_SWFW_SYNC;
	}

	swfw_sync |= swmask;
	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);

	e1000_put_hw_semaphore_generic(hw);

	return E1000_SUCCESS;
}
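/*
 * Minimal usage sketch (illustrative only, not in the original source):
 * every PHY/NVM/Kumeran access in this file is bracketed by the matching
 * acquire and release wrappers above so firmware and the other port never
 * race on the shared resource.  The helper name below is hypothetical.
 */
#if 0	/* example, never compiled */
static s32 example_read_under_lock(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	ret_val = e1000_acquire_phy_80003es2lan(hw);	/* take SW_FW_SYNC */
	if (ret_val)
		return ret_val;
	ret_val = e1000_read_phy_reg_mdic(hw, PHY_CONTROL, data);
	e1000_release_phy_80003es2lan(hw);		/* always release */
	return ret_val;
}
#endif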
/**
 *  e1000_release_swfw_sync_80003es2lan - Release SW/FW semaphore
 *  @hw: pointer to the HW structure
 *  @mask: specifies which semaphore to release
 *
 *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
 *  will also specify which port we're releasing the lock for.
 **/
static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;

	DEBUGFUNC("e1000_release_swfw_sync_80003es2lan");

	while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS)
		;	/* Empty */

	swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
	swfw_sync &= ~mask;
	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);

	e1000_put_hw_semaphore_generic(hw);
}

/**
 *  e1000_read_phy_reg_gg82563_80003es2lan - Read GG82563 PHY register
 *  @hw: pointer to the HW structure
 *  @offset: offset of the register to read
 *  @data: pointer to the data returned from the operation
 *
 *  Read the GG82563 PHY register.
 **/
static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
						  u32 offset, u16 *data)
{
	s32 ret_val;
	u32 page_select;
	u16 temp;

	DEBUGFUNC("e1000_read_phy_reg_gg82563_80003es2lan");

	ret_val = e1000_acquire_phy_80003es2lan(hw);
	if (ret_val)
		return ret_val;

	/* Select Configuration Page */
	if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
		page_select = GG82563_PHY_PAGE_SELECT;
	} else {
		/* Use Alternative Page Select register to access
		 * registers 30 and 31
		 */
		page_select = GG82563_PHY_PAGE_SELECT_ALT;
	}

	temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT);
	ret_val = e1000_write_phy_reg_mdic(hw, page_select, temp);
	if (ret_val) {
		e1000_release_phy_80003es2lan(hw);
		return ret_val;
	}

	if (hw->dev_spec._80003es2lan.mdic_wa_enable) {
		/* The "ready" bit in the MDIC register may be incorrectly set
		 * before the device has completed the "Page Select" MDI
		 * transaction.  So we wait 200us after each MDI command...
		 */
		usec_delay(200);

		/* ...and verify the command was successful. */
		ret_val = e1000_read_phy_reg_mdic(hw, page_select, &temp);
		if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
			e1000_release_phy_80003es2lan(hw);
			return -E1000_ERR_PHY;
		}

		usec_delay(200);

		ret_val = e1000_read_phy_reg_mdic(hw,
						  MAX_PHY_REG_ADDRESS & offset,
						  data);

		usec_delay(200);
	} else {
		ret_val = e1000_read_phy_reg_mdic(hw,
						  MAX_PHY_REG_ADDRESS & offset,
						  data);
	}

	e1000_release_phy_80003es2lan(hw);

	return ret_val;
}

/**
 *  e1000_write_phy_reg_gg82563_80003es2lan - Write GG82563 PHY register
 *  @hw: pointer to the HW structure
 *  @offset: offset of the register to write
 *  @data: value to write to the register
 *
 *  Write to the GG82563 PHY register.
 **/
static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
						   u32 offset, u16 data)
{
	s32 ret_val;
	u32 page_select;
	u16 temp;

	DEBUGFUNC("e1000_write_phy_reg_gg82563_80003es2lan");

	ret_val = e1000_acquire_phy_80003es2lan(hw);
	if (ret_val)
		return ret_val;

	/* Select Configuration Page */
	if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
		page_select = GG82563_PHY_PAGE_SELECT;
	} else {
		/* Use Alternative Page Select register to access
		 * registers 30 and 31
		 */
		page_select = GG82563_PHY_PAGE_SELECT_ALT;
	}

	temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT);
	ret_val = e1000_write_phy_reg_mdic(hw, page_select, temp);
	if (ret_val) {
		e1000_release_phy_80003es2lan(hw);
		return ret_val;
	}

	if (hw->dev_spec._80003es2lan.mdic_wa_enable) {
		/* The "ready" bit in the MDIC register may be incorrectly set
		 * before the device has completed the "Page Select" MDI
		 * transaction.  So we wait 200us after each MDI command...
		 */
		usec_delay(200);

		/* ...and verify the command was successful. */
		ret_val = e1000_read_phy_reg_mdic(hw, page_select, &temp);
		if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
			e1000_release_phy_80003es2lan(hw);
			return -E1000_ERR_PHY;
		}

		usec_delay(200);

		ret_val = e1000_write_phy_reg_mdic(hw,
						   MAX_PHY_REG_ADDRESS & offset,
						   data);

		usec_delay(200);
	} else {
		ret_val = e1000_write_phy_reg_mdic(hw,
						   MAX_PHY_REG_ADDRESS & offset,
						   data);
	}

	e1000_release_phy_80003es2lan(hw);

	return ret_val;
}

/**
 *  e1000_write_nvm_80003es2lan - Write to ESB2 NVM
 *  @hw: pointer to the HW structure
 *  @offset: offset of the word(s) to write
 *  @words: number of words to write
 *  @data: buffer of data to write to the NVM
 *
 *  Write "words" of data to the ESB2 NVM.
 **/
static s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset,
				       u16 words, u16 *data)
{
	DEBUGFUNC("e1000_write_nvm_80003es2lan");

	return e1000_write_nvm_spi(hw, offset, words, data);
}

/**
 *  e1000_get_cfg_done_80003es2lan - Wait for configuration to complete
 *  @hw: pointer to the HW structure
 *
 *  Wait a specific amount of time for manageability processes to complete.
 *  This is a function pointer entry point called by the phy module.
 **/
static s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw)
{
	s32 timeout = PHY_CFG_TIMEOUT;
	u32 mask = E1000_NVM_CFG_DONE_PORT_0;

	DEBUGFUNC("e1000_get_cfg_done_80003es2lan");

	if (hw->bus.func == 1)
		mask = E1000_NVM_CFG_DONE_PORT_1;

	while (timeout) {
		if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask)
			break;
		msec_delay(1);
		timeout--;
	}
	if (!timeout) {
		DEBUGOUT("MNG configuration cycle has not completed.\n");
		return -E1000_ERR_RESET;
	}

	return E1000_SUCCESS;
}

/**
 *  e1000_phy_force_speed_duplex_80003es2lan - Force PHY speed and duplex
 *  @hw: pointer to the HW structure
 *
 *  Force the speed and duplex settings onto the PHY.  This is a
 *  function pointer entry point called by the phy module.
 **/
static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 phy_data;
	bool link;

	DEBUGFUNC("e1000_phy_force_speed_duplex_80003es2lan");

	if (!(hw->phy.ops.read_reg))
		return E1000_SUCCESS;

	/* Clear Auto-Crossover to force MDI manually.  M88E1000 requires MDI
	 * forced whenever speed and duplex are forced.
	 */
	ret_val = hw->phy.ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
	if (ret_val)
		return ret_val;

	phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_AUTO;
	ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_SPEC_CTRL, phy_data);
	if (ret_val)
		return ret_val;

	DEBUGOUT1("GG82563 PSCR: %X\n", phy_data);

	ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_data);
	if (ret_val)
		return ret_val;

	e1000_phy_force_speed_duplex_setup(hw, &phy_data);

	/* Reset the phy to commit changes. */
	phy_data |= MII_CR_RESET;

	ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_data);
	if (ret_val)
		return ret_val;

	usec_delay(1);

	if (hw->phy.autoneg_wait_to_complete) {
		DEBUGOUT("Waiting for forced speed/duplex link on GG82563 phy.\n");

		ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
						     100000, &link);
		if (ret_val)
			return ret_val;

		if (!link) {
			/* We didn't get link.
			 * Reset the DSP and cross our fingers.
			 */
			ret_val = e1000_phy_reset_dsp_generic(hw);
			if (ret_val)
				return ret_val;
		}

		/* Try once more */
		ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
						     100000, &link);
		if (ret_val)
			return ret_val;
	}

	ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
				       &phy_data);
	if (ret_val)
		return ret_val;

	/* Resetting the phy means we need to verify the TX_CLK corresponds
	 * to the link speed.  10Mbps -> 2.5MHz, else 25MHz.
	 */
	phy_data &= ~GG82563_MSCR_TX_CLK_MASK;
	if (hw->mac.forced_speed_duplex & E1000_ALL_10_SPEED)
		phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5;
	else
		phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25;

	/* In addition, we must re-enable CRS on Tx for both half and full
	 * duplex.
	 */
	phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
	ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
					phy_data);

	return ret_val;
}

/**
 *  e1000_get_cable_length_80003es2lan - Set approximate cable length
 *  @hw: pointer to the HW structure
 *
 *  Find the approximate cable length as measured by the GG82563 PHY.
 *  This is a function pointer entry point called by the phy module.
 **/
static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 phy_data, index;

	DEBUGFUNC("e1000_get_cable_length_80003es2lan");

	if (!(hw->phy.ops.read_reg))
		return E1000_SUCCESS;

	ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_DSP_DISTANCE,
				       &phy_data);
	if (ret_val)
		return ret_val;

	index = phy_data & GG82563_DSPD_CABLE_LENGTH;

	if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5)
		return -E1000_ERR_PHY;

	phy->min_cable_length = e1000_gg82563_cable_length_table[index];
	phy->max_cable_length = e1000_gg82563_cable_length_table[index + 5];

	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;

	return E1000_SUCCESS;
}

/**
 *  e1000_get_link_up_info_80003es2lan - Report speed and duplex
 *  @hw: pointer to the HW structure
 *  @speed: pointer to speed buffer
 *  @duplex: pointer to duplex buffer
 *
 *  Retrieve the current speed and duplex configuration.
 **/
static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
					      u16 *duplex)
{
	s32 ret_val;

	DEBUGFUNC("e1000_get_link_up_info_80003es2lan");

	if (hw->phy.media_type == e1000_media_type_copper) {
		ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed,
								    duplex);
		hw->phy.ops.cfg_on_link_up(hw);
	} else {
		ret_val = e1000_get_speed_and_duplex_fiber_serdes_generic(hw,
								    speed,
								    duplex);
	}

	return ret_val;
}

/**
 *  e1000_reset_hw_80003es2lan - Reset the ESB2 controller
 *  @hw: pointer to the HW structure
 *
 *  Perform a global reset to the ESB2 controller.
 **/
static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;
	u16 kum_reg_data;

	DEBUGFUNC("e1000_reset_hw_80003es2lan");

	/* Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = e1000_disable_pcie_master_generic(hw);
	if (ret_val)
		DEBUGOUT("PCI-E Master disable polling has failed.\n");

	DEBUGOUT("Masking off all interrupts\n");
	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);

	E1000_WRITE_REG(hw, E1000_RCTL, 0);
	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
	E1000_WRITE_FLUSH(hw);

	msec_delay(10);

	ctrl = E1000_READ_REG(hw, E1000_CTRL);

	ret_val = e1000_acquire_phy_80003es2lan(hw);
	if (ret_val)
		return ret_val;

	DEBUGOUT("Issuing a global reset to MAC\n");
	E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
	e1000_release_phy_80003es2lan(hw);

	/* Disable IBIST slave mode (far-end loopback) */
-	e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
-					&kum_reg_data);
-	kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
-	e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
-					 kum_reg_data);
+	ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
+				E1000_KMRNCTRLSTA_INBAND_PARAM, &kum_reg_data);
+	if (!ret_val) {
+		kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
+		ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+					E1000_KMRNCTRLSTA_INBAND_PARAM,
+					kum_reg_data);
+		if (ret_val)
+			DEBUGOUT("Error disabling far-end loopback\n");
+	} else
+		DEBUGOUT("Error disabling far-end loopback\n");

	ret_val = e1000_get_auto_rd_done_generic(hw);
	if (ret_val)
		/* We don't want to continue accessing MAC registers. */
		return ret_val;

	/* Clear any pending interrupt events. */
	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
	E1000_READ_REG(hw, E1000_ICR);

	return e1000_check_alt_mac_addr_generic(hw);
}

/**
 *  e1000_init_hw_80003es2lan - Initialize the ESB2 controller
 *  @hw: pointer to the HW structure
 *
 *  Initialize the hw bits, LED, VFTA, MTA, link and hw counters.
 **/
static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 reg_data;
	s32 ret_val;
	u16 kum_reg_data;
	u16 i;

	DEBUGFUNC("e1000_init_hw_80003es2lan");

	e1000_initialize_hw_bits_80003es2lan(hw);

	/* Initialize identification LED */
	ret_val = mac->ops.id_led_init(hw);
	/* An error is not fatal and we should not stop init due to this */
	if (ret_val)
		DEBUGOUT("Error initializing identification LED\n");

	/* Disabling VLAN filtering */
	DEBUGOUT("Initializing the IEEE VLAN\n");
	mac->ops.clear_vfta(hw);

	/* Setup the receive address. */
	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);

	/* Zero out the Multicast HASH table */
	DEBUGOUT("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);

	/* Setup link and flow control */
	ret_val = mac->ops.setup_link(hw);
	if (ret_val)
		return ret_val;

	/* Disable IBIST slave mode (far-end loopback) */
-	e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
-					&kum_reg_data);
-	kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
-	e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
-					 kum_reg_data);
+	ret_val =
+	    e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+					    &kum_reg_data);
+	if (!ret_val) {
+		kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
+		ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+					E1000_KMRNCTRLSTA_INBAND_PARAM,
+					kum_reg_data);
+		if (ret_val)
+			DEBUGOUT("Error disabling far-end loopback\n");
+	} else
+		DEBUGOUT("Error disabling far-end loopback\n");
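	/*
	 * The two hunks above matter because e1000_read_kmrn_reg_80003es2lan()
	 * can fail: the old code OR'd E1000_KMRNCTRLSTA_IBIST_DISABLE into a
	 * kum_reg_data value that was never filled in on that path.  Checking
	 * ret_val keeps an uninitialized value from being written back to the
	 * INBAND_PARAM register and logs both failure paths.
	 */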
	/* Set the transmit descriptor write-back policy */
	reg_data = E1000_READ_REG(hw, E1000_TXDCTL(0));
	reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
		    E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC);
	E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg_data);

	/* ...for both queues. */
	reg_data = E1000_READ_REG(hw, E1000_TXDCTL(1));
	reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
		    E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC);
	E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg_data);

	/* Enable retransmit on late collisions */
	reg_data = E1000_READ_REG(hw, E1000_TCTL);
	reg_data |= E1000_TCTL_RTLC;
	E1000_WRITE_REG(hw, E1000_TCTL, reg_data);

	/* Configure Gigabit Carry Extend Padding */
	reg_data = E1000_READ_REG(hw, E1000_TCTL_EXT);
	reg_data &= ~E1000_TCTL_EXT_GCEX_MASK;
	reg_data |= DEFAULT_TCTL_EXT_GCEX_80003ES2LAN;
	E1000_WRITE_REG(hw, E1000_TCTL_EXT, reg_data);

	/* Configure Transmit Inter-Packet Gap */
	reg_data = E1000_READ_REG(hw, E1000_TIPG);
	reg_data &= ~E1000_TIPG_IPGT_MASK;
	reg_data |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN;
	E1000_WRITE_REG(hw, E1000_TIPG, reg_data);

	reg_data = E1000_READ_REG_ARRAY(hw, E1000_FFLT, 0x0001);
	reg_data &= ~0x00100000;
	E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data);

	/* default to TRUE to enable the MDIC W/A */
	hw->dev_spec._80003es2lan.mdic_wa_enable = TRUE;

	ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
				E1000_KMRNCTRLSTA_OFFSET >>
				E1000_KMRNCTRLSTA_OFFSET_SHIFT, &i);
	if (!ret_val) {
		if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) ==
		     E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO)
			hw->dev_spec._80003es2lan.mdic_wa_enable = FALSE;
	}

	/* Clear all of the statistics registers (clear on read).  It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	e1000_clear_hw_cntrs_80003es2lan(hw);

	return ret_val;
}

/**
 *  e1000_initialize_hw_bits_80003es2lan - Init hw bits of ESB2
 *  @hw: pointer to the HW structure
 *
 *  Initializes required hardware-dependent bits needed for normal operation.
 **/
static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw)
{
	u32 reg;

	DEBUGFUNC("e1000_initialize_hw_bits_80003es2lan");

	/* Transmit Descriptor Control 0 */
	reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
	reg |= (1 << 22);
	E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);

	/* Transmit Descriptor Control 1 */
	reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
	reg |= (1 << 22);
	E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);

	/* Transmit Arbitration Control 0 */
	reg = E1000_READ_REG(hw, E1000_TARC(0));
	reg &= ~(0xF << 27);	/* 30:27 */
	if (hw->phy.media_type != e1000_media_type_copper)
		reg &= ~(1 << 20);
	E1000_WRITE_REG(hw, E1000_TARC(0), reg);

	/* Transmit Arbitration Control 1 */
	reg = E1000_READ_REG(hw, E1000_TARC(1));
	if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
		reg &= ~(1 << 28);
	else
		reg |= (1 << 28);
	E1000_WRITE_REG(hw, E1000_TARC(1), reg);

	/* Disable IPv6 extension header parsing because some malformed
	 * IPv6 headers can hang the Rx.
	 */
	reg = E1000_READ_REG(hw, E1000_RFCTL);
	reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
	E1000_WRITE_REG(hw, E1000_RFCTL, reg);

	return;
}

/**
 *  e1000_copper_link_setup_gg82563_80003es2lan - Configure GG82563 Link
 *  @hw: pointer to the HW structure
 *
 *  Setup some GG82563 PHY registers for obtaining link
 **/
static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u32 reg;
	u16 data;

	DEBUGFUNC("e1000_copper_link_setup_gg82563_80003es2lan");

	ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, &data);
	if (ret_val)
		return ret_val;

	data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
	/* Use 25MHz for both link down and 1000Base-T for Tx clock. */
	data |= GG82563_MSCR_TX_CLK_1000MBPS_25;

	ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, data);
	if (ret_val)
		return ret_val;

	/* Options:
	 *   MDI/MDI-X = 0 (default)
	 *   0 - Auto for all speeds
	 *   1 - MDI mode
	 *   2 - MDI-X mode
	 *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
	 */
	ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_SPEC_CTRL, &data);
	if (ret_val)
		return ret_val;

	data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK;

	switch (phy->mdix) {
	case 1:
		data |= GG82563_PSCR_CROSSOVER_MODE_MDI;
		break;
	case 2:
		data |= GG82563_PSCR_CROSSOVER_MODE_MDIX;
		break;
	case 0:
	default:
		data |= GG82563_PSCR_CROSSOVER_MODE_AUTO;
		break;
	}

	/* Options:
	 *   disable_polarity_correction = 0 (default)
	 *       Automatic Correction for Reversed Cable Polarity
	 *   0 - Disabled
	 *   1 - Enabled
	 */
	data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
	if (phy->disable_polarity_correction)
		data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE;

	ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_SPEC_CTRL, data);
	if (ret_val)
		return ret_val;

	/* SW Reset the PHY so all changes take effect */
	ret_val = hw->phy.ops.commit(hw);
	if (ret_val) {
		DEBUGOUT("Error Resetting the PHY\n");
		return ret_val;
	}

	/* Bypass Rx and Tx FIFO's */
	reg = E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL;
	data = (E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
		E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS);
	ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data);
	if (ret_val)
		return ret_val;

	reg = E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE;
	ret_val = e1000_read_kmrn_reg_80003es2lan(hw, reg, &data);
	if (ret_val)
		return ret_val;
	data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE;
	ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_SPEC_CTRL_2, &data);
	if (ret_val)
		return ret_val;

	data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG;
	ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_SPEC_CTRL_2, data);
	if (ret_val)
		return ret_val;

	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
	reg &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);

	ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_PWR_MGMT_CTRL, &data);
	if (ret_val)
		return ret_val;

	/* Do not init these registers when the HW is in IAMT mode, since the
	 * firmware will have already initialized them.  We only initialize
	 * them if the HW is not in IAMT mode.
	 */
	if (!hw->mac.ops.check_mng_mode(hw)) {
		/* Enable Electrical Idle on the PHY */
		data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE;
		ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_PWR_MGMT_CTRL,
						data);
		if (ret_val)
			return ret_val;

		ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
					       &data);
		if (ret_val)
			return ret_val;

		data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
		ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
						data);
		if (ret_val)
			return ret_val;
	}

	/* Workaround: Disable padding in Kumeran interface in the MAC
	 * and in the PHY to avoid CRC errors.
	 */
	ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_INBAND_CTRL, &data);
	if (ret_val)
		return ret_val;

	data |= GG82563_ICR_DIS_PADDING;
	ret_val = hw->phy.ops.write_reg(hw, GG82563_PHY_INBAND_CTRL, data);
	if (ret_val)
		return ret_val;

	return E1000_SUCCESS;
}

/**
 *  e1000_setup_copper_link_80003es2lan - Setup Copper Link for ESB2
 *  @hw: pointer to the HW structure
 *
 *  Essentially a wrapper for setting up all things "copper" related.
 *  This is a function pointer entry point called by the mac module.
 **/
static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;
	u16 reg_data;

	DEBUGFUNC("e1000_setup_copper_link_80003es2lan");

	ctrl = E1000_READ_REG(hw, E1000_CTRL);
	ctrl |= E1000_CTRL_SLU;
	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);

	/* Set the mac to wait the maximum time between each
	 * iteration and increase the max iterations when
	 * polling the phy; this fixes erroneous timeouts at 10Mbps.
	 */
	ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4),
						   0xFFFF);
	if (ret_val)
		return ret_val;
	ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
						  &reg_data);
	if (ret_val)
		return ret_val;
	reg_data |= 0x3F;
	ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
						   reg_data);
	if (ret_val)
		return ret_val;
	ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
					E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
					&reg_data);
	if (ret_val)
		return ret_val;
	reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING;
	ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
					E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
					reg_data);
	if (ret_val)
		return ret_val;

	ret_val = e1000_copper_link_setup_gg82563_80003es2lan(hw);
	if (ret_val)
		return ret_val;

	return e1000_setup_copper_link_generic(hw);
}

/**
 *  e1000_cfg_on_link_up_80003es2lan - es2 link configuration after link-up
 *  @hw: pointer to the HW structure
 *
 *  Configure the KMRN interface by applying last minute quirks for
 *  10/100 operation.
 **/
static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u16 speed;
	u16 duplex;

	DEBUGFUNC("e1000_configure_on_link_up");

	if (hw->phy.media_type == e1000_media_type_copper) {
		ret_val = e1000_get_speed_and_duplex_copper_generic(hw, &speed,
								    &duplex);
		if (ret_val)
			return ret_val;

		if (speed == SPEED_1000)
			ret_val = e1000_cfg_kmrn_1000_80003es2lan(hw);
		else
			ret_val = e1000_cfg_kmrn_10_100_80003es2lan(hw,
								    duplex);
	}

	return ret_val;
}

/**
 *  e1000_cfg_kmrn_10_100_80003es2lan - Apply "quirks" for 10/100 operation
 *  @hw: pointer to the HW structure
 *  @duplex: current duplex setting
 *
 *  Configure the KMRN interface by applying last minute quirks for
 *  10/100 operation.
 **/
static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
{
	s32 ret_val;
	u32 tipg;
	u32 i = 0;
	u16 reg_data, reg_data2;

	DEBUGFUNC("e1000_configure_kmrn_for_10_100");

	reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT;
	ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
					E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
					reg_data);
	if (ret_val)
		return ret_val;

	/* Configure Transmit Inter-Packet Gap */
	tipg = E1000_READ_REG(hw, E1000_TIPG);
	tipg &= ~E1000_TIPG_IPGT_MASK;
	tipg |= DEFAULT_TIPG_IPGT_10_100_80003ES2LAN;
	E1000_WRITE_REG(hw, E1000_TIPG, tipg);

	do {
		ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
					       &reg_data);
		if (ret_val)
			return ret_val;

		ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
					       &reg_data2);
		if (ret_val)
			return ret_val;
		i++;
	} while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));

	if (duplex == HALF_DUPLEX)
		reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
	else
		reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;

	return hw->phy.ops.write_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
}
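/*
 * The do/while loops above and below read GG82563_PHY_KMRN_MODE_CTRL twice
 * per iteration and retry (up to GG82563_MAX_KMRN_RETRY times) until two
 * consecutive reads agree, so a transient bad read over the Kumeran
 * interface is never used as the base value for the read-modify-write.
 */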
/**
 *  e1000_cfg_kmrn_1000_80003es2lan - Apply "quirks" for gigabit operation
 *  @hw: pointer to the HW structure
 *
 *  Configure the KMRN interface by applying last minute quirks for
 *  gigabit operation.
 **/
static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 reg_data, reg_data2;
	u32 tipg;
	u32 i = 0;

	DEBUGFUNC("e1000_configure_kmrn_for_1000");

	reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT;
	ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
					E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
					reg_data);
	if (ret_val)
		return ret_val;

	/* Configure Transmit Inter-Packet Gap */
	tipg = E1000_READ_REG(hw, E1000_TIPG);
	tipg &= ~E1000_TIPG_IPGT_MASK;
	tipg |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN;
	E1000_WRITE_REG(hw, E1000_TIPG, tipg);

	do {
		ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
					       &reg_data);
		if (ret_val)
			return ret_val;

		ret_val = hw->phy.ops.read_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
					       &reg_data2);
		if (ret_val)
			return ret_val;
		i++;
	} while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));

	reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;

	return hw->phy.ops.write_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
}

/**
 *  e1000_read_kmrn_reg_80003es2lan - Read kumeran register
 *  @hw: pointer to the HW structure
 *  @offset: register offset to be read
 *  @data: pointer to the read data
 *
 *  Acquire semaphore, then read the PHY register at offset
 *  using the kumeran interface.  The information retrieved is stored in data.
 *  Release the semaphore before exiting.
 **/
static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
					   u16 *data)
{
	u32 kmrnctrlsta;
	s32 ret_val;

	DEBUGFUNC("e1000_read_kmrn_reg_80003es2lan");

	ret_val = e1000_acquire_mac_csr_80003es2lan(hw);
	if (ret_val)
		return ret_val;

	kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
		       E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
	E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);
	E1000_WRITE_FLUSH(hw);

	usec_delay(2);

	kmrnctrlsta = E1000_READ_REG(hw, E1000_KMRNCTRLSTA);
	*data = (u16)kmrnctrlsta;

	e1000_release_mac_csr_80003es2lan(hw);

	return ret_val;
}

/**
 *  e1000_write_kmrn_reg_80003es2lan - Write kumeran register
 *  @hw: pointer to the HW structure
 *  @offset: register offset to write to
 *  @data: data to write at register offset
 *
 *  Acquire semaphore, then write the data to PHY register
 *  at the offset using the kumeran interface.  Release semaphore
 *  before exiting.
 **/
static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
					    u16 data)
{
	u32 kmrnctrlsta;
	s32 ret_val;

	DEBUGFUNC("e1000_write_kmrn_reg_80003es2lan");

	ret_val = e1000_acquire_mac_csr_80003es2lan(hw);
	if (ret_val)
		return ret_val;

	kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
		       E1000_KMRNCTRLSTA_OFFSET) | data;
	E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);
	E1000_WRITE_FLUSH(hw);

	usec_delay(2);

	e1000_release_mac_csr_80003es2lan(hw);

	return ret_val;
}
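/*
 * Reading the two helpers above: a Kumeran access is encoded in a single
 * KMRNCTRLSTA write.  The register offset goes in the OFFSET field and, for
 * reads, E1000_KMRNCTRLSTA_REN asks the hardware to latch the register
 * contents into the low 16 bits, which are fetched back after the 2us
 * delay.  Writes carry their 16-bit payload in those same low bits instead.
 */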
/**
 *  e1000_read_mac_addr_80003es2lan - Read device MAC address
 *  @hw: pointer to the HW structure
 **/
static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw)
{
	s32 ret_val;

	DEBUGFUNC("e1000_read_mac_addr_80003es2lan");

	/* If there's an alternate MAC address place it in RAR0
	 * so that it will override the Si installed default perm
	 * address.
	 */
	ret_val = e1000_check_alt_mac_addr_generic(hw);
	if (ret_val)
		return ret_val;

	return e1000_read_mac_addr_generic(hw);
}

/**
 *  e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down
 *  @hw: pointer to the HW structure
 *
 *  In the case of a PHY power down to save power, or to turn off link during a
 *  driver unload, or wake on lan is not enabled, remove the link.
 **/
static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw)
{
	/* If the management interface is not enabled, then power down */
	if (!(hw->mac.ops.check_mng_mode(hw) ||
	      hw->phy.ops.check_reset_block(hw)))
		e1000_power_down_phy_copper(hw);

	return;
}

/**
 *  e1000_clear_hw_cntrs_80003es2lan - Clear device specific hardware counters
 *  @hw: pointer to the HW structure
 *
 *  Clears the hardware counters by reading the counter registers.
 **/
static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_clear_hw_cntrs_80003es2lan");

	e1000_clear_hw_cntrs_base_generic(hw);

	E1000_READ_REG(hw, E1000_PRC64);
	E1000_READ_REG(hw, E1000_PRC127);
	E1000_READ_REG(hw, E1000_PRC255);
	E1000_READ_REG(hw, E1000_PRC511);
	E1000_READ_REG(hw, E1000_PRC1023);
	E1000_READ_REG(hw, E1000_PRC1522);
	E1000_READ_REG(hw, E1000_PTC64);
	E1000_READ_REG(hw, E1000_PTC127);
	E1000_READ_REG(hw, E1000_PTC255);
	E1000_READ_REG(hw, E1000_PTC511);
	E1000_READ_REG(hw, E1000_PTC1023);
	E1000_READ_REG(hw, E1000_PTC1522);

	E1000_READ_REG(hw, E1000_ALGNERRC);
	E1000_READ_REG(hw, E1000_RXERRC);
	E1000_READ_REG(hw, E1000_TNCRS);
	E1000_READ_REG(hw, E1000_CEXTERR);
	E1000_READ_REG(hw, E1000_TSCTC);
	E1000_READ_REG(hw, E1000_TSCTFC);

	E1000_READ_REG(hw, E1000_MGTPRC);
	E1000_READ_REG(hw, E1000_MGTPDC);
	E1000_READ_REG(hw, E1000_MGTPTC);

	E1000_READ_REG(hw, E1000_IAC);
	E1000_READ_REG(hw, E1000_ICRXOC);

	E1000_READ_REG(hw, E1000_ICRXPTC);
	E1000_READ_REG(hw, E1000_ICRXATC);
	E1000_READ_REG(hw, E1000_ICTXPTC);
	E1000_READ_REG(hw, E1000_ICTXATC);
	E1000_READ_REG(hw, E1000_ICTXQEC);
	E1000_READ_REG(hw, E1000_ICTXQMTC);
	E1000_READ_REG(hw, E1000_ICRXDMTC);
}
Index: stable/10/sys/dev/e1000/e1000_82540.c
===================================================================
--- stable/10/sys/dev/e1000/e1000_82540.c	(revision 296054)
+++ stable/10/sys/dev/e1000/e1000_82540.c	(revision 296055)
@@ -1,718 +1,718 @@
/******************************************************************************

  Copyright (c) 2001-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
  THE POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/
/*$FreeBSD$*/

/*
 * 82540EM Gigabit Ethernet Controller
 * 82540EP Gigabit Ethernet Controller
 * 82545EM Gigabit Ethernet Controller (Copper)
 * 82545EM Gigabit Ethernet Controller (Fiber)
 * 82545GM Gigabit Ethernet Controller
 * 82546EB Gigabit Ethernet Controller (Copper)
 * 82546EB Gigabit Ethernet Controller (Fiber)
 * 82546GB Gigabit Ethernet Controller
 */

#include "e1000_api.h"

static s32  e1000_init_phy_params_82540(struct e1000_hw *hw);
static s32  e1000_init_nvm_params_82540(struct e1000_hw *hw);
static s32  e1000_init_mac_params_82540(struct e1000_hw *hw);
static s32  e1000_adjust_serdes_amplitude_82540(struct e1000_hw *hw);
static void e1000_clear_hw_cntrs_82540(struct e1000_hw *hw);
static s32  e1000_init_hw_82540(struct e1000_hw *hw);
static s32  e1000_reset_hw_82540(struct e1000_hw *hw);
static s32  e1000_set_phy_mode_82540(struct e1000_hw *hw);
static s32  e1000_set_vco_speed_82540(struct e1000_hw *hw);
static s32  e1000_setup_copper_link_82540(struct e1000_hw *hw);
static s32  e1000_setup_fiber_serdes_link_82540(struct e1000_hw *hw);
static void e1000_power_down_phy_copper_82540(struct e1000_hw *hw);
static s32  e1000_read_mac_addr_82540(struct e1000_hw *hw);

/**
 *  e1000_init_phy_params_82540 - Init PHY func ptrs.
 *  @hw: pointer to the HW structure
 **/
static s32 e1000_init_phy_params_82540(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
-	s32 ret_val = E1000_SUCCESS;
+	s32 ret_val;

	phy->addr = 1;
	phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
	phy->reset_delay_us = 10000;
	phy->type = e1000_phy_m88;

	/* Function Pointers */
	phy->ops.check_polarity = e1000_check_polarity_m88;
	phy->ops.commit = e1000_phy_sw_reset_generic;
	phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
	phy->ops.get_cable_length = e1000_get_cable_length_m88;
	phy->ops.get_cfg_done = e1000_get_cfg_done_generic;
	phy->ops.read_reg = e1000_read_phy_reg_m88;
	phy->ops.reset = e1000_phy_hw_reset_generic;
	phy->ops.write_reg = e1000_write_phy_reg_m88;
	phy->ops.get_info = e1000_get_phy_info_m88;
	phy->ops.power_up = e1000_power_up_phy_copper;
	phy->ops.power_down = e1000_power_down_phy_copper_82540;

	ret_val = e1000_get_phy_id(hw);
	if (ret_val)
		goto out;

	/* Verify phy id */
	switch (hw->mac.type) {
	case e1000_82540:
	case e1000_82545:
	case e1000_82545_rev_3:
	case e1000_82546:
	case e1000_82546_rev_3:
		if (phy->id == M88E1011_I_PHY_ID)
			break;
		/* Fall Through */
	default:
		ret_val = -E1000_ERR_PHY;
		goto out;
		break;
	}

out:
	return ret_val;
}
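/*
 * The "- s32 ret_val = E1000_SUCCESS;" / "+ s32 ret_val;" hunks in this
 * file look like dead-store cleanup: in each touched function ret_val is
 * unconditionally assigned (by a PHY/NVM accessor or an explicit error
 * code) before it is first read, so dropping the initializer should not
 * change behavior.
 */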
/**
 *  e1000_init_nvm_params_82540 - Init NVM func ptrs.
 *  @hw: pointer to the HW structure
 **/
static s32 e1000_init_nvm_params_82540(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = E1000_READ_REG(hw, E1000_EECD);

	DEBUGFUNC("e1000_init_nvm_params_82540");

	nvm->type = e1000_nvm_eeprom_microwire;
	nvm->delay_usec = 50;
	nvm->opcode_bits = 3;
	switch (nvm->override) {
	case e1000_nvm_override_microwire_large:
		nvm->address_bits = 8;
		nvm->word_size = 256;
		break;
	case e1000_nvm_override_microwire_small:
		nvm->address_bits = 6;
		nvm->word_size = 64;
		break;
	default:
		nvm->address_bits = eecd & E1000_EECD_SIZE ? 8 : 6;
		nvm->word_size = eecd & E1000_EECD_SIZE ? 256 : 64;
		break;
	}

	/* Function Pointers */
	nvm->ops.acquire = e1000_acquire_nvm_generic;
	nvm->ops.read = e1000_read_nvm_microwire;
	nvm->ops.release = e1000_release_nvm_generic;
	nvm->ops.update = e1000_update_nvm_checksum_generic;
	nvm->ops.valid_led_default = e1000_valid_led_default_generic;
	nvm->ops.validate = e1000_validate_nvm_checksum_generic;
	nvm->ops.write = e1000_write_nvm_microwire;

	return E1000_SUCCESS;
}

/**
 *  e1000_init_mac_params_82540 - Init MAC func ptrs.
 *  @hw: pointer to the HW structure
 **/
static s32 e1000_init_mac_params_82540(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_init_mac_params_82540");

	/* Set media type */
	switch (hw->device_id) {
	case E1000_DEV_ID_82545EM_FIBER:
	case E1000_DEV_ID_82545GM_FIBER:
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		hw->phy.media_type = e1000_media_type_fiber;
		break;
	case E1000_DEV_ID_82545GM_SERDES:
	case E1000_DEV_ID_82546GB_SERDES:
		hw->phy.media_type = e1000_media_type_internal_serdes;
		break;
	default:
		hw->phy.media_type = e1000_media_type_copper;
		break;
	}

	/* Set mta register count */
	mac->mta_reg_count = 128;
	/* Set rar entry count */
	mac->rar_entry_count = E1000_RAR_ENTRIES;

	/* Function pointers */

	/* bus type/speed/width */
	mac->ops.get_bus_info = e1000_get_bus_info_pci_generic;
	/* function id */
	mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pci;
	/* reset */
	mac->ops.reset_hw = e1000_reset_hw_82540;
	/* hw initialization */
	mac->ops.init_hw = e1000_init_hw_82540;
	/* link setup */
	mac->ops.setup_link = e1000_setup_link_generic;
	/* physical interface setup */
	mac->ops.setup_physical_interface =
		(hw->phy.media_type == e1000_media_type_copper)
			? e1000_setup_copper_link_82540
			: e1000_setup_fiber_serdes_link_82540;
	/* check for link */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		mac->ops.check_for_link = e1000_check_for_copper_link_generic;
		break;
	case e1000_media_type_fiber:
		mac->ops.check_for_link = e1000_check_for_fiber_link_generic;
		break;
	case e1000_media_type_internal_serdes:
		mac->ops.check_for_link = e1000_check_for_serdes_link_generic;
		break;
	default:
		ret_val = -E1000_ERR_CONFIG;
		goto out;
		break;
	}
	/* link info */
	mac->ops.get_link_up_info =
		(hw->phy.media_type == e1000_media_type_copper)
			? e1000_get_speed_and_duplex_copper_generic
			: e1000_get_speed_and_duplex_fiber_serdes_generic;
	/* multicast address update */
	mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
	/* writing VFTA */
	mac->ops.write_vfta = e1000_write_vfta_generic;
	/* clearing VFTA */
	mac->ops.clear_vfta = e1000_clear_vfta_generic;
	/* read mac address */
	mac->ops.read_mac_addr = e1000_read_mac_addr_82540;
	/* ID LED init */
	mac->ops.id_led_init = e1000_id_led_init_generic;
	/* setup LED */
	mac->ops.setup_led = e1000_setup_led_generic;
	/* cleanup LED */
	mac->ops.cleanup_led = e1000_cleanup_led_generic;
	/* turn on/off LED */
	mac->ops.led_on = e1000_led_on_generic;
	mac->ops.led_off = e1000_led_off_generic;
	/* clear hardware counters */
	mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82540;

out:
	return ret_val;
}

/**
 *  e1000_init_function_pointers_82540 - Init func ptrs.
 *  @hw: pointer to the HW structure
 *
 *  Called to initialize all function pointers and parameters.
 **/
void e1000_init_function_pointers_82540(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_init_function_pointers_82540");

	hw->mac.ops.init_params = e1000_init_mac_params_82540;
	hw->nvm.ops.init_params = e1000_init_nvm_params_82540;
	hw->phy.ops.init_params = e1000_init_phy_params_82540;
}

/**
 *  e1000_reset_hw_82540 - Reset hardware
 *  @hw: pointer to the HW structure
 *
 *  This resets the hardware into a known state.
 **/
static s32 e1000_reset_hw_82540(struct e1000_hw *hw)
{
	u32 ctrl, manc;
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_reset_hw_82540");

	DEBUGOUT("Masking off all interrupts\n");
	E1000_WRITE_REG(hw, E1000_IMC, 0xFFFFFFFF);

	E1000_WRITE_REG(hw, E1000_RCTL, 0);
	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
	E1000_WRITE_FLUSH(hw);

	/*
	 * Delay to allow any outstanding PCI transactions to complete
	 * before resetting the device.
	 */
	msec_delay(10);

	ctrl = E1000_READ_REG(hw, E1000_CTRL);

	DEBUGOUT("Issuing a global reset to 82540/82545/82546 MAC\n");
	switch (hw->mac.type) {
	case e1000_82545_rev_3:
	case e1000_82546_rev_3:
		E1000_WRITE_REG(hw, E1000_CTRL_DUP, ctrl | E1000_CTRL_RST);
		break;
	default:
		/*
		 * These controllers can't ack the 64-bit write when
		 * issuing the reset, so we use IO-mapping as a
		 * workaround to issue the reset.
		 */
		E1000_WRITE_REG_IO(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
		break;
	}

	/* Wait for EEPROM reload */
	msec_delay(5);

	/* Disable HW ARPs on ASF enabled adapters */
	manc = E1000_READ_REG(hw, E1000_MANC);
	manc &= ~E1000_MANC_ARP_EN;
	E1000_WRITE_REG(hw, E1000_MANC, manc);

	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
	E1000_READ_REG(hw, E1000_ICR);

	return ret_val;
}

/**
 *  e1000_init_hw_82540 - Initialize hardware
 *  @hw: pointer to the HW structure
 *
 *  This inits the hardware readying it for operation.
 **/
static s32 e1000_init_hw_82540(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 txdctl, ctrl_ext;
-	s32 ret_val = E1000_SUCCESS;
+	s32 ret_val;
	u16 i;

	DEBUGFUNC("e1000_init_hw_82540");

	/* Initialize identification LED */
	ret_val = mac->ops.id_led_init(hw);
	if (ret_val) {
		DEBUGOUT("Error initializing identification LED\n");
		/* This is not fatal and we should not stop init due to this */
	}

	/* Disabling VLAN filtering */
	DEBUGOUT("Initializing the IEEE VLAN\n");
	if (mac->type < e1000_82545_rev_3)
		E1000_WRITE_REG(hw, E1000_VET, 0);

	mac->ops.clear_vfta(hw);

	/* Setup the receive address. */
	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);

	/* Zero out the Multicast HASH table */
	DEBUGOUT("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++) {
		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
		/*
		 * Avoid back to back register writes by adding the register
		 * read (flush).  This is to protect against some strange
		 * bridge configurations that may issue Memory Write Block
		 * (MWB) to our register space.  The *_rev_3 hardware at
		 * least doesn't respond correctly to every other dword in an
		 * MWB to our register space.
		 */
		E1000_WRITE_FLUSH(hw);
	}

	if (mac->type < e1000_82545_rev_3)
		e1000_pcix_mmrbc_workaround_generic(hw);

	/* Setup link and flow control */
	ret_val = mac->ops.setup_link(hw);

	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
		 E1000_TXDCTL_FULL_TX_DESC_WB;
	E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);

	/*
	 * Clear all of the statistics registers (clear on read).  It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	e1000_clear_hw_cntrs_82540(hw);

	if ((hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER) ||
	    (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3)) {
		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
		/*
		 * Relaxed ordering must be disabled to avoid a parity
		 * error crash in a PCI slot.
		 */
		ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
	}

	return ret_val;
}

/**
 *  e1000_setup_copper_link_82540 - Configure copper link settings
 *  @hw: pointer to the HW structure
 *
 *  Calls the appropriate function to configure the link for auto-neg or forced
 *  speed and duplex.  Then we check for link, once link is established calls
 *  to configure collision distance and flow control are called.  If link is
 *  not established, we return -E1000_ERR_PHY (-2).
 **/
static s32 e1000_setup_copper_link_82540(struct e1000_hw *hw)
{
	u32 ctrl;
-	s32 ret_val = E1000_SUCCESS;
+	s32 ret_val;
	u16 data;

	DEBUGFUNC("e1000_setup_copper_link_82540");

	ctrl = E1000_READ_REG(hw, E1000_CTRL);
	ctrl |= E1000_CTRL_SLU;
	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);

	ret_val = e1000_set_phy_mode_82540(hw);
	if (ret_val)
		goto out;

	if (hw->mac.type == e1000_82545_rev_3 ||
	    hw->mac.type == e1000_82546_rev_3) {
		ret_val = hw->phy.ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL,
					       &data);
		if (ret_val)
			goto out;
		data |= 0x00000008;
		ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL,
						data);
		if (ret_val)
			goto out;
	}

	ret_val = e1000_copper_link_setup_m88(hw);
	if (ret_val)
		goto out;

	ret_val = e1000_setup_copper_link_generic(hw);

out:
	return ret_val;
}

/**
 *  e1000_setup_fiber_serdes_link_82540 - Setup link for fiber/serdes
 *  @hw: pointer to the HW structure
 *
 *  Set the output amplitude to the value in the EEPROM and adjust the VCO
 *  speed to improve Bit Error Rate (BER) performance.  Configures collision
 *  distance and flow control for fiber and serdes links.  Upon successful
 *  setup, poll for link.
 **/
static s32 e1000_setup_fiber_serdes_link_82540(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val = E1000_SUCCESS;

	DEBUGFUNC("e1000_setup_fiber_serdes_link_82540");

	switch (mac->type) {
	case e1000_82545_rev_3:
	case e1000_82546_rev_3:
		if (hw->phy.media_type == e1000_media_type_internal_serdes) {
			/*
			 * If we're on serdes media, adjust the output
			 * amplitude to value set in the EEPROM.
			 */
			ret_val = e1000_adjust_serdes_amplitude_82540(hw);
			if (ret_val)
				goto out;
		}
		/* Adjust VCO speed to improve BER performance */
		ret_val = e1000_set_vco_speed_82540(hw);
		if (ret_val)
			goto out;
	default:
		break;
	}

	ret_val = e1000_setup_fiber_serdes_link_generic(hw);

out:
	return ret_val;
}

/**
 *  e1000_adjust_serdes_amplitude_82540 - Adjust amplitude based on EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Adjust the SERDES output amplitude based on the EEPROM settings.
 **/
static s32 e1000_adjust_serdes_amplitude_82540(struct e1000_hw *hw)
{
-	s32 ret_val = E1000_SUCCESS;
+	s32 ret_val;
	u16 nvm_data;

	DEBUGFUNC("e1000_adjust_serdes_amplitude_82540");

	ret_val = hw->nvm.ops.read(hw, NVM_SERDES_AMPLITUDE, 1, &nvm_data);
	if (ret_val)
		goto out;

	if (nvm_data != NVM_RESERVED_WORD) {
		/* Adjust serdes output amplitude only. */
		nvm_data &= NVM_SERDES_AMPLITUDE_MASK;
		ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_EXT_CTRL,
						nvm_data);
		if (ret_val)
			goto out;
	}

out:
	return ret_val;
}

/**
 *  e1000_set_vco_speed_82540 - Set VCO speed for better performance
 *  @hw: pointer to the HW structure
 *
 *  Set the VCO speed to improve Bit Error Rate (BER) performance.
 **/
**/ static s32 e1000_set_vco_speed_82540(struct e1000_hw *hw) { - s32 ret_val = E1000_SUCCESS; + s32 ret_val; u16 default_page = 0; u16 phy_data; DEBUGFUNC("e1000_set_vco_speed_82540"); /* Set PHY register 30, page 5, bit 8 to 0 */ ret_val = hw->phy.ops.read_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page); if (ret_val) goto out; ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005); if (ret_val) goto out; ret_val = hw->phy.ops.read_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); if (ret_val) goto out; phy_data &= ~M88E1000_PHY_VCO_REG_BIT8; ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); if (ret_val) goto out; /* Set PHY register 30, page 4, bit 11 to 1 */ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004); if (ret_val) goto out; ret_val = hw->phy.ops.read_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); if (ret_val) goto out; phy_data |= M88E1000_PHY_VCO_REG_BIT11; ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); if (ret_val) goto out; ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, default_page); out: return ret_val; } /** * e1000_set_phy_mode_82540 - Set PHY to class A mode * @hw: pointer to the HW structure * * Sets the PHY to class A mode and assumes the following operations will * follow to enable the new class mode: * 1. Do a PHY soft reset. * 2. Restart auto-negotiation or force link. **/ static s32 e1000_set_phy_mode_82540(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; u16 nvm_data; DEBUGFUNC("e1000_set_phy_mode_82540"); if (hw->mac.type != e1000_82545_rev_3) goto out; ret_val = hw->nvm.ops.read(hw, NVM_PHY_CLASS_WORD, 1, &nvm_data); if (ret_val) { ret_val = -E1000_ERR_PHY; goto out; } if ((nvm_data != NVM_RESERVED_WORD) && (nvm_data & NVM_PHY_CLASS_A)) { ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x000B); if (ret_val) { ret_val = -E1000_ERR_PHY; goto out; } ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x8104); if (ret_val) { ret_val = -E1000_ERR_PHY; goto out; } } out: return ret_val; } /** * e1000_power_down_phy_copper_82540 - Remove link in case of PHY power down * @hw: pointer to the HW structure * * In the case of a PHY power down to save power, or to turn off link during a * driver unload, or wake on lan is not enabled, remove the link. **/ static void e1000_power_down_phy_copper_82540(struct e1000_hw *hw) { /* If the management interface is not enabled, then power down */ if (!(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_SMBUS_EN)) e1000_power_down_phy_copper(hw); return; } /** * e1000_clear_hw_cntrs_82540 - Clear device specific hardware counters * @hw: pointer to the HW structure * * Clears the hardware counters by reading the counter registers. 
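* The return values below are intentionally discarded; for these
* clear-on-read registers the read itself is the desired side effect.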
**/ static void e1000_clear_hw_cntrs_82540(struct e1000_hw *hw) { DEBUGFUNC("e1000_clear_hw_cntrs_82540"); e1000_clear_hw_cntrs_base_generic(hw); E1000_READ_REG(hw, E1000_PRC64); E1000_READ_REG(hw, E1000_PRC127); E1000_READ_REG(hw, E1000_PRC255); E1000_READ_REG(hw, E1000_PRC511); E1000_READ_REG(hw, E1000_PRC1023); E1000_READ_REG(hw, E1000_PRC1522); E1000_READ_REG(hw, E1000_PTC64); E1000_READ_REG(hw, E1000_PTC127); E1000_READ_REG(hw, E1000_PTC255); E1000_READ_REG(hw, E1000_PTC511); E1000_READ_REG(hw, E1000_PTC1023); E1000_READ_REG(hw, E1000_PTC1522); E1000_READ_REG(hw, E1000_ALGNERRC); E1000_READ_REG(hw, E1000_RXERRC); E1000_READ_REG(hw, E1000_TNCRS); E1000_READ_REG(hw, E1000_CEXTERR); E1000_READ_REG(hw, E1000_TSCTC); E1000_READ_REG(hw, E1000_TSCTFC); E1000_READ_REG(hw, E1000_MGTPRC); E1000_READ_REG(hw, E1000_MGTPDC); E1000_READ_REG(hw, E1000_MGTPTC); } /** * e1000_read_mac_addr_82540 - Read device MAC address * @hw: pointer to the HW structure * * Reads the device MAC address from the EEPROM and stores the value. * Since devices with two ports use the same EEPROM, we increment the * last bit in the MAC address for the second port. * * This version is being used over generic because of customer issues * with VmWare and Virtual Box when using generic. It seems in * the emulated 82545, RAR[0] does NOT have a valid address after a * reset, this older method works and using this breaks nothing for * these legacy adapters. **/ s32 e1000_read_mac_addr_82540(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; u16 offset, nvm_data, i; DEBUGFUNC("e1000_read_mac_addr"); for (i = 0; i < ETH_ADDR_LEN; i += 2) { offset = i >> 1; ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data); if (ret_val) { DEBUGOUT("NVM Read Error\n"); goto out; } hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF); hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8); } /* Flip last bit of mac address if we're on second port */ if (hw->bus.func == E1000_FUNC_1) hw->mac.perm_addr[5] ^= 1; for (i = 0; i < ETH_ADDR_LEN; i++) hw->mac.addr[i] = hw->mac.perm_addr[i]; out: return ret_val; } Index: stable/10/sys/dev/e1000/e1000_82541.c =================================================================== --- stable/10/sys/dev/e1000/e1000_82541.c (revision 296054) +++ stable/10/sys/dev/e1000/e1000_82541.c (revision 296055) @@ -1,1303 +1,1304 @@ /****************************************************************************** Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ /* * 82541EI Gigabit Ethernet Controller * 82541ER Gigabit Ethernet Controller * 82541GI Gigabit Ethernet Controller * 82541PI Gigabit Ethernet Controller * 82547EI Gigabit Ethernet Controller * 82547GI Gigabit Ethernet Controller */ #include "e1000_api.h" static s32 e1000_init_phy_params_82541(struct e1000_hw *hw); static s32 e1000_init_nvm_params_82541(struct e1000_hw *hw); static s32 e1000_init_mac_params_82541(struct e1000_hw *hw); static s32 e1000_reset_hw_82541(struct e1000_hw *hw); static s32 e1000_init_hw_82541(struct e1000_hw *hw); static s32 e1000_get_link_up_info_82541(struct e1000_hw *hw, u16 *speed, u16 *duplex); static s32 e1000_phy_hw_reset_82541(struct e1000_hw *hw); static s32 e1000_setup_copper_link_82541(struct e1000_hw *hw); static s32 e1000_check_for_link_82541(struct e1000_hw *hw); static s32 e1000_get_cable_length_igp_82541(struct e1000_hw *hw); static s32 e1000_set_d3_lplu_state_82541(struct e1000_hw *hw, bool active); static s32 e1000_setup_led_82541(struct e1000_hw *hw); static s32 e1000_cleanup_led_82541(struct e1000_hw *hw); static void e1000_clear_hw_cntrs_82541(struct e1000_hw *hw); static s32 e1000_read_mac_addr_82541(struct e1000_hw *hw); static s32 e1000_config_dsp_after_link_change_82541(struct e1000_hw *hw, bool link_up); static s32 e1000_phy_init_script_82541(struct e1000_hw *hw); static void e1000_power_down_phy_copper_82541(struct e1000_hw *hw); static const u16 e1000_igp_cable_length_table[] = { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25, 25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40, 40, 50, 50, 50, 50, 50, 50, 50, 60, 60, 60, 60, 60, 60, 60, 60, 60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, 80, 90, 90, 90, 90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120}; #define IGP01E1000_AGC_LENGTH_TABLE_SIZE \ (sizeof(e1000_igp_cable_length_table) / \ sizeof(e1000_igp_cable_length_table[0])) /** * e1000_init_phy_params_82541 - Init PHY func ptrs. 
* @hw: pointer to the HW structure **/ static s32 e1000_init_phy_params_82541(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; - s32 ret_val = E1000_SUCCESS; + s32 ret_val; DEBUGFUNC("e1000_init_phy_params_82541"); phy->addr = 1; phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; phy->reset_delay_us = 10000; phy->type = e1000_phy_igp; /* Function Pointers */ phy->ops.check_polarity = e1000_check_polarity_igp; phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp; phy->ops.get_cable_length = e1000_get_cable_length_igp_82541; phy->ops.get_cfg_done = e1000_get_cfg_done_generic; phy->ops.get_info = e1000_get_phy_info_igp; phy->ops.read_reg = e1000_read_phy_reg_igp; phy->ops.reset = e1000_phy_hw_reset_82541; phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82541; phy->ops.write_reg = e1000_write_phy_reg_igp; phy->ops.power_up = e1000_power_up_phy_copper; phy->ops.power_down = e1000_power_down_phy_copper_82541; ret_val = e1000_get_phy_id(hw); if (ret_val) goto out; /* Verify phy id */ if (phy->id != IGP01E1000_I_PHY_ID) { ret_val = -E1000_ERR_PHY; goto out; } out: return ret_val; } /** * e1000_init_nvm_params_82541 - Init NVM func ptrs. * @hw: pointer to the HW structure **/ static s32 e1000_init_nvm_params_82541(struct e1000_hw *hw) { struct e1000_nvm_info *nvm = &hw->nvm; s32 ret_val = E1000_SUCCESS; u32 eecd = E1000_READ_REG(hw, E1000_EECD); u16 size; DEBUGFUNC("e1000_init_nvm_params_82541"); switch (nvm->override) { case e1000_nvm_override_spi_large: nvm->type = e1000_nvm_eeprom_spi; eecd |= E1000_EECD_ADDR_BITS; break; case e1000_nvm_override_spi_small: nvm->type = e1000_nvm_eeprom_spi; eecd &= ~E1000_EECD_ADDR_BITS; break; case e1000_nvm_override_microwire_large: nvm->type = e1000_nvm_eeprom_microwire; eecd |= E1000_EECD_SIZE; break; case e1000_nvm_override_microwire_small: nvm->type = e1000_nvm_eeprom_microwire; eecd &= ~E1000_EECD_SIZE; break; default: nvm->type = eecd & E1000_EECD_TYPE ? e1000_nvm_eeprom_spi : e1000_nvm_eeprom_microwire; break; } if (nvm->type == e1000_nvm_eeprom_spi) { nvm->address_bits = (eecd & E1000_EECD_ADDR_BITS) ? 16 : 8; nvm->delay_usec = 1; nvm->opcode_bits = 8; nvm->page_size = (eecd & E1000_EECD_ADDR_BITS) ? 32 : 8; /* Function Pointers */ nvm->ops.acquire = e1000_acquire_nvm_generic; nvm->ops.read = e1000_read_nvm_spi; nvm->ops.release = e1000_release_nvm_generic; nvm->ops.update = e1000_update_nvm_checksum_generic; nvm->ops.valid_led_default = e1000_valid_led_default_generic; nvm->ops.validate = e1000_validate_nvm_checksum_generic; nvm->ops.write = e1000_write_nvm_spi; /* * nvm->word_size must be discovered after the pointers * are set so we can verify the size from the nvm image * itself. Temporarily set it to a dummy value so the * read will work. */ nvm->word_size = 64; ret_val = nvm->ops.read(hw, NVM_CFG, 1, &size); if (ret_val) goto out; size = (size & NVM_SIZE_MASK) >> NVM_SIZE_SHIFT; /* * if size != 0, it can be added to a constant and become * the left-shift value to set the word_size. Otherwise, * word_size stays at 64. */ if (size) { size += NVM_WORD_SIZE_BASE_SHIFT_82541; nvm->word_size = 1 << size; } } else { nvm->address_bits = (eecd & E1000_EECD_ADDR_BITS) ? 8 : 6; nvm->delay_usec = 50; nvm->opcode_bits = 3; nvm->word_size = (eecd & E1000_EECD_ADDR_BITS) ? 
256 : 64; /* Function Pointers */ nvm->ops.acquire = e1000_acquire_nvm_generic; nvm->ops.read = e1000_read_nvm_microwire; nvm->ops.release = e1000_release_nvm_generic; nvm->ops.update = e1000_update_nvm_checksum_generic; nvm->ops.valid_led_default = e1000_valid_led_default_generic; nvm->ops.validate = e1000_validate_nvm_checksum_generic; nvm->ops.write = e1000_write_nvm_microwire; } out: return ret_val; } /** * e1000_init_mac_params_82541 - Init MAC func ptrs. * @hw: pointer to the HW structure **/ static s32 e1000_init_mac_params_82541(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; DEBUGFUNC("e1000_init_mac_params_82541"); /* Set media type */ hw->phy.media_type = e1000_media_type_copper; /* Set mta register count */ mac->mta_reg_count = 128; /* Set rar entry count */ mac->rar_entry_count = E1000_RAR_ENTRIES; /* Set if part includes ASF firmware */ mac->asf_firmware_present = TRUE; /* Function Pointers */ /* bus type/speed/width */ mac->ops.get_bus_info = e1000_get_bus_info_pci_generic; /* function id */ mac->ops.set_lan_id = e1000_set_lan_id_single_port; /* reset */ mac->ops.reset_hw = e1000_reset_hw_82541; /* hw initialization */ mac->ops.init_hw = e1000_init_hw_82541; /* link setup */ mac->ops.setup_link = e1000_setup_link_generic; /* physical interface link setup */ mac->ops.setup_physical_interface = e1000_setup_copper_link_82541; /* check for link */ mac->ops.check_for_link = e1000_check_for_link_82541; /* link info */ mac->ops.get_link_up_info = e1000_get_link_up_info_82541; /* multicast address update */ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic; /* writing VFTA */ mac->ops.write_vfta = e1000_write_vfta_generic; /* clearing VFTA */ mac->ops.clear_vfta = e1000_clear_vfta_generic; /* read mac address */ mac->ops.read_mac_addr = e1000_read_mac_addr_82541; /* ID LED init */ mac->ops.id_led_init = e1000_id_led_init_generic; /* setup LED */ mac->ops.setup_led = e1000_setup_led_82541; /* cleanup LED */ mac->ops.cleanup_led = e1000_cleanup_led_82541; /* turn on/off LED */ mac->ops.led_on = e1000_led_on_generic; mac->ops.led_off = e1000_led_off_generic; /* clear hardware counters */ mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82541; return E1000_SUCCESS; } /** * e1000_init_function_pointers_82541 - Init func ptrs. * @hw: pointer to the HW structure * * Called to initialize all function pointers and parameters. **/ void e1000_init_function_pointers_82541(struct e1000_hw *hw) { DEBUGFUNC("e1000_init_function_pointers_82541"); hw->mac.ops.init_params = e1000_init_mac_params_82541; hw->nvm.ops.init_params = e1000_init_nvm_params_82541; hw->phy.ops.init_params = e1000_init_phy_params_82541; } /** * e1000_reset_hw_82541 - Reset hardware * @hw: pointer to the HW structure * * This resets the hardware into a known state. **/ static s32 e1000_reset_hw_82541(struct e1000_hw *hw) { - u32 ledctl, ctrl, icr, manc; + u32 ledctl, ctrl, manc; DEBUGFUNC("e1000_reset_hw_82541"); DEBUGOUT("Masking off all interrupts\n"); E1000_WRITE_REG(hw, E1000_IMC, 0xFFFFFFFF); E1000_WRITE_REG(hw, E1000_RCTL, 0); E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); E1000_WRITE_FLUSH(hw); /* * Delay to allow any outstanding PCI transactions to complete * before resetting the device. 
*/ msec_delay(10); ctrl = E1000_READ_REG(hw, E1000_CTRL); /* Must reset the Phy before resetting the MAC */ if ((hw->mac.type == e1000_82541) || (hw->mac.type == e1000_82547)) { E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_PHY_RST)); + E1000_WRITE_FLUSH(hw); msec_delay(5); } DEBUGOUT("Issuing a global reset to 82541/82547 MAC\n"); switch (hw->mac.type) { case e1000_82541: case e1000_82541_rev_2: /* * These controllers can't ack the 64-bit write when * issuing the reset, so we use IO-mapping as a * workaround to issue the reset. */ E1000_WRITE_REG_IO(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); break; default: E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); break; } /* Wait for NVM reload */ msec_delay(20); /* Disable HW ARPs on ASF enabled adapters */ manc = E1000_READ_REG(hw, E1000_MANC); manc &= ~E1000_MANC_ARP_EN; E1000_WRITE_REG(hw, E1000_MANC, manc); if ((hw->mac.type == e1000_82541) || (hw->mac.type == e1000_82547)) { e1000_phy_init_script_82541(hw); /* Configure activity LED after Phy reset */ ledctl = E1000_READ_REG(hw, E1000_LEDCTL); ledctl &= IGP_ACTIVITY_LED_MASK; ledctl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl); } /* Once again, mask the interrupts */ DEBUGOUT("Masking off all interrupts\n"); E1000_WRITE_REG(hw, E1000_IMC, 0xFFFFFFFF); /* Clear any pending interrupt events. */ - icr = E1000_READ_REG(hw, E1000_ICR); + E1000_READ_REG(hw, E1000_ICR); return E1000_SUCCESS; } /** * e1000_init_hw_82541 - Initialize hardware * @hw: pointer to the HW structure * * This inits the hardware readying it for operation. **/ static s32 e1000_init_hw_82541(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541; u32 i, txdctl; s32 ret_val; DEBUGFUNC("e1000_init_hw_82541"); /* Initialize identification LED */ ret_val = mac->ops.id_led_init(hw); if (ret_val) { DEBUGOUT("Error initializing identification LED\n"); /* This is not fatal and we should not stop init due to this */ } /* Storing the Speed Power Down value for later use */ ret_val = hw->phy.ops.read_reg(hw, IGP01E1000_GMII_FIFO, &dev_spec->spd_default); if (ret_val) goto out; /* Disabling VLAN filtering */ DEBUGOUT("Initializing the IEEE VLAN\n"); mac->ops.clear_vfta(hw); /* Setup the receive address. */ e1000_init_rx_addrs_generic(hw, mac->rar_entry_count); /* Zero out the Multicast HASH table */ DEBUGOUT("Zeroing the MTA\n"); for (i = 0; i < mac->mta_reg_count; i++) { E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); /* * Avoid back to back register writes by adding the register * read (flush). This is to protect against some strange * bridge configurations that may issue Memory Write Block * (MWB) to our register space. */ E1000_WRITE_FLUSH(hw); } /* Setup link and flow control */ ret_val = mac->ops.setup_link(hw); txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0)); txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB; E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl); /* * Clear all of the statistics registers (clear on read). It is * important that we do this after we have tried to establish link * because the symbol error count will increment wildly if there * is no link. */ e1000_clear_hw_cntrs_82541(hw); out: return ret_val; } /** * e1000_get_link_up_info_82541 - Report speed and duplex * @hw: pointer to the HW structure * @speed: pointer to speed buffer * @duplex: pointer to duplex buffer * * Retrieve the current speed and duplex configuration. 
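* A hypothetical caller, going through the MAC ops table that
* e1000_init_mac_params_82541() filled in (sketch only):
*
*	u16 speed, duplex;
*	if (hw->mac.ops.get_link_up_info(hw, &speed, &duplex) ==
*	    E1000_SUCCESS)
*		DEBUGOUT2("link: %d Mb/s, duplex %d\n", speed, duplex);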
**/ static s32 e1000_get_link_up_info_82541(struct e1000_hw *hw, u16 *speed, u16 *duplex) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 data; DEBUGFUNC("e1000_get_link_up_info_82541"); ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex); if (ret_val) goto out; if (!phy->speed_downgraded) goto out; /* * IGP01 PHY may advertise full duplex operation after speed * downgrade even if it is operating at half duplex. * Here we set the duplex settings to match the duplex in the * link partner's capabilities. */ ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_EXP, &data); if (ret_val) goto out; if (!(data & NWAY_ER_LP_NWAY_CAPS)) { *duplex = HALF_DUPLEX; } else { ret_val = phy->ops.read_reg(hw, PHY_LP_ABILITY, &data); if (ret_val) goto out; if (*speed == SPEED_100) { if (!(data & NWAY_LPAR_100TX_FD_CAPS)) *duplex = HALF_DUPLEX; } else if (*speed == SPEED_10) { if (!(data & NWAY_LPAR_10T_FD_CAPS)) *duplex = HALF_DUPLEX; } } out: return ret_val; } /** * e1000_phy_hw_reset_82541 - PHY hardware reset * @hw: pointer to the HW structure * * Verify the reset block is not blocking us from resetting. Acquire * semaphore (if necessary) and read/set/write the device control reset * bit in the PHY. Wait the appropriate delay time for the device to * reset and release the semaphore (if necessary). **/ static s32 e1000_phy_hw_reset_82541(struct e1000_hw *hw) { s32 ret_val; u32 ledctl; DEBUGFUNC("e1000_phy_hw_reset_82541"); ret_val = e1000_phy_hw_reset_generic(hw); if (ret_val) goto out; e1000_phy_init_script_82541(hw); if ((hw->mac.type == e1000_82541) || (hw->mac.type == e1000_82547)) { /* Configure activity LED after PHY reset */ ledctl = E1000_READ_REG(hw, E1000_LEDCTL); ledctl &= IGP_ACTIVITY_LED_MASK; ledctl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl); } out: return ret_val; } /** * e1000_setup_copper_link_82541 - Configure copper link settings * @hw: pointer to the HW structure * * Calls the appropriate function to configure the link for auto-neg or forced * speed and duplex. Then we check for link, once link is established calls * to configure collision distance and flow control are called. If link is * not established, we return -E1000_ERR_PHY (-2). **/ static s32 e1000_setup_copper_link_82541(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541; s32 ret_val; u32 ctrl, ledctl; DEBUGFUNC("e1000_setup_copper_link_82541"); ctrl = E1000_READ_REG(hw, E1000_CTRL); ctrl |= E1000_CTRL_SLU; ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); E1000_WRITE_REG(hw, E1000_CTRL, ctrl); /* Earlier revs of the IGP phy require us to force MDI. 
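* (phy->mdix follows the driver-wide convention: 0 requests
* auto-MDIX, 1 forces MDI, 2 forces MDI-X. Rev-1 silicon is pinned
* to MDI below, and its DSP configuration is disabled as well.)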
*/ if (hw->mac.type == e1000_82541 || hw->mac.type == e1000_82547) { dev_spec->dsp_config = e1000_dsp_config_disabled; phy->mdix = 1; } else { dev_spec->dsp_config = e1000_dsp_config_enabled; } ret_val = e1000_copper_link_setup_igp(hw); if (ret_val) goto out; if (hw->mac.autoneg) { if (dev_spec->ffe_config == e1000_ffe_config_active) dev_spec->ffe_config = e1000_ffe_config_enabled; } /* Configure activity LED after Phy reset */ ledctl = E1000_READ_REG(hw, E1000_LEDCTL); ledctl &= IGP_ACTIVITY_LED_MASK; ledctl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl); ret_val = e1000_setup_copper_link_generic(hw); out: return ret_val; } /** * e1000_check_for_link_82541 - Check/Store link connection * @hw: pointer to the HW structure * * This checks the link condition of the adapter and stores the * results in the hw->mac structure. **/ static s32 e1000_check_for_link_82541(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; s32 ret_val; bool link; DEBUGFUNC("e1000_check_for_link_82541"); /* * We only want to go out to the PHY registers to see if Auto-Neg * has completed and/or if our link status has changed. The * get_link_status flag is set upon receiving a Link Status * Change or Rx Sequence Error interrupt. */ if (!mac->get_link_status) { ret_val = E1000_SUCCESS; goto out; } /* * First we want to see if the MII Status Register reports * link. If so, then we want to get the current speed/duplex * of the PHY. */ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); if (ret_val) goto out; if (!link) { ret_val = e1000_config_dsp_after_link_change_82541(hw, FALSE); goto out; /* No link detected */ } mac->get_link_status = FALSE; /* * Check if there was DownShift, must be checked * immediately after link-up */ e1000_check_downshift_generic(hw); /* * If we are forcing speed/duplex, then we simply return since * we have already determined whether we have link or not. */ if (!mac->autoneg) { ret_val = -E1000_ERR_CONFIG; goto out; } ret_val = e1000_config_dsp_after_link_change_82541(hw, TRUE); /* * Auto-Neg is enabled. Auto Speed Detection takes care * of MAC speed/duplex configuration. So we only need to * configure Collision Distance in the MAC. */ mac->ops.config_collision_dist(hw); /* * Configure Flow Control now that Auto-Neg has completed. * First, we need to restore the desired flow control * settings because we may have had to re-autoneg with a * different link partner. */ ret_val = e1000_config_fc_after_link_up_generic(hw); if (ret_val) DEBUGOUT("Error configuring flow control\n"); out: return ret_val; } /** * e1000_config_dsp_after_link_change_82541 - Config DSP after link * @hw: pointer to the HW structure * @link_up: boolean flag for link up status * * Return E1000_ERR_PHY when failing to read/write the PHY, else E1000_SUCCESS * at any other case. * * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a * gigabit link is achieved to improve link quality. 
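* Both call sites live in e1000_check_for_link_82541() above: FALSE
* when the PHY reports no link, TRUE once link is established, e.g.:
*
*	ret_val = e1000_config_dsp_after_link_change_82541(hw, TRUE);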
**/ static s32 e1000_config_dsp_after_link_change_82541(struct e1000_hw *hw, bool link_up) { struct e1000_phy_info *phy = &hw->phy; struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541; s32 ret_val; u32 idle_errs = 0; u16 phy_data, phy_saved_data, speed, duplex, i; u16 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20; u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = { IGP01E1000_PHY_AGC_PARAM_A, IGP01E1000_PHY_AGC_PARAM_B, IGP01E1000_PHY_AGC_PARAM_C, IGP01E1000_PHY_AGC_PARAM_D}; DEBUGFUNC("e1000_config_dsp_after_link_change_82541"); if (link_up) { ret_val = hw->mac.ops.get_link_up_info(hw, &speed, &duplex); if (ret_val) { DEBUGOUT("Error getting link speed and duplex\n"); goto out; } if (speed != SPEED_1000) { ret_val = E1000_SUCCESS; goto out; } ret_val = phy->ops.get_cable_length(hw); if (ret_val) goto out; if ((dev_spec->dsp_config == e1000_dsp_config_enabled) && phy->min_cable_length >= 50) { for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { ret_val = phy->ops.read_reg(hw, dsp_reg_array[i], &phy_data); if (ret_val) goto out; phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; ret_val = phy->ops.write_reg(hw, dsp_reg_array[i], phy_data); if (ret_val) goto out; } dev_spec->dsp_config = e1000_dsp_config_activated; } if ((dev_spec->ffe_config != e1000_ffe_config_enabled) || (phy->min_cable_length >= 50)) { ret_val = E1000_SUCCESS; goto out; } /* clear previous idle error counts */ ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data); if (ret_val) goto out; for (i = 0; i < ffe_idle_err_timeout; i++) { usec_delay(1000); ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data); if (ret_val) goto out; idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT); if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) { dev_spec->ffe_config = e1000_ffe_config_active; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_DSP_FFE, IGP01E1000_PHY_DSP_FFE_CM_CP); if (ret_val) goto out; break; } if (idle_errs) ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_100; } } else { if (dev_spec->dsp_config == e1000_dsp_config_activated) { /* * Save off the current value of register 0x2F5B * to be restored at the end of the routines. */ ret_val = phy->ops.read_reg(hw, 0x2F5B, &phy_saved_data); if (ret_val) goto out; /* Disable the PHY transmitter */ ret_val = phy->ops.write_reg(hw, 0x2F5B, 0x0003); if (ret_val) goto out; msec_delay_irq(20); ret_val = phy->ops.write_reg(hw, 0x0000, IGP01E1000_IEEE_FORCE_GIG); if (ret_val) goto out; for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { ret_val = phy->ops.read_reg(hw, dsp_reg_array[i], &phy_data); if (ret_val) goto out; phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS; ret_val = phy->ops.write_reg(hw, dsp_reg_array[i], phy_data); if (ret_val) goto out; } ret_val = phy->ops.write_reg(hw, 0x0000, IGP01E1000_IEEE_RESTART_AUTONEG); if (ret_val) goto out; msec_delay_irq(20); /* Now enable the transmitter */ ret_val = phy->ops.write_reg(hw, 0x2F5B, phy_saved_data); if (ret_val) goto out; dev_spec->dsp_config = e1000_dsp_config_enabled; } if (dev_spec->ffe_config != e1000_ffe_config_active) { ret_val = E1000_SUCCESS; goto out; } /* * Save off the current value of register 0x2F5B * to be restored at the end of the routines. 
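* (Register 0x2F5B gates the PHY transmitter: the 0x0003 write below
* silences it while the FFE default is reprogrammed, and restoring
* the saved value re-enables it at the end of the sequence.)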
*/ ret_val = phy->ops.read_reg(hw, 0x2F5B, &phy_saved_data); if (ret_val) goto out; /* Disable the PHY transmitter */ ret_val = phy->ops.write_reg(hw, 0x2F5B, 0x0003); if (ret_val) goto out; msec_delay_irq(20); ret_val = phy->ops.write_reg(hw, 0x0000, IGP01E1000_IEEE_FORCE_GIG); if (ret_val) goto out; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_DSP_FFE, IGP01E1000_PHY_DSP_FFE_DEFAULT); if (ret_val) goto out; ret_val = phy->ops.write_reg(hw, 0x0000, IGP01E1000_IEEE_RESTART_AUTONEG); if (ret_val) goto out; msec_delay_irq(20); /* Now enable the transmitter */ ret_val = phy->ops.write_reg(hw, 0x2F5B, phy_saved_data); if (ret_val) goto out; dev_spec->ffe_config = e1000_ffe_config_enabled; } out: return ret_val; } /** * e1000_get_cable_length_igp_82541 - Determine cable length for igp PHY * @hw: pointer to the HW structure * * The automatic gain control (agc) normalizes the amplitude of the * received signal, adjusting for the attenuation produced by the * cable. By reading the AGC registers, which represent the * combination of coarse and fine gain value, the value can be put * into a lookup table to obtain the approximate cable length * for each channel. **/ static s32 e1000_get_cable_length_igp_82541(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val = E1000_SUCCESS; u16 i, data; u16 cur_agc_value, agc_value = 0; u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE; u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = {IGP01E1000_PHY_AGC_A, IGP01E1000_PHY_AGC_B, IGP01E1000_PHY_AGC_C, IGP01E1000_PHY_AGC_D}; DEBUGFUNC("e1000_get_cable_length_igp_82541"); /* Read the AGC registers for all channels */ for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &data); if (ret_val) goto out; cur_agc_value = data >> IGP01E1000_AGC_LENGTH_SHIFT; /* Bounds checking */ if ((cur_agc_value >= IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) || (cur_agc_value == 0)) { ret_val = -E1000_ERR_PHY; goto out; } agc_value += cur_agc_value; if (min_agc_value > cur_agc_value) min_agc_value = cur_agc_value; } /* Remove the minimal AGC result for length < 50m */ if (agc_value < IGP01E1000_PHY_CHANNEL_NUM * 50) { agc_value -= min_agc_value; /* Average the three remaining channels for the length. */ agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1); } else { /* Average the channels for the length. */ agc_value /= IGP01E1000_PHY_CHANNEL_NUM; } phy->min_cable_length = (e1000_igp_cable_length_table[agc_value] > IGP01E1000_AGC_RANGE) ? (e1000_igp_cable_length_table[agc_value] - IGP01E1000_AGC_RANGE) : 0; phy->max_cable_length = e1000_igp_cable_length_table[agc_value] + IGP01E1000_AGC_RANGE; phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; out: return ret_val; } /** * e1000_set_d3_lplu_state_82541 - Sets low power link up state for D3 * @hw: pointer to the HW structure * @active: boolean used to enable/disable lplu * * Success returns 0, Failure returns 1 * * The low power link up (lplu) state is set to the power management level D3 * and SmartSpeed is disabled when active is TRUE, else clear lplu for D3 * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU * is used during Dx states where the power conservation is most important. * During driver activity, SmartSpeed should be enabled so performance is * maintained. 
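* In short: active == TRUE favors power (LPLU on, SmartSpeed off),
* while active == FALSE favors performance (LPLU off, SmartSpeed
* restored per phy->smart_speed). A suspend path might call, e.g.:
*
*	ret_val = hw->phy.ops.set_d3_lplu_state(hw, TRUE);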
**/ static s32 e1000_set_d3_lplu_state_82541(struct e1000_hw *hw, bool active) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 data; DEBUGFUNC("e1000_set_d3_lplu_state_82541"); switch (hw->mac.type) { case e1000_82541_rev_2: case e1000_82547_rev_2: break; default: ret_val = e1000_set_d3_lplu_state_generic(hw, active); goto out; break; } ret_val = phy->ops.read_reg(hw, IGP01E1000_GMII_FIFO, &data); if (ret_val) goto out; if (!active) { data &= ~IGP01E1000_GMII_FLEX_SPD; ret_val = phy->ops.write_reg(hw, IGP01E1000_GMII_FIFO, data); if (ret_val) goto out; /* * LPLU and SmartSpeed are mutually exclusive. LPLU is used * during Dx states where the power conservation is most * important. During driver activity we should enable * SmartSpeed, so performance is maintained. */ if (phy->smart_speed == e1000_smart_speed_on) { ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) goto out; data |= IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) goto out; } else if (phy->smart_speed == e1000_smart_speed_off) { ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) goto out; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) goto out; } } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { data |= IGP01E1000_GMII_FLEX_SPD; ret_val = phy->ops.write_reg(hw, IGP01E1000_GMII_FIFO, data); if (ret_val) goto out; /* When LPLU is enabled, we should disable SmartSpeed */ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) goto out; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, data); } out: return ret_val; } /** * e1000_setup_led_82541 - Configures SW controllable LED * @hw: pointer to the HW structure * * This prepares the SW controllable LED for use and saves the current state * of the LED so it can be later restored. **/ static s32 e1000_setup_led_82541(struct e1000_hw *hw) { struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541; s32 ret_val; DEBUGFUNC("e1000_setup_led_82541"); ret_val = hw->phy.ops.read_reg(hw, IGP01E1000_GMII_FIFO, &dev_spec->spd_default); if (ret_val) goto out; ret_val = hw->phy.ops.write_reg(hw, IGP01E1000_GMII_FIFO, (u16)(dev_spec->spd_default & ~IGP01E1000_GMII_SPD)); if (ret_val) goto out; E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1); out: return ret_val; } /** * e1000_cleanup_led_82541 - Set LED config to default operation * @hw: pointer to the HW structure * * Remove the current LED configuration and set the LED configuration * to the default value, saved from the EEPROM. **/ static s32 e1000_cleanup_led_82541(struct e1000_hw *hw) { struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541; s32 ret_val; DEBUGFUNC("e1000_cleanup_led_82541"); ret_val = hw->phy.ops.write_reg(hw, IGP01E1000_GMII_FIFO, dev_spec->spd_default); if (ret_val) goto out; E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default); out: return ret_val; } /** * e1000_phy_init_script_82541 - Initialize GbE PHY * @hw: pointer to the HW structure * * Initializes the IGP PHY. 
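* The script is a no-op unless it was enabled beforehand through
* e1000_init_script_state_82541(hw, TRUE); see the
* dev_spec->phy_init_script test at the top of the body.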
**/ static s32 e1000_phy_init_script_82541(struct e1000_hw *hw) { struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541; u32 ret_val; u16 phy_saved_data; DEBUGFUNC("e1000_phy_init_script_82541"); if (!dev_spec->phy_init_script) { ret_val = E1000_SUCCESS; goto out; } /* Delay after phy reset to enable NVM configuration to load */ msec_delay(20); /* * Save off the current value of register 0x2F5B to be restored at * the end of this routine. */ ret_val = hw->phy.ops.read_reg(hw, 0x2F5B, &phy_saved_data); /* Disabled the PHY transmitter */ hw->phy.ops.write_reg(hw, 0x2F5B, 0x0003); msec_delay(20); hw->phy.ops.write_reg(hw, 0x0000, 0x0140); msec_delay(5); switch (hw->mac.type) { case e1000_82541: case e1000_82547: hw->phy.ops.write_reg(hw, 0x1F95, 0x0001); hw->phy.ops.write_reg(hw, 0x1F71, 0xBD21); hw->phy.ops.write_reg(hw, 0x1F79, 0x0018); hw->phy.ops.write_reg(hw, 0x1F30, 0x1600); hw->phy.ops.write_reg(hw, 0x1F31, 0x0014); hw->phy.ops.write_reg(hw, 0x1F32, 0x161C); hw->phy.ops.write_reg(hw, 0x1F94, 0x0003); hw->phy.ops.write_reg(hw, 0x1F96, 0x003F); hw->phy.ops.write_reg(hw, 0x2010, 0x0008); break; case e1000_82541_rev_2: case e1000_82547_rev_2: hw->phy.ops.write_reg(hw, 0x1F73, 0x0099); break; default: break; } hw->phy.ops.write_reg(hw, 0x0000, 0x3300); msec_delay(20); /* Now enable the transmitter */ hw->phy.ops.write_reg(hw, 0x2F5B, phy_saved_data); if (hw->mac.type == e1000_82547) { u16 fused, fine, coarse; /* Move to analog registers page */ hw->phy.ops.read_reg(hw, IGP01E1000_ANALOG_SPARE_FUSE_STATUS, &fused); if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) { hw->phy.ops.read_reg(hw, IGP01E1000_ANALOG_FUSE_STATUS, &fused); fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK; coarse = fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK; if (coarse > IGP01E1000_ANALOG_FUSE_COARSE_THRESH) { coarse -= IGP01E1000_ANALOG_FUSE_COARSE_10; fine -= IGP01E1000_ANALOG_FUSE_FINE_1; } else if (coarse == IGP01E1000_ANALOG_FUSE_COARSE_THRESH) fine -= IGP01E1000_ANALOG_FUSE_FINE_10; fused = (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) | (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) | (coarse & IGP01E1000_ANALOG_FUSE_COARSE_MASK); hw->phy.ops.write_reg(hw, IGP01E1000_ANALOG_FUSE_CONTROL, fused); hw->phy.ops.write_reg(hw, IGP01E1000_ANALOG_FUSE_BYPASS, IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL); } } out: return ret_val; } /** * e1000_init_script_state_82541 - Enable/Disable PHY init script * @hw: pointer to the HW structure * @state: boolean value used to enable/disable PHY init script * * Allows the driver to enable/disable the PHY init script, if the PHY is an * IGP PHY. **/ void e1000_init_script_state_82541(struct e1000_hw *hw, bool state) { struct e1000_dev_spec_82541 *dev_spec = &hw->dev_spec._82541; DEBUGFUNC("e1000_init_script_state_82541"); if (hw->phy.type != e1000_phy_igp) { DEBUGOUT("Initialization script not necessary.\n"); goto out; } dev_spec->phy_init_script = state; out: return; } /** * e1000_power_down_phy_copper_82541 - Remove link in case of PHY power down * @hw: pointer to the HW structure * * In the case of a PHY power down to save power, or to turn off link during a * driver unload, or wake on lan is not enabled, remove the link. 
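* The PHY is left powered whenever manageability still needs the
* link; the E1000_MANC_SMBUS_EN test below makes that decision.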
**/ static void e1000_power_down_phy_copper_82541(struct e1000_hw *hw) { /* If the management interface is not enabled, then power down */ if (!(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_SMBUS_EN)) e1000_power_down_phy_copper(hw); return; } /** * e1000_clear_hw_cntrs_82541 - Clear device specific hardware counters * @hw: pointer to the HW structure * * Clears the hardware counters by reading the counter registers. **/ static void e1000_clear_hw_cntrs_82541(struct e1000_hw *hw) { DEBUGFUNC("e1000_clear_hw_cntrs_82541"); e1000_clear_hw_cntrs_base_generic(hw); E1000_READ_REG(hw, E1000_PRC64); E1000_READ_REG(hw, E1000_PRC127); E1000_READ_REG(hw, E1000_PRC255); E1000_READ_REG(hw, E1000_PRC511); E1000_READ_REG(hw, E1000_PRC1023); E1000_READ_REG(hw, E1000_PRC1522); E1000_READ_REG(hw, E1000_PTC64); E1000_READ_REG(hw, E1000_PTC127); E1000_READ_REG(hw, E1000_PTC255); E1000_READ_REG(hw, E1000_PTC511); E1000_READ_REG(hw, E1000_PTC1023); E1000_READ_REG(hw, E1000_PTC1522); E1000_READ_REG(hw, E1000_ALGNERRC); E1000_READ_REG(hw, E1000_RXERRC); E1000_READ_REG(hw, E1000_TNCRS); E1000_READ_REG(hw, E1000_CEXTERR); E1000_READ_REG(hw, E1000_TSCTC); E1000_READ_REG(hw, E1000_TSCTFC); E1000_READ_REG(hw, E1000_MGTPRC); E1000_READ_REG(hw, E1000_MGTPDC); E1000_READ_REG(hw, E1000_MGTPTC); } /** * e1000_read_mac_addr_82541 - Read device MAC address * @hw: pointer to the HW structure * * Reads the device MAC address from the EEPROM and stores the value. **/ static s32 e1000_read_mac_addr_82541(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; u16 offset, nvm_data, i; DEBUGFUNC("e1000_read_mac_addr"); for (i = 0; i < ETH_ADDR_LEN; i += 2) { offset = i >> 1; ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data); if (ret_val) { DEBUGOUT("NVM Read Error\n"); goto out; } hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF); hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8); } for (i = 0; i < ETH_ADDR_LEN; i++) hw->mac.addr[i] = hw->mac.perm_addr[i]; out: return ret_val; } Index: stable/10/sys/dev/e1000/e1000_82542.c =================================================================== --- stable/10/sys/dev/e1000/e1000_82542.c (revision 296054) +++ stable/10/sys/dev/e1000/e1000_82542.c (revision 296055) @@ -1,591 +1,591 @@ /****************************************************************************** Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ /* * 82542 Gigabit Ethernet Controller */ #include "e1000_api.h" static s32 e1000_init_phy_params_82542(struct e1000_hw *hw); static s32 e1000_init_nvm_params_82542(struct e1000_hw *hw); static s32 e1000_init_mac_params_82542(struct e1000_hw *hw); static s32 e1000_get_bus_info_82542(struct e1000_hw *hw); static s32 e1000_reset_hw_82542(struct e1000_hw *hw); static s32 e1000_init_hw_82542(struct e1000_hw *hw); static s32 e1000_setup_link_82542(struct e1000_hw *hw); static s32 e1000_led_on_82542(struct e1000_hw *hw); static s32 e1000_led_off_82542(struct e1000_hw *hw); static int e1000_rar_set_82542(struct e1000_hw *hw, u8 *addr, u32 index); static void e1000_clear_hw_cntrs_82542(struct e1000_hw *hw); static s32 e1000_read_mac_addr_82542(struct e1000_hw *hw); /** * e1000_init_phy_params_82542 - Init PHY func ptrs. * @hw: pointer to the HW structure **/ static s32 e1000_init_phy_params_82542(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val = E1000_SUCCESS; DEBUGFUNC("e1000_init_phy_params_82542"); phy->type = e1000_phy_none; return ret_val; } /** * e1000_init_nvm_params_82542 - Init NVM func ptrs. * @hw: pointer to the HW structure **/ static s32 e1000_init_nvm_params_82542(struct e1000_hw *hw) { struct e1000_nvm_info *nvm = &hw->nvm; DEBUGFUNC("e1000_init_nvm_params_82542"); nvm->address_bits = 6; nvm->delay_usec = 50; nvm->opcode_bits = 3; nvm->type = e1000_nvm_eeprom_microwire; nvm->word_size = 64; /* Function Pointers */ nvm->ops.read = e1000_read_nvm_microwire; nvm->ops.release = e1000_stop_nvm; nvm->ops.write = e1000_write_nvm_microwire; nvm->ops.update = e1000_update_nvm_checksum_generic; nvm->ops.validate = e1000_validate_nvm_checksum_generic; return E1000_SUCCESS; } /** * e1000_init_mac_params_82542 - Init MAC func ptrs. 
* @hw: pointer to the HW structure **/ static s32 e1000_init_mac_params_82542(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; DEBUGFUNC("e1000_init_mac_params_82542"); /* Set media type */ hw->phy.media_type = e1000_media_type_fiber; /* Set mta register count */ mac->mta_reg_count = 128; /* Set rar entry count */ mac->rar_entry_count = E1000_RAR_ENTRIES; /* Function pointers */ /* bus type/speed/width */ mac->ops.get_bus_info = e1000_get_bus_info_82542; /* function id */ mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pci; /* reset */ mac->ops.reset_hw = e1000_reset_hw_82542; /* hw initialization */ mac->ops.init_hw = e1000_init_hw_82542; /* link setup */ mac->ops.setup_link = e1000_setup_link_82542; /* phy/fiber/serdes setup */ mac->ops.setup_physical_interface = e1000_setup_fiber_serdes_link_generic; /* check for link */ mac->ops.check_for_link = e1000_check_for_fiber_link_generic; /* multicast address update */ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic; /* writing VFTA */ mac->ops.write_vfta = e1000_write_vfta_generic; /* clearing VFTA */ mac->ops.clear_vfta = e1000_clear_vfta_generic; /* read mac address */ mac->ops.read_mac_addr = e1000_read_mac_addr_82542; /* set RAR */ mac->ops.rar_set = e1000_rar_set_82542; /* turn on/off LED */ mac->ops.led_on = e1000_led_on_82542; mac->ops.led_off = e1000_led_off_82542; /* clear hardware counters */ mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82542; /* link info */ mac->ops.get_link_up_info = e1000_get_speed_and_duplex_fiber_serdes_generic; return E1000_SUCCESS; } /** * e1000_init_function_pointers_82542 - Init func ptrs. * @hw: pointer to the HW structure * * Called to initialize all function pointers and parameters. **/ void e1000_init_function_pointers_82542(struct e1000_hw *hw) { DEBUGFUNC("e1000_init_function_pointers_82542"); hw->mac.ops.init_params = e1000_init_mac_params_82542; hw->nvm.ops.init_params = e1000_init_nvm_params_82542; hw->phy.ops.init_params = e1000_init_phy_params_82542; } /** * e1000_get_bus_info_82542 - Obtain bus information for adapter * @hw: pointer to the HW structure * * This will obtain information about the HW bus for which the * adapter is attached and stores it in the hw structure. **/ static s32 e1000_get_bus_info_82542(struct e1000_hw *hw) { DEBUGFUNC("e1000_get_bus_info_82542"); hw->bus.type = e1000_bus_type_pci; hw->bus.speed = e1000_bus_speed_unknown; hw->bus.width = e1000_bus_width_unknown; return E1000_SUCCESS; } /** * e1000_reset_hw_82542 - Reset hardware * @hw: pointer to the HW structure * * This resets the hardware into a known state. 
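* On 82542 rev 2.0 silicon, MWI is disabled for the duration of the
* reset and re-enabled afterwards only if the saved PCI command word
* had CMD_MEM_WRT_INVALIDATE set (see the body below).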
**/ static s32 e1000_reset_hw_82542(struct e1000_hw *hw) { struct e1000_bus_info *bus = &hw->bus; s32 ret_val = E1000_SUCCESS; u32 ctrl; DEBUGFUNC("e1000_reset_hw_82542"); if (hw->revision_id == E1000_REVISION_2) { DEBUGOUT("Disabling MWI on 82542 rev 2\n"); e1000_pci_clear_mwi(hw); } DEBUGOUT("Masking off all interrupts\n"); E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); E1000_WRITE_REG(hw, E1000_RCTL, 0); E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); E1000_WRITE_FLUSH(hw); /* * Delay to allow any outstanding PCI transactions to complete before * resetting the device */ msec_delay(10); ctrl = E1000_READ_REG(hw, E1000_CTRL); DEBUGOUT("Issuing a global reset to 82542/82543 MAC\n"); E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); hw->nvm.ops.reload(hw); msec_delay(2); E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); E1000_READ_REG(hw, E1000_ICR); if (hw->revision_id == E1000_REVISION_2) { if (bus->pci_cmd_word & CMD_MEM_WRT_INVALIDATE) e1000_pci_set_mwi(hw); } return ret_val; } /** * e1000_init_hw_82542 - Initialize hardware * @hw: pointer to the HW structure * * This inits the hardware readying it for operation. **/ static s32 e1000_init_hw_82542(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; struct e1000_dev_spec_82542 *dev_spec = &hw->dev_spec._82542; s32 ret_val = E1000_SUCCESS; u32 ctrl; u16 i; DEBUGFUNC("e1000_init_hw_82542"); /* Disabling VLAN filtering */ E1000_WRITE_REG(hw, E1000_VET, 0); mac->ops.clear_vfta(hw); /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */ if (hw->revision_id == E1000_REVISION_2) { DEBUGOUT("Disabling MWI on 82542 rev 2.0\n"); e1000_pci_clear_mwi(hw); E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST); E1000_WRITE_FLUSH(hw); msec_delay(5); } /* Setup the receive address. */ e1000_init_rx_addrs_generic(hw, mac->rar_entry_count); /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */ if (hw->revision_id == E1000_REVISION_2) { E1000_WRITE_REG(hw, E1000_RCTL, 0); E1000_WRITE_FLUSH(hw); msec_delay(1); if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) e1000_pci_set_mwi(hw); } /* Zero out the Multicast HASH table */ DEBUGOUT("Zeroing the MTA\n"); for (i = 0; i < mac->mta_reg_count; i++) E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); /* * Set the PCI priority bit correctly in the CTRL register. This * determines if the adapter gives priority to receives, or if it * gives equal priority to transmits and receives. */ if (dev_spec->dma_fairness) { ctrl = E1000_READ_REG(hw, E1000_CTRL); E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PRIOR); } /* Setup link and flow control */ ret_val = e1000_setup_link_82542(hw); /* * Clear all of the statistics registers (clear on read). It is * important that we do this after we have tried to establish link * because the symbol error count will increment wildly if there * is no link. */ e1000_clear_hw_cntrs_82542(hw); return ret_val; } /** * e1000_setup_link_82542 - Setup flow control and link settings * @hw: pointer to the HW structure * * Determines which flow control settings to use, then configures flow * control. Calls the appropriate media-specific link configuration * function. Assuming the adapter has a valid link partner, a valid link * should be established. Assumes the hardware has previously been reset * and the transmitter and receiver are not enabled. 
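* Note the two fix-ups applied to hw->fc.requested_mode below:
* transmit-only pause (e1000_fc_tx_pause) is masked off
* unconditionally on this MAC, and rx pause is dropped as well when
* early transmit reporting (mac->report_tx_early) is enabled.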
**/ static s32 e1000_setup_link_82542(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; - s32 ret_val = E1000_SUCCESS; + s32 ret_val; DEBUGFUNC("e1000_setup_link_82542"); ret_val = e1000_set_default_fc_generic(hw); if (ret_val) goto out; hw->fc.requested_mode &= ~e1000_fc_tx_pause; if (mac->report_tx_early) hw->fc.requested_mode &= ~e1000_fc_rx_pause; /* * Save off the requested flow control mode for use later. Depending * on the link partner's capabilities, we may or may not use this mode. */ hw->fc.current_mode = hw->fc.requested_mode; DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); /* Call the necessary subroutine to configure the link. */ ret_val = mac->ops.setup_physical_interface(hw); if (ret_val) goto out; /* * Initialize the flow control address, type, and PAUSE timer * registers to their default values. This is done even if flow * control is disabled, because it does not hurt anything to * initialize these registers. */ DEBUGOUT("Initializing Flow Control address, type and timer regs\n"); E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW); E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH); E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE); E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time); ret_val = e1000_set_fc_watermarks_generic(hw); out: return ret_val; } /** * e1000_led_on_82542 - Turn on SW controllable LED * @hw: pointer to the HW structure * * Turns the SW defined LED on. **/ static s32 e1000_led_on_82542(struct e1000_hw *hw) { u32 ctrl = E1000_READ_REG(hw, E1000_CTRL); DEBUGFUNC("e1000_led_on_82542"); ctrl |= E1000_CTRL_SWDPIN0; ctrl |= E1000_CTRL_SWDPIO0; E1000_WRITE_REG(hw, E1000_CTRL, ctrl); return E1000_SUCCESS; } /** * e1000_led_off_82542 - Turn off SW controllable LED * @hw: pointer to the HW structure * * Turns the SW defined LED off. **/ static s32 e1000_led_off_82542(struct e1000_hw *hw) { u32 ctrl = E1000_READ_REG(hw, E1000_CTRL); DEBUGFUNC("e1000_led_off_82542"); ctrl &= ~E1000_CTRL_SWDPIN0; ctrl |= E1000_CTRL_SWDPIO0; E1000_WRITE_REG(hw, E1000_CTRL, ctrl); return E1000_SUCCESS; } /** * e1000_rar_set_82542 - Set receive address register * @hw: pointer to the HW structure * @addr: pointer to the receive address * @index: receive address array register * * Sets the receive address array register at index to the address passed * in by addr. **/ static int e1000_rar_set_82542(struct e1000_hw *hw, u8 *addr, u32 index) { u32 rar_low, rar_high; DEBUGFUNC("e1000_rar_set_82542"); /* * HW expects these in little endian so we reverse the byte order * from network order (big endian) to little endian */ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); /* If MAC address zero, no need to set the AV bit */ if (rar_low || rar_high) rar_high |= E1000_RAH_AV; E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low); E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high); return E1000_SUCCESS; } /** * e1000_translate_register_82542 - Translate the proper register offset * @reg: e1000 register to be read * * Registers in 82542 are located in different offsets than other adapters * even though they function in the same manner. This function takes in * the name of the register to read and returns the correct offset for * 82542 silicon. **/ u32 e1000_translate_register_82542(u32 reg) { /* * Some of the 82542 registers are located at different * offsets than they are in newer adapters. 
* Despite the difference in location, the registers * function in the same manner. */ switch (reg) { case E1000_RA: reg = 0x00040; break; case E1000_RDTR: reg = 0x00108; break; case E1000_RDBAL(0): reg = 0x00110; break; case E1000_RDBAH(0): reg = 0x00114; break; case E1000_RDLEN(0): reg = 0x00118; break; case E1000_RDH(0): reg = 0x00120; break; case E1000_RDT(0): reg = 0x00128; break; case E1000_RDBAL(1): reg = 0x00138; break; case E1000_RDBAH(1): reg = 0x0013C; break; case E1000_RDLEN(1): reg = 0x00140; break; case E1000_RDH(1): reg = 0x00148; break; case E1000_RDT(1): reg = 0x00150; break; case E1000_FCRTH: reg = 0x00160; break; case E1000_FCRTL: reg = 0x00168; break; case E1000_MTA: reg = 0x00200; break; case E1000_TDBAL(0): reg = 0x00420; break; case E1000_TDBAH(0): reg = 0x00424; break; case E1000_TDLEN(0): reg = 0x00428; break; case E1000_TDH(0): reg = 0x00430; break; case E1000_TDT(0): reg = 0x00438; break; case E1000_TIDV: reg = 0x00440; break; case E1000_VFTA: reg = 0x00600; break; case E1000_TDFH: reg = 0x08010; break; case E1000_TDFT: reg = 0x08018; break; default: break; } return reg; } /** * e1000_clear_hw_cntrs_82542 - Clear device specific hardware counters * @hw: pointer to the HW structure * * Clears the hardware counters by reading the counter registers. **/ static void e1000_clear_hw_cntrs_82542(struct e1000_hw *hw) { DEBUGFUNC("e1000_clear_hw_cntrs_82542"); e1000_clear_hw_cntrs_base_generic(hw); E1000_READ_REG(hw, E1000_PRC64); E1000_READ_REG(hw, E1000_PRC127); E1000_READ_REG(hw, E1000_PRC255); E1000_READ_REG(hw, E1000_PRC511); E1000_READ_REG(hw, E1000_PRC1023); E1000_READ_REG(hw, E1000_PRC1522); E1000_READ_REG(hw, E1000_PTC64); E1000_READ_REG(hw, E1000_PTC127); E1000_READ_REG(hw, E1000_PTC255); E1000_READ_REG(hw, E1000_PTC511); E1000_READ_REG(hw, E1000_PTC1023); E1000_READ_REG(hw, E1000_PTC1522); } /** * e1000_read_mac_addr_82542 - Read device MAC address * @hw: pointer to the HW structure * * Reads the device MAC address from the EEPROM and stores the value. **/ -static s32 e1000_read_mac_addr_82542(struct e1000_hw *hw) +s32 e1000_read_mac_addr_82542(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; u16 offset, nvm_data, i; DEBUGFUNC("e1000_read_mac_addr"); for (i = 0; i < ETH_ADDR_LEN; i += 2) { offset = i >> 1; ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data); if (ret_val) { DEBUGOUT("NVM Read Error\n"); goto out; } hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF); hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8); } for (i = 0; i < ETH_ADDR_LEN; i++) hw->mac.addr[i] = hw->mac.perm_addr[i]; out: return ret_val; } Index: stable/10/sys/dev/e1000/e1000_82543.c =================================================================== --- stable/10/sys/dev/e1000/e1000_82543.c (revision 296054) +++ stable/10/sys/dev/e1000/e1000_82543.c (revision 296055) @@ -1,1596 +1,1596 @@ /****************************************************************************** Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. 
Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ /* * 82543GC Gigabit Ethernet Controller (Fiber) * 82543GC Gigabit Ethernet Controller (Copper) * 82544EI Gigabit Ethernet Controller (Copper) * 82544EI Gigabit Ethernet Controller (Fiber) * 82544GC Gigabit Ethernet Controller (Copper) * 82544GC Gigabit Ethernet Controller (LOM) */ #include "e1000_api.h" static s32 e1000_init_phy_params_82543(struct e1000_hw *hw); static s32 e1000_init_nvm_params_82543(struct e1000_hw *hw); static s32 e1000_init_mac_params_82543(struct e1000_hw *hw); static s32 e1000_read_phy_reg_82543(struct e1000_hw *hw, u32 offset, u16 *data); static s32 e1000_write_phy_reg_82543(struct e1000_hw *hw, u32 offset, u16 data); static s32 e1000_phy_force_speed_duplex_82543(struct e1000_hw *hw); static s32 e1000_phy_hw_reset_82543(struct e1000_hw *hw); static s32 e1000_reset_hw_82543(struct e1000_hw *hw); static s32 e1000_init_hw_82543(struct e1000_hw *hw); static s32 e1000_setup_link_82543(struct e1000_hw *hw); static s32 e1000_setup_copper_link_82543(struct e1000_hw *hw); static s32 e1000_setup_fiber_link_82543(struct e1000_hw *hw); static s32 e1000_check_for_copper_link_82543(struct e1000_hw *hw); static s32 e1000_check_for_fiber_link_82543(struct e1000_hw *hw); static s32 e1000_led_on_82543(struct e1000_hw *hw); static s32 e1000_led_off_82543(struct e1000_hw *hw); static void e1000_write_vfta_82543(struct e1000_hw *hw, u32 offset, u32 value); static void e1000_clear_hw_cntrs_82543(struct e1000_hw *hw); static s32 e1000_config_mac_to_phy_82543(struct e1000_hw *hw); static bool e1000_init_phy_disabled_82543(struct e1000_hw *hw); static void e1000_lower_mdi_clk_82543(struct e1000_hw *hw, u32 *ctrl); static s32 e1000_polarity_reversal_workaround_82543(struct e1000_hw *hw); static void e1000_raise_mdi_clk_82543(struct e1000_hw *hw, u32 *ctrl); static u16 e1000_shift_in_mdi_bits_82543(struct e1000_hw *hw); static void e1000_shift_out_mdi_bits_82543(struct e1000_hw *hw, u32 data, u16 count); static bool e1000_tbi_compatibility_enabled_82543(struct e1000_hw *hw); static void e1000_set_tbi_sbp_82543(struct e1000_hw *hw, bool state); static s32 e1000_read_mac_addr_82543(struct e1000_hw *hw); /** * e1000_init_phy_params_82543 - Init PHY func ptrs. 
* @hw: pointer to the HW structure **/ static s32 e1000_init_phy_params_82543(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val = E1000_SUCCESS; DEBUGFUNC("e1000_init_phy_params_82543"); if (hw->phy.media_type != e1000_media_type_copper) { phy->type = e1000_phy_none; goto out; } else { phy->ops.power_up = e1000_power_up_phy_copper; phy->ops.power_down = e1000_power_down_phy_copper; } phy->addr = 1; phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; phy->reset_delay_us = 10000; phy->type = e1000_phy_m88; /* Function Pointers */ phy->ops.check_polarity = e1000_check_polarity_m88; phy->ops.commit = e1000_phy_sw_reset_generic; phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_82543; phy->ops.get_cable_length = e1000_get_cable_length_m88; phy->ops.get_cfg_done = e1000_get_cfg_done_generic; phy->ops.read_reg = (hw->mac.type == e1000_82543) ? e1000_read_phy_reg_82543 : e1000_read_phy_reg_m88; phy->ops.reset = (hw->mac.type == e1000_82543) ? e1000_phy_hw_reset_82543 : e1000_phy_hw_reset_generic; phy->ops.write_reg = (hw->mac.type == e1000_82543) ? e1000_write_phy_reg_82543 : e1000_write_phy_reg_m88; phy->ops.get_info = e1000_get_phy_info_m88; /* * The external PHY of the 82543 can be in a funky state. * Resetting helps us read the PHY registers for acquiring * the PHY ID. */ if (!e1000_init_phy_disabled_82543(hw)) { ret_val = phy->ops.reset(hw); if (ret_val) { DEBUGOUT("Resetting PHY during init failed.\n"); goto out; } msec_delay(20); } ret_val = e1000_get_phy_id(hw); if (ret_val) goto out; /* Verify phy id */ switch (hw->mac.type) { case e1000_82543: if (phy->id != M88E1000_E_PHY_ID) { ret_val = -E1000_ERR_PHY; goto out; } break; case e1000_82544: if (phy->id != M88E1000_I_PHY_ID) { ret_val = -E1000_ERR_PHY; goto out; } break; default: ret_val = -E1000_ERR_PHY; goto out; break; } out: return ret_val; } /** * e1000_init_nvm_params_82543 - Init NVM func ptrs. * @hw: pointer to the HW structure **/ static s32 e1000_init_nvm_params_82543(struct e1000_hw *hw) { struct e1000_nvm_info *nvm = &hw->nvm; DEBUGFUNC("e1000_init_nvm_params_82543"); nvm->type = e1000_nvm_eeprom_microwire; nvm->word_size = 64; nvm->delay_usec = 50; nvm->address_bits = 6; nvm->opcode_bits = 3; /* Function Pointers */ nvm->ops.read = e1000_read_nvm_microwire; nvm->ops.update = e1000_update_nvm_checksum_generic; nvm->ops.valid_led_default = e1000_valid_led_default_generic; nvm->ops.validate = e1000_validate_nvm_checksum_generic; nvm->ops.write = e1000_write_nvm_microwire; return E1000_SUCCESS; } /** * e1000_init_mac_params_82543 - Init MAC func ptrs. 
* @hw: pointer to the HW structure **/ static s32 e1000_init_mac_params_82543(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; DEBUGFUNC("e1000_init_mac_params_82543"); /* Set media type */ switch (hw->device_id) { case E1000_DEV_ID_82543GC_FIBER: case E1000_DEV_ID_82544EI_FIBER: hw->phy.media_type = e1000_media_type_fiber; break; default: hw->phy.media_type = e1000_media_type_copper; break; } /* Set mta register count */ mac->mta_reg_count = 128; /* Set rar entry count */ mac->rar_entry_count = E1000_RAR_ENTRIES; /* Function pointers */ /* bus type/speed/width */ mac->ops.get_bus_info = e1000_get_bus_info_pci_generic; /* function id */ mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pci; /* reset */ mac->ops.reset_hw = e1000_reset_hw_82543; /* hw initialization */ mac->ops.init_hw = e1000_init_hw_82543; /* link setup */ mac->ops.setup_link = e1000_setup_link_82543; /* physical interface setup */ mac->ops.setup_physical_interface = (hw->phy.media_type == e1000_media_type_copper) ? e1000_setup_copper_link_82543 : e1000_setup_fiber_link_82543; /* check for link */ mac->ops.check_for_link = (hw->phy.media_type == e1000_media_type_copper) ? e1000_check_for_copper_link_82543 : e1000_check_for_fiber_link_82543; /* link info */ mac->ops.get_link_up_info = (hw->phy.media_type == e1000_media_type_copper) ? e1000_get_speed_and_duplex_copper_generic : e1000_get_speed_and_duplex_fiber_serdes_generic; /* multicast address update */ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic; /* writing VFTA */ mac->ops.write_vfta = e1000_write_vfta_82543; /* clearing VFTA */ mac->ops.clear_vfta = e1000_clear_vfta_generic; /* read mac address */ mac->ops.read_mac_addr = e1000_read_mac_addr_82543; /* turn on/off LED */ mac->ops.led_on = e1000_led_on_82543; mac->ops.led_off = e1000_led_off_82543; /* clear hardware counters */ mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82543; /* Set tbi compatibility */ if ((hw->mac.type != e1000_82543) || (hw->phy.media_type == e1000_media_type_fiber)) e1000_set_tbi_compatibility_82543(hw, FALSE); return E1000_SUCCESS; } /** * e1000_init_function_pointers_82543 - Init func ptrs. * @hw: pointer to the HW structure * * Called to initialize all function pointers and parameters. **/ void e1000_init_function_pointers_82543(struct e1000_hw *hw) { DEBUGFUNC("e1000_init_function_pointers_82543"); hw->mac.ops.init_params = e1000_init_mac_params_82543; hw->nvm.ops.init_params = e1000_init_nvm_params_82543; hw->phy.ops.init_params = e1000_init_phy_params_82543; } /** * e1000_tbi_compatibility_enabled_82543 - Returns TBI compat status * @hw: pointer to the HW structure * * Returns the current status of 10-bit Interface (TBI) compatibility * (enabled/disabled). **/ static bool e1000_tbi_compatibility_enabled_82543(struct e1000_hw *hw) { struct e1000_dev_spec_82543 *dev_spec = &hw->dev_spec._82543; bool state = FALSE; DEBUGFUNC("e1000_tbi_compatibility_enabled_82543"); if (hw->mac.type != e1000_82543) { DEBUGOUT("TBI compatibility workaround for 82543 only.\n"); goto out; } state = !!(dev_spec->tbi_compatibility & TBI_COMPAT_ENABLED); out: return state; } /** * e1000_set_tbi_compatibility_82543 - Set TBI compatibility * @hw: pointer to the HW structure * @state: enable/disable TBI compatibility * * Enables or disables 10-bit Interface (TBI) compatibility.
**/ void e1000_set_tbi_compatibility_82543(struct e1000_hw *hw, bool state) { struct e1000_dev_spec_82543 *dev_spec = &hw->dev_spec._82543; DEBUGFUNC("e1000_set_tbi_compatibility_82543"); if (hw->mac.type != e1000_82543) { DEBUGOUT("TBI compatibility workaround for 82543 only.\n"); goto out; } if (state) dev_spec->tbi_compatibility |= TBI_COMPAT_ENABLED; else dev_spec->tbi_compatibility &= ~TBI_COMPAT_ENABLED; out: return; } /** * e1000_tbi_sbp_enabled_82543 - Returns TBI SBP status * @hw: pointer to the HW structure * * Returns the current status of 10-bit Interface (TBI) store bad packet (SBP) * (enabled/disabled). **/ bool e1000_tbi_sbp_enabled_82543(struct e1000_hw *hw) { struct e1000_dev_spec_82543 *dev_spec = &hw->dev_spec._82543; bool state = FALSE; DEBUGFUNC("e1000_tbi_sbp_enabled_82543"); if (hw->mac.type != e1000_82543) { DEBUGOUT("TBI compatibility workaround for 82543 only.\n"); goto out; } state = !!(dev_spec->tbi_compatibility & TBI_SBP_ENABLED); out: return state; } /** * e1000_set_tbi_sbp_82543 - Set TBI SBP * @hw: pointer to the HW structure * @state: enable/disable TBI store bad packet * * Enables or disables 10-bit Interface (TBI) store bad packet (SBP). **/ static void e1000_set_tbi_sbp_82543(struct e1000_hw *hw, bool state) { struct e1000_dev_spec_82543 *dev_spec = &hw->dev_spec._82543; DEBUGFUNC("e1000_set_tbi_sbp_82543"); if (state && e1000_tbi_compatibility_enabled_82543(hw)) dev_spec->tbi_compatibility |= TBI_SBP_ENABLED; else dev_spec->tbi_compatibility &= ~TBI_SBP_ENABLED; return; } /** * e1000_init_phy_disabled_82543 - Returns init PHY status * @hw: pointer to the HW structure * * Returns the current status of whether PHY initialization is disabled. * TRUE if PHY initialization is disabled, FALSE otherwise. **/ static bool e1000_init_phy_disabled_82543(struct e1000_hw *hw) { struct e1000_dev_spec_82543 *dev_spec = &hw->dev_spec._82543; bool ret_val; DEBUGFUNC("e1000_init_phy_disabled_82543"); if (hw->mac.type != e1000_82543) { ret_val = FALSE; goto out; } ret_val = dev_spec->init_phy_disabled; out: return ret_val; } /** * e1000_tbi_adjust_stats_82543 - Adjust stats when TBI enabled * @hw: pointer to the HW structure * @stats: Struct containing statistic register values * @frame_len: The length of the frame in question * @mac_addr: The Ethernet destination address of the frame in question * @max_frame_size: The maximum frame size * * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT **/ void e1000_tbi_adjust_stats_82543(struct e1000_hw *hw, struct e1000_hw_stats *stats, u32 frame_len, u8 *mac_addr, u32 max_frame_size) { if (!(e1000_tbi_sbp_enabled_82543(hw))) goto out; /* First adjust the frame length. */ frame_len--; /* * We need to adjust the statistics counters, since the hardware * counters overcount this packet as a CRC error and undercount * the packet as a good packet */ /* This packet should not be counted as a CRC error. */ stats->crcerrs--; /* This packet does count as a Good Packet Received. */ stats->gprc++; /* Adjust the Good Octets received counters */ stats->gorc += frame_len; /* * Is this a broadcast or multicast? Check broadcast first, * since the test for a multicast frame will test positive on * a broadcast frame. */ if ((mac_addr[0] == 0xff) && (mac_addr[1] == 0xff)) /* Broadcast packet */ stats->bprc++; else if (*mac_addr & 0x01) /* Multicast packet */ stats->mprc++; /* * In this case, the hardware has overcounted the number of * oversize frames.
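*
* Worked example (editorial note, not upstream text): with
* max_frame_size == 1522, a TBI link partner can deliver a
* 1523-byte frame whose extra trailing byte made the hardware
* count it as oversize (ROC). After the frame_len-- above,
* frame_len == 1522 == max_frame_size, so the test below undoes
* that one spurious ROC increment.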
*/ if ((frame_len == max_frame_size) && (stats->roc > 0)) stats->roc--; /* * Adjust the bin counters when the extra byte put the frame in the * wrong bin. Remember that the frame_len was adjusted above. */ if (frame_len == 64) { stats->prc64++; stats->prc127--; } else if (frame_len == 127) { stats->prc127++; stats->prc255--; } else if (frame_len == 255) { stats->prc255++; stats->prc511--; } else if (frame_len == 511) { stats->prc511++; stats->prc1023--; } else if (frame_len == 1023) { stats->prc1023++; stats->prc1522--; } else if (frame_len == 1522) { stats->prc1522++; } out: return; } /** * e1000_read_phy_reg_82543 - Read PHY register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * * Reads the PHY at offset and stores the information read in data. **/ static s32 e1000_read_phy_reg_82543(struct e1000_hw *hw, u32 offset, u16 *data) { u32 mdic; s32 ret_val = E1000_SUCCESS; DEBUGFUNC("e1000_read_phy_reg_82543"); if (offset > MAX_PHY_REG_ADDRESS) { DEBUGOUT1("PHY Address %d is out of range\n", offset); ret_val = -E1000_ERR_PARAM; goto out; } /* * We must first send a preamble through the MDIO pin to signal the * beginning of an MII instruction. This is done by sending 32 * consecutive "1" bits. */ e1000_shift_out_mdi_bits_82543(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); /* * Now combine the next few fields that are required for a read * operation. We use this method instead of calling the * e1000_shift_out_mdi_bits routine five different times. The format * of an MII read instruction consists of a shift out of 14 bits and * is defined as follows: * <Preamble><SOF><Op Code><Phy Addr><Reg Addr> * followed by a shift in of 18 bits. The first two bits shifted in * are TurnAround bits used to avoid contention on the MDIO pin when a * READ operation is performed. These two bits are thrown away, * followed by a shift in of 16 bits which contains the desired data. */ mdic = (offset | (hw->phy.addr << 5) | (PHY_OP_READ << 10) | (PHY_SOF << 12)); e1000_shift_out_mdi_bits_82543(hw, mdic, 14); /* * Now that we've shifted out the read command to the MII, we need to * "shift in" the 16-bit value (18 bits in total, counting the two * turnaround bits) from the requested PHY register. */ *data = e1000_shift_in_mdi_bits_82543(hw); out: return ret_val; } /** * e1000_write_phy_reg_82543 - Write PHY register * @hw: pointer to the HW structure * @offset: register offset to be written * @data: data to be written at offset * * Writes data to the PHY at offset. **/ static s32 e1000_write_phy_reg_82543(struct e1000_hw *hw, u32 offset, u16 data) { u32 mdic; s32 ret_val = E1000_SUCCESS; DEBUGFUNC("e1000_write_phy_reg_82543"); if (offset > MAX_PHY_REG_ADDRESS) { DEBUGOUT1("PHY Address %d is out of range\n", offset); ret_val = -E1000_ERR_PARAM; goto out; } /* * We'll need to use the SW defined pins to shift the write command * out to the PHY. We first send a preamble to the PHY to signal the * beginning of the MII instruction. This is done by sending 32 * consecutive "1" bits. */ e1000_shift_out_mdi_bits_82543(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); /* * Now combine the remaining required fields that will indicate a * write operation. We use this method instead of calling the * e1000_shift_out_mdi_bits routine for each field in the command. The * format of an MII write instruction is as follows: * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>.
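*
* Editorial sketch of the 32-bit frame assembled below, with field
* positions taken from the shifts in the code (the preamble has
* already been sent separately):
*
*	bits 31:30  PHY_SOF         start of frame
*	bits 29:28  PHY_OP_WRITE    opcode
*	bits 27:23  hw->phy.addr    PHY address
*	bits 22:18  offset          register address
*	bits 17:16  PHY_TURNAROUND  turnaround
*	bits 15:0   data            register value to write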
*/ mdic = ((PHY_TURNAROUND) | (offset << 2) | (hw->phy.addr << 7) | (PHY_OP_WRITE << 12) | (PHY_SOF << 14)); mdic <<= 16; mdic |= (u32)data; e1000_shift_out_mdi_bits_82543(hw, mdic, 32); out: return ret_val; } /** * e1000_raise_mdi_clk_82543 - Raise Management Data Input clock * @hw: pointer to the HW structure * @ctrl: pointer to the control register * * Raise the management data input clock by setting the MDC bit in the control * register. **/ static void e1000_raise_mdi_clk_82543(struct e1000_hw *hw, u32 *ctrl) { /* * Raise the clock input to the Management Data Clock (by setting the * MDC bit), and then delay a sufficient amount of time. */ E1000_WRITE_REG(hw, E1000_CTRL, (*ctrl | E1000_CTRL_MDC)); E1000_WRITE_FLUSH(hw); usec_delay(10); } /** * e1000_lower_mdi_clk_82543 - Lower Management Data Input clock * @hw: pointer to the HW structure * @ctrl: pointer to the control register * * Lower the management data input clock by clearing the MDC bit in the * control register. **/ static void e1000_lower_mdi_clk_82543(struct e1000_hw *hw, u32 *ctrl) { /* * Lower the clock input to the Management Data Clock (by clearing the * MDC bit), and then delay a sufficient amount of time. */ E1000_WRITE_REG(hw, E1000_CTRL, (*ctrl & ~E1000_CTRL_MDC)); E1000_WRITE_FLUSH(hw); usec_delay(10); } /** * e1000_shift_out_mdi_bits_82543 - Shift data bits out to the PHY * @hw: pointer to the HW structure * @data: data to send to the PHY * @count: number of bits to shift out * * We need to shift 'count' bits out to the PHY. So, the value in the * "data" parameter will be shifted out to the PHY one bit at a time. * In order to do this, "data" must be broken down into bits. **/ static void e1000_shift_out_mdi_bits_82543(struct e1000_hw *hw, u32 data, u16 count) { u32 ctrl, mask; /* * We need to shift "count" number of bits out to the PHY. So, the * value in the "data" parameter will be shifted out to the PHY one * bit at a time. In order to do this, "data" must be broken down * into bits. */ mask = 0x01; mask <<= (count - 1); ctrl = E1000_READ_REG(hw, E1000_CTRL); /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */ ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR); while (mask) { /* * A "1" is shifted out to the PHY by setting the MDIO bit to * "1" and then raising and lowering the Management Data Clock. * A "0" is shifted out to the PHY by setting the MDIO bit to * "0" and then raising and lowering the clock. */ if (data & mask) ctrl |= E1000_CTRL_MDIO; else ctrl &= ~E1000_CTRL_MDIO; E1000_WRITE_REG(hw, E1000_CTRL, ctrl); E1000_WRITE_FLUSH(hw); usec_delay(10); e1000_raise_mdi_clk_82543(hw, &ctrl); e1000_lower_mdi_clk_82543(hw, &ctrl); mask >>= 1; } } /** * e1000_shift_in_mdi_bits_82543 - Shift data bits in from the PHY * @hw: pointer to the HW structure * * In order to read a register from the PHY, we need to shift 18 bits * in from the PHY. Bits are "shifted in" by raising the clock input to * the PHY (setting the MDC bit), and then reading the value of the data out * MDIO bit. **/ static u16 e1000_shift_in_mdi_bits_82543(struct e1000_hw *hw) { u32 ctrl; u16 data = 0; u8 i; /* * In order to read a register from the PHY, we need to shift in a * total of 18 bits from the PHY. The first two bit (turnaround) * times are used to avoid contention on the MDIO pin when a read * operation is performed. These two bits are ignored by us and * thrown away. Bits are "shifted in" by raising the input to the * Management Data Clock (setting the MDC bit) and then reading the * value of the MDIO bit.
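*
* Editorial summary of the 18 bit times clocked in below:
*
*	2 turnaround bits   clocked through and discarded (bus
*	                    handoff from the MAC to the PHY)
*	16 data bits        sampled from MDIO, MSB first, into "data"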
*/ ctrl = E1000_READ_REG(hw, E1000_CTRL); /* * Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as * input. */ ctrl &= ~E1000_CTRL_MDIO_DIR; ctrl &= ~E1000_CTRL_MDIO; E1000_WRITE_REG(hw, E1000_CTRL, ctrl); E1000_WRITE_FLUSH(hw); /* * Raise and lower the clock before reading in the data. This accounts * for the turnaround bits. The first clock occurred when we clocked * out the last bit of the Register Address. */ e1000_raise_mdi_clk_82543(hw, &ctrl); e1000_lower_mdi_clk_82543(hw, &ctrl); for (data = 0, i = 0; i < 16; i++) { data <<= 1; e1000_raise_mdi_clk_82543(hw, &ctrl); ctrl = E1000_READ_REG(hw, E1000_CTRL); /* Check to see if we shifted in a "1". */ if (ctrl & E1000_CTRL_MDIO) data |= 1; e1000_lower_mdi_clk_82543(hw, &ctrl); } e1000_raise_mdi_clk_82543(hw, &ctrl); e1000_lower_mdi_clk_82543(hw, &ctrl); return data; } /** * e1000_phy_force_speed_duplex_82543 - Force speed/duplex for PHY * @hw: pointer to the HW structure * * Calls the function to force speed and duplex for the m88 PHY, and * if the PHY is not auto-negotiating and the speed is forced to 10Mbit, * then calls the function for the polarity reversal workaround. **/ static s32 e1000_phy_force_speed_duplex_82543(struct e1000_hw *hw) { s32 ret_val; DEBUGFUNC("e1000_phy_force_speed_duplex_82543"); ret_val = e1000_phy_force_speed_duplex_m88(hw); if (ret_val) goto out; if (!hw->mac.autoneg && (hw->mac.forced_speed_duplex & E1000_ALL_10_SPEED)) ret_val = e1000_polarity_reversal_workaround_82543(hw); out: return ret_val; } /** * e1000_polarity_reversal_workaround_82543 - Workaround polarity reversal * @hw: pointer to the HW structure * * When forcing link to 10 Full or 10 Half, the PHY can reverse the polarity * inadvertently. To work around the issue, we disable the transmitter on * the PHY until we have established the link partner's link parameters. **/ static s32 e1000_polarity_reversal_workaround_82543(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; u16 mii_status_reg; u16 i; bool link; if (!(hw->phy.ops.write_reg)) goto out; /* Polarity reversal workaround for forced 10F/10H links. */ /* Disable the transmitter on the PHY */ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019); if (ret_val) goto out; ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF); if (ret_val) goto out; ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000); if (ret_val) goto out; /* * This loop will early-out if the NO link condition has been met. * In other words, DO NOT use e1000_phy_has_link_generic() here. */ for (i = PHY_FORCE_TIME; i > 0; i--) { /* * Read the MII Status Register and wait for Link Status bit * to be clear.
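*
* (Editorial note: the link status bit in the MII status register
* is latched, which is why the register is read twice back to back
* below; the first read returns the stale latched value, the second
* the current link state.)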
*/ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg); if (ret_val) goto out; ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg); if (ret_val) goto out; if (!(mii_status_reg & ~MII_SR_LINK_STATUS)) break; msec_delay_irq(100); } /* Recommended delay time after link has been lost */ msec_delay_irq(1000); /* Now we will re-enable the transmitter on the PHY */ ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019); if (ret_val) goto out; msec_delay_irq(50); ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0); if (ret_val) goto out; msec_delay_irq(50); ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00); if (ret_val) goto out; msec_delay_irq(50); ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000); if (ret_val) goto out; ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000); if (ret_val) goto out; /* * Read the MII Status Register and wait for Link Status bit * to be set. */ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_TIME, 100000, &link); if (ret_val) goto out; out: return ret_val; } /** * e1000_phy_hw_reset_82543 - PHY hardware reset * @hw: pointer to the HW structure * * Sets the PHY_RESET_DIR bit in the extended device control register * to put the PHY into a reset and waits for completion. Once the reset * has been accomplished, clear the PHY_RESET_DIR bit to take the PHY out * of reset. **/ static s32 e1000_phy_hw_reset_82543(struct e1000_hw *hw) { u32 ctrl_ext; s32 ret_val; DEBUGFUNC("e1000_phy_hw_reset_82543"); /* * Read the Extended Device Control Register, assert the PHY_RESET_DIR * bit to put the PHY into reset... */ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR; ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA; E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); E1000_WRITE_FLUSH(hw); msec_delay(10); /* ...then take it out of reset. */ ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA; E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); E1000_WRITE_FLUSH(hw); usec_delay(150); if (!(hw->phy.ops.get_cfg_done)) return E1000_SUCCESS; ret_val = hw->phy.ops.get_cfg_done(hw); return ret_val; } /** * e1000_reset_hw_82543 - Reset hardware * @hw: pointer to the HW structure * * This resets the hardware into a known state. **/ static s32 e1000_reset_hw_82543(struct e1000_hw *hw) { - u32 ctrl, icr; + u32 ctrl; s32 ret_val = E1000_SUCCESS; DEBUGFUNC("e1000_reset_hw_82543"); DEBUGOUT("Masking off all interrupts\n"); E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); E1000_WRITE_REG(hw, E1000_RCTL, 0); E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); E1000_WRITE_FLUSH(hw); e1000_set_tbi_sbp_82543(hw, FALSE); /* * Delay to allow any outstanding PCI transactions to complete before * resetting the device */ msec_delay(10); ctrl = E1000_READ_REG(hw, E1000_CTRL); DEBUGOUT("Issuing a global reset to 82543/82544 MAC\n"); if (hw->mac.type == e1000_82543) { E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); } else { /* * The 82544 can't ACK the 64-bit write when issuing the * reset, so use IO-mapping as a workaround. */ E1000_WRITE_REG_IO(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); } /* * After MAC reset, force reload of NVM to restore power-on * settings to device. 
*/ hw->nvm.ops.reload(hw); msec_delay(2); /* Masking off and clearing any pending interrupts */ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); - icr = E1000_READ_REG(hw, E1000_ICR); + E1000_READ_REG(hw, E1000_ICR); return ret_val; } /** * e1000_init_hw_82543 - Initialize hardware * @hw: pointer to the HW structure * * This inits the hardware, readying it for operation. **/ static s32 e1000_init_hw_82543(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; struct e1000_dev_spec_82543 *dev_spec = &hw->dev_spec._82543; u32 ctrl; s32 ret_val; u16 i; DEBUGFUNC("e1000_init_hw_82543"); /* Disabling VLAN filtering */ E1000_WRITE_REG(hw, E1000_VET, 0); mac->ops.clear_vfta(hw); /* Setup the receive address. */ e1000_init_rx_addrs_generic(hw, mac->rar_entry_count); /* Zero out the Multicast HASH table */ DEBUGOUT("Zeroing the MTA\n"); for (i = 0; i < mac->mta_reg_count; i++) { E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); E1000_WRITE_FLUSH(hw); } /* * Set the PCI priority bit correctly in the CTRL register. This * determines if the adapter gives priority to receives, or if it * gives equal priority to transmits and receives. */ if (hw->mac.type == e1000_82543 && dev_spec->dma_fairness) { ctrl = E1000_READ_REG(hw, E1000_CTRL); E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PRIOR); } e1000_pcix_mmrbc_workaround_generic(hw); /* Setup link and flow control */ ret_val = mac->ops.setup_link(hw); /* * Clear all of the statistics registers (clear on read). It is * important that we do this after we have tried to establish link * because the symbol error count will increment wildly if there * is no link. */ e1000_clear_hw_cntrs_82543(hw); return ret_val; } /** * e1000_setup_link_82543 - Setup flow control and link settings * @hw: pointer to the HW structure * * Read the EEPROM to determine the initial polarity value and write the * extended device control register with the information before calling * the generic setup link function, which does the following: * Determines which flow control settings to use, then configures flow * control. Calls the appropriate media-specific link configuration * function. Assuming the adapter has a valid link partner, a valid link * should be established. Assumes the hardware has previously been reset * and the transmitter and receiver are not enabled. **/ static s32 e1000_setup_link_82543(struct e1000_hw *hw) { u32 ctrl_ext; s32 ret_val; u16 data; DEBUGFUNC("e1000_setup_link_82543"); /* * Take the 4 bits from NVM word 0xF that determine the initial * polarity value for the SW controlled pins, and set up the * Extended Device Control reg with that info. * This is needed because one of the SW controlled pins is used for * signal detection. So this should be done before phy setup. */ if (hw->mac.type == e1000_82543) { ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &data); if (ret_val) { DEBUGOUT("NVM Read Error\n"); ret_val = -E1000_ERR_NVM; goto out; } ctrl_ext = ((data & NVM_WORD0F_SWPDIO_EXT_MASK) << NVM_SWDPIO_EXT_SHIFT); E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); } ret_val = e1000_setup_link_generic(hw); out: return ret_val; } /** * e1000_setup_copper_link_82543 - Configure copper link settings * @hw: pointer to the HW structure * * Configures the link for auto-neg or forced speed and duplex. Then we * check for link; once link is established, calls to configure collision * distance and flow control are made.
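*
* Editorial outline of the sequence implemented below:
*
*	1. Set SLU (and, on 82543 only, force MAC speed/duplex and
*	   hardware-reset the PHY).
*	2. e1000_copper_link_setup_m88() - MDI/MDI-X, polarity
*	   reversal and downshift settings.
*	3. Autonegotiate, or force speed/duplex from user settings.
*	4. Poll for link; on link-up, configure the collision distance
*	   (82544) or match the MAC to the PHY (82543), then configure
*	   flow control.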
**/ static s32 e1000_setup_copper_link_82543(struct e1000_hw *hw) { u32 ctrl; s32 ret_val; bool link; DEBUGFUNC("e1000_setup_copper_link_82543"); ctrl = E1000_READ_REG(hw, E1000_CTRL) | E1000_CTRL_SLU; /* * With 82543, we need to force speed and duplex on the MAC * equal to what the PHY speed and duplex configuration is. * In addition, we need to perform a hardware reset on the * PHY to take it out of reset. */ if (hw->mac.type == e1000_82543) { ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); E1000_WRITE_REG(hw, E1000_CTRL, ctrl); ret_val = hw->phy.ops.reset(hw); if (ret_val) goto out; } else { ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); E1000_WRITE_REG(hw, E1000_CTRL, ctrl); } /* Set MDI/MDI-X, Polarity Reversal, and downshift settings */ ret_val = e1000_copper_link_setup_m88(hw); if (ret_val) goto out; if (hw->mac.autoneg) { /* * Setup autoneg and flow control advertisement and perform * autonegotiation. */ ret_val = e1000_copper_link_autoneg(hw); if (ret_val) goto out; } else { /* * PHY will be set to 10H, 10F, 100H or 100F * depending on user settings. */ DEBUGOUT("Forcing Speed and Duplex\n"); ret_val = e1000_phy_force_speed_duplex_82543(hw); if (ret_val) { DEBUGOUT("Error Forcing Speed and Duplex\n"); goto out; } } /* * Check link status. Wait up to 100 microseconds for link to become * valid. */ ret_val = e1000_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10, &link); if (ret_val) goto out; if (link) { DEBUGOUT("Valid link established!!!\n"); /* Config the MAC and PHY after link is up */ if (hw->mac.type == e1000_82544) { hw->mac.ops.config_collision_dist(hw); } else { ret_val = e1000_config_mac_to_phy_82543(hw); if (ret_val) goto out; } ret_val = e1000_config_fc_after_link_up_generic(hw); } else { DEBUGOUT("Unable to establish link!!!\n"); } out: return ret_val; } /** * e1000_setup_fiber_link_82543 - Setup link for fiber * @hw: pointer to the HW structure * * Configures collision distance and flow control for fiber links. Upon * successful setup, poll for link. **/ static s32 e1000_setup_fiber_link_82543(struct e1000_hw *hw) { u32 ctrl; s32 ret_val; DEBUGFUNC("e1000_setup_fiber_link_82543"); ctrl = E1000_READ_REG(hw, E1000_CTRL); /* Take the link out of reset */ ctrl &= ~E1000_CTRL_LRST; hw->mac.ops.config_collision_dist(hw); ret_val = e1000_commit_fc_settings_generic(hw); if (ret_val) goto out; DEBUGOUT("Auto-negotiation enabled\n"); E1000_WRITE_REG(hw, E1000_CTRL, ctrl); E1000_WRITE_FLUSH(hw); msec_delay(1); /* * For these adapters, the SW definable pin 1 is cleared when the * optics detect a signal. If we have a signal, then poll for a * "Link-Up" indication. 
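* (SWDPIN1 reads as 0 when the optics report signal, hence the
* inverted test below.)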
*/ if (!(E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1)) ret_val = e1000_poll_fiber_serdes_link_generic(hw); else DEBUGOUT("No signal detected\n"); out: return ret_val; } /** * e1000_check_for_copper_link_82543 - Check for link (Copper) * @hw: pointer to the HW structure * * Checks the phy for link; if link exists, do the following: * - check for downshift * - do polarity workaround (if necessary) * - configure collision distance * - configure flow control after link up * - configure tbi compatibility **/ static s32 e1000_check_for_copper_link_82543(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; u32 icr, rctl; s32 ret_val; u16 speed, duplex; bool link; DEBUGFUNC("e1000_check_for_copper_link_82543"); if (!mac->get_link_status) { ret_val = E1000_SUCCESS; goto out; } ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); if (ret_val) goto out; if (!link) goto out; /* No link detected */ mac->get_link_status = FALSE; e1000_check_downshift_generic(hw); /* * If we are forcing speed/duplex, then we can return since * we have already determined whether we have link or not. */ if (!mac->autoneg) { /* * If speed and duplex are forced to 10H or 10F, then we will * implement the polarity reversal workaround. We disable * interrupts first, and upon returning, restore the device's * interrupt state to its previous value, except for the link * status change interrupt, which will happen due to the * execution of this workaround. */ if (mac->forced_speed_duplex & E1000_ALL_10_SPEED) { E1000_WRITE_REG(hw, E1000_IMC, 0xFFFFFFFF); ret_val = e1000_polarity_reversal_workaround_82543(hw); icr = E1000_READ_REG(hw, E1000_ICR); E1000_WRITE_REG(hw, E1000_ICS, (icr & ~E1000_ICS_LSC)); E1000_WRITE_REG(hw, E1000_IMS, IMS_ENABLE_MASK); } ret_val = -E1000_ERR_CONFIG; goto out; } /* * We have an M88E1000 PHY and Auto-Neg is enabled. If we * have Si on board that is 82544 or newer, Auto * Speed Detection takes care of MAC speed/duplex * configuration. So we only need to configure Collision * Distance in the MAC. Otherwise, we need to force * speed/duplex on the MAC to the current PHY speed/duplex * settings. */ if (mac->type == e1000_82544) hw->mac.ops.config_collision_dist(hw); else { ret_val = e1000_config_mac_to_phy_82543(hw); if (ret_val) { DEBUGOUT("Error configuring MAC to PHY settings\n"); goto out; } } /* * Configure Flow Control now that Auto-Neg has completed. * First, we need to restore the desired flow control * settings because we may have had to re-autoneg with a * different link partner. */ ret_val = e1000_config_fc_after_link_up_generic(hw); if (ret_val) DEBUGOUT("Error configuring flow control\n"); /* * At this point we know that we are on copper and we have * auto-negotiated link. These are conditions for checking the link * partner capability register. We use the link speed to determine if * TBI compatibility needs to be turned on or off. If the link is not * at gigabit speed, then TBI compatibility is not needed. If we are * at gigabit speed, we turn on TBI compatibility. */ if (e1000_tbi_compatibility_enabled_82543(hw)) { ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex); if (ret_val) { DEBUGOUT("Error getting link speed and duplex\n"); return ret_val; } if (speed != SPEED_1000) { /* * If link speed is not set to gigabit speed, * we do not need to enable TBI compatibility. */ if (e1000_tbi_sbp_enabled_82543(hw)) { /* * If we were previously in SBP mode, * turn it off.
*/ e1000_set_tbi_sbp_82543(hw, FALSE); rctl = E1000_READ_REG(hw, E1000_RCTL); rctl &= ~E1000_RCTL_SBP; E1000_WRITE_REG(hw, E1000_RCTL, rctl); } } else { /* * If TBI compatibility was previously off, * turn it on. For compatibility with a TBI link * partner, we will store bad packets. Some * frames have an additional byte on the end and * will look like CRC errors to the hardware. */ if (!e1000_tbi_sbp_enabled_82543(hw)) { e1000_set_tbi_sbp_82543(hw, TRUE); rctl = E1000_READ_REG(hw, E1000_RCTL); rctl |= E1000_RCTL_SBP; E1000_WRITE_REG(hw, E1000_RCTL, rctl); } } } out: return ret_val; } /** * e1000_check_for_fiber_link_82543 - Check for link (Fiber) * @hw: pointer to the HW structure * * Checks for link up on the hardware. If link is not up and we have * a signal, then we need to force link up. **/ static s32 e1000_check_for_fiber_link_82543(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; u32 rxcw, ctrl, status; s32 ret_val = E1000_SUCCESS; DEBUGFUNC("e1000_check_for_fiber_link_82543"); ctrl = E1000_READ_REG(hw, E1000_CTRL); status = E1000_READ_REG(hw, E1000_STATUS); rxcw = E1000_READ_REG(hw, E1000_RXCW); /* * If we don't have link (auto-negotiation failed or link partner * cannot auto-negotiate), the cable is plugged in (we have signal), * and our link partner is not trying to auto-negotiate with us (we * are receiving idles or data), we need to force link up. We also * need to give auto-negotiation time to complete, in case the cable * was just plugged in. The autoneg_failed flag does this. */ /* (ctrl & E1000_CTRL_SWDPIN1) == 0 == have signal */ if ((!(ctrl & E1000_CTRL_SWDPIN1)) && (!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) { if (!mac->autoneg_failed) { mac->autoneg_failed = TRUE; ret_val = 0; goto out; } DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n"); /* Disable auto-negotiation in the TXCW register */ E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE)); /* Force link-up and also force full-duplex. */ ctrl = E1000_READ_REG(hw, E1000_CTRL); ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); E1000_WRITE_REG(hw, E1000_CTRL, ctrl); /* Configure Flow Control after forcing link up. */ ret_val = e1000_config_fc_after_link_up_generic(hw); if (ret_val) { DEBUGOUT("Error configuring flow control\n"); goto out; } } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { /* * If we are forcing link and we are receiving /C/ ordered * sets, re-enable auto-negotiation in the TXCW register * and disable forced link in the Device Control register * in an attempt to auto-negotiate with our link partner. */ DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n"); E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw); E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU)); mac->serdes_has_link = TRUE; } out: return ret_val; } /** * e1000_config_mac_to_phy_82543 - Configure MAC to PHY settings * @hw: pointer to the HW structure * * For the 82543 silicon, we need to set the MAC to match the settings * of the PHY, even if the PHY is auto-negotiating. **/ static s32 e1000_config_mac_to_phy_82543(struct e1000_hw *hw) { u32 ctrl; s32 ret_val = E1000_SUCCESS; u16 phy_data; DEBUGFUNC("e1000_config_mac_to_phy_82543"); if (!(hw->phy.ops.read_reg)) goto out; /* Set the bits to force speed and duplex */ ctrl = E1000_READ_REG(hw, E1000_CTRL); ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS); /* * Set up duplex in the Device Control and Transmit Control * registers depending on negotiated values.
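*
* Editorial sketch of the mapping performed below, from the M88
* PHY specific status register into the Device Control register:
*
*	M88E1000_PSSR_DPLX set          -> E1000_CTRL_FD
*	M88E1000_PSSR_SPEED == 1000MBS  -> E1000_CTRL_SPD_1000
*	M88E1000_PSSR_SPEED == 100MBS   -> E1000_CTRL_SPD_100
*	otherwise                       -> 10 Mb/s (no speed bit set)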
*/ ret_val = hw->phy.ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); if (ret_val) goto out; ctrl &= ~E1000_CTRL_FD; if (phy_data & M88E1000_PSSR_DPLX) ctrl |= E1000_CTRL_FD; hw->mac.ops.config_collision_dist(hw); /* * Set up speed in the Device Control register depending on * negotiated values. */ if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) ctrl |= E1000_CTRL_SPD_1000; else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS) ctrl |= E1000_CTRL_SPD_100; E1000_WRITE_REG(hw, E1000_CTRL, ctrl); out: return ret_val; } /** * e1000_write_vfta_82543 - Write value to VLAN filter table * @hw: pointer to the HW structure * @offset: the 32-bit offset at which to write the value. * @value: the 32-bit value to write at location offset. * * This writes a 32-bit value to a 32-bit offset in the VLAN filter * table. **/ static void e1000_write_vfta_82543(struct e1000_hw *hw, u32 offset, u32 value) { u32 temp; DEBUGFUNC("e1000_write_vfta_82543"); if ((hw->mac.type == e1000_82544) && (offset & 1)) { temp = E1000_READ_REG_ARRAY(hw, E1000_VFTA, offset - 1); E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); E1000_WRITE_FLUSH(hw); E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset - 1, temp); E1000_WRITE_FLUSH(hw); } else { e1000_write_vfta_generic(hw, offset, value); } } /** * e1000_led_on_82543 - Turn on SW controllable LED * @hw: pointer to the HW structure * * Turns the SW defined LED on. **/ static s32 e1000_led_on_82543(struct e1000_hw *hw) { u32 ctrl = E1000_READ_REG(hw, E1000_CTRL); DEBUGFUNC("e1000_led_on_82543"); if (hw->mac.type == e1000_82544 && hw->phy.media_type == e1000_media_type_copper) { /* Clear SW-definable Pin 0 to turn on the LED */ ctrl &= ~E1000_CTRL_SWDPIN0; ctrl |= E1000_CTRL_SWDPIO0; } else { /* Fiber 82544 and all 82543 use this method */ ctrl |= E1000_CTRL_SWDPIN0; ctrl |= E1000_CTRL_SWDPIO0; } E1000_WRITE_REG(hw, E1000_CTRL, ctrl); return E1000_SUCCESS; } /** * e1000_led_off_82543 - Turn off SW controllable LED * @hw: pointer to the HW structure * * Turns the SW defined LED off. **/ static s32 e1000_led_off_82543(struct e1000_hw *hw) { u32 ctrl = E1000_READ_REG(hw, E1000_CTRL); DEBUGFUNC("e1000_led_off_82543"); if (hw->mac.type == e1000_82544 && hw->phy.media_type == e1000_media_type_copper) { /* Set SW-definable Pin 0 to turn off the LED */ ctrl |= E1000_CTRL_SWDPIN0; ctrl |= E1000_CTRL_SWDPIO0; } else { ctrl &= ~E1000_CTRL_SWDPIN0; ctrl |= E1000_CTRL_SWDPIO0; } E1000_WRITE_REG(hw, E1000_CTRL, ctrl); return E1000_SUCCESS; } /** * e1000_clear_hw_cntrs_82543 - Clear device specific hardware counters * @hw: pointer to the HW structure * * Clears the hardware counters by reading the counter registers.
**/ static void e1000_clear_hw_cntrs_82543(struct e1000_hw *hw) { DEBUGFUNC("e1000_clear_hw_cntrs_82543"); e1000_clear_hw_cntrs_base_generic(hw); E1000_READ_REG(hw, E1000_PRC64); E1000_READ_REG(hw, E1000_PRC127); E1000_READ_REG(hw, E1000_PRC255); E1000_READ_REG(hw, E1000_PRC511); E1000_READ_REG(hw, E1000_PRC1023); E1000_READ_REG(hw, E1000_PRC1522); E1000_READ_REG(hw, E1000_PTC64); E1000_READ_REG(hw, E1000_PTC127); E1000_READ_REG(hw, E1000_PTC255); E1000_READ_REG(hw, E1000_PTC511); E1000_READ_REG(hw, E1000_PTC1023); E1000_READ_REG(hw, E1000_PTC1522); E1000_READ_REG(hw, E1000_ALGNERRC); E1000_READ_REG(hw, E1000_RXERRC); E1000_READ_REG(hw, E1000_TNCRS); E1000_READ_REG(hw, E1000_CEXTERR); E1000_READ_REG(hw, E1000_TSCTC); E1000_READ_REG(hw, E1000_TSCTFC); } /** * e1000_read_mac_addr_82543 - Read device MAC address * @hw: pointer to the HW structure * * Reads the device MAC address from the EEPROM and stores the value. * Since devices with two ports use the same EEPROM, we flip the * last bit in the MAC address for the second port. * **/ s32 e1000_read_mac_addr_82543(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; u16 offset, nvm_data, i; DEBUGFUNC("e1000_read_mac_addr"); for (i = 0; i < ETH_ADDR_LEN; i += 2) { offset = i >> 1; ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data); if (ret_val) { DEBUGOUT("NVM Read Error\n"); goto out; } hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF); hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8); } /* Flip last bit of mac address if we're on second port */ if (hw->bus.func == E1000_FUNC_1) hw->mac.perm_addr[5] ^= 1; for (i = 0; i < ETH_ADDR_LEN; i++) hw->mac.addr[i] = hw->mac.perm_addr[i]; out: return ret_val; } Index: stable/10/sys/dev/e1000/e1000_82571.h =================================================================== --- stable/10/sys/dev/e1000/e1000_82571.h (revision 296054) +++ stable/10/sys/dev/e1000/e1000_82571.h (revision 296055) @@ -1,65 +1,66 @@ /****************************************************************************** Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/ /*$FreeBSD$*/ #ifndef _E1000_82571_H_ #define _E1000_82571_H_ #define ID_LED_RESERVED_F746 0xF746 #define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \ (ID_LED_OFF1_ON2 << 8) | \ (ID_LED_DEF1_DEF2 << 4) | \ (ID_LED_DEF1_DEF2)) #define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 #define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */ /* Intr Throttling - RW */ #define E1000_EITR_82574(_n) (0x000E8 + (0x4 * (_n))) #define E1000_EIAC_82574 0x000DC /* Ext. Interrupt Auto Clear - RW */ #define E1000_EIAC_MASK_82574 0x01F00000 -#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */ +#define E1000_IVAR_INT_ALLOC_VALID 0x8 -#define E1000_RXCFGL 0x0B634 /* TimeSync Rx EtherType & Msg Type Reg - RW */ +/* Manageability Operation Mode mask */ +#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 #define E1000_BASE1000T_STATUS 10 #define E1000_IDLE_ERROR_COUNT_MASK 0xFF #define E1000_RECEIVE_ERROR_COUNTER 21 #define E1000_RECEIVE_ERROR_MAX 0xFFFF bool e1000_check_phy_82574(struct e1000_hw *hw); bool e1000_get_laa_state_82571(struct e1000_hw *hw); void e1000_set_laa_state_82571(struct e1000_hw *hw, bool state); #endif Index: stable/10/sys/dev/e1000/e1000_82575.c =================================================================== --- stable/10/sys/dev/e1000/e1000_82575.c (revision 296054) +++ stable/10/sys/dev/e1000/e1000_82575.c (revision 296055) @@ -1,3642 +1,3779 @@ /****************************************************************************** Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/ /*$FreeBSD$*/ /* * 82575EB Gigabit Network Connection * 82575EB Gigabit Backplane Connection * 82575GB Gigabit Network Connection * 82576 Gigabit Network Connection * 82576 Quad Port Gigabit Mezzanine Adapter * 82580 Gigabit Network Connection * I350 Gigabit Network Connection */ #include "e1000_api.h" #include "e1000_i210.h" static s32 e1000_init_phy_params_82575(struct e1000_hw *hw); static s32 e1000_init_mac_params_82575(struct e1000_hw *hw); static s32 e1000_acquire_phy_82575(struct e1000_hw *hw); static void e1000_release_phy_82575(struct e1000_hw *hw); static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw); static void e1000_release_nvm_82575(struct e1000_hw *hw); static s32 e1000_check_for_link_82575(struct e1000_hw *hw); static s32 e1000_check_for_link_media_swap(struct e1000_hw *hw); static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw); static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, u16 *duplex); static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw); static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, u16 *data); static s32 e1000_reset_hw_82575(struct e1000_hw *hw); static s32 e1000_reset_hw_82580(struct e1000_hw *hw); static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data); static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data); static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active); static s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active); static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active); static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw); static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw); static s32 e1000_get_media_type_82575(struct e1000_hw *hw); static s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw); static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data); static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, u16 data); static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw); static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask); static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed, u16 *duplex); static s32 e1000_get_phy_id_82575(struct e1000_hw *hw); static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask); static bool e1000_sgmii_active_82575(struct e1000_hw *hw); static s32 e1000_reset_init_script_82575(struct e1000_hw *hw); static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw); static void e1000_config_collision_dist_82575(struct e1000_hw *hw); static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw); static void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw); static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw); static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw); static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw); static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw); static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw); static s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset); static s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset); static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw); static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw); static void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value); static void 
e1000_clear_vfta_i350(struct e1000_hw *hw); static void e1000_i2c_start(struct e1000_hw *hw); static void e1000_i2c_stop(struct e1000_hw *hw); static s32 e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data); static s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data); static s32 e1000_get_i2c_ack(struct e1000_hw *hw); static s32 e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data); static s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data); static void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl); static void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl); static s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data); static bool e1000_get_i2c_data(u32 *i2cctl); static const u16 e1000_82580_rxpbs_table[] = { 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 }; #define E1000_82580_RXPBS_TABLE_SIZE \ (sizeof(e1000_82580_rxpbs_table) / \ sizeof(e1000_82580_rxpbs_table[0])) /** * e1000_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO * @hw: pointer to the HW structure * * Called to determine if the I2C pins are being used for I2C or as an * external MDIO interface since the two options are mutually exclusive. **/ static bool e1000_sgmii_uses_mdio_82575(struct e1000_hw *hw) { u32 reg = 0; bool ext_mdio = FALSE; DEBUGFUNC("e1000_sgmii_uses_mdio_82575"); switch (hw->mac.type) { case e1000_82575: case e1000_82576: reg = E1000_READ_REG(hw, E1000_MDIC); ext_mdio = !!(reg & E1000_MDIC_DEST); break; case e1000_82580: case e1000_i350: case e1000_i354: case e1000_i210: case e1000_i211: reg = E1000_READ_REG(hw, E1000_MDICNFG); ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO); break; default: break; } return ext_mdio; } /** * e1000_init_phy_params_82575 - Init PHY func ptrs. * @hw: pointer to the HW structure **/ static s32 e1000_init_phy_params_82575(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val = E1000_SUCCESS; u32 ctrl_ext; DEBUGFUNC("e1000_init_phy_params_82575"); phy->ops.read_i2c_byte = e1000_read_i2c_byte_generic; phy->ops.write_i2c_byte = e1000_write_i2c_byte_generic; if (hw->phy.media_type != e1000_media_type_copper) { phy->type = e1000_phy_none; goto out; } phy->ops.power_up = e1000_power_up_phy_copper; phy->ops.power_down = e1000_power_down_phy_copper_82575; phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; phy->reset_delay_us = 100; phy->ops.acquire = e1000_acquire_phy_82575; phy->ops.check_reset_block = e1000_check_reset_block_generic; phy->ops.commit = e1000_phy_sw_reset_generic; phy->ops.get_cfg_done = e1000_get_cfg_done_82575; phy->ops.release = e1000_release_phy_82575; ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); if (e1000_sgmii_active_82575(hw)) { phy->ops.reset = e1000_phy_hw_reset_sgmii_82575; ctrl_ext |= E1000_CTRL_I2C_ENA; } else { phy->ops.reset = e1000_phy_hw_reset_generic; ctrl_ext &= ~E1000_CTRL_I2C_ENA; } E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); e1000_reset_mdicnfg_82580(hw); if (e1000_sgmii_active_82575(hw) && !e1000_sgmii_uses_mdio_82575(hw)) { phy->ops.read_reg = e1000_read_phy_reg_sgmii_82575; phy->ops.write_reg = e1000_write_phy_reg_sgmii_82575; } else { switch (hw->mac.type) { case e1000_82580: case e1000_i350: case e1000_i354: phy->ops.read_reg = e1000_read_phy_reg_82580; phy->ops.write_reg = e1000_write_phy_reg_82580; break; case e1000_i210: case e1000_i211: phy->ops.read_reg = e1000_read_phy_reg_gs40g; phy->ops.write_reg = e1000_write_phy_reg_gs40g; break; default: phy->ops.read_reg = e1000_read_phy_reg_igp; phy->ops.write_reg = e1000_write_phy_reg_igp; } } /* Set phy->phy_addr 
and phy->id. */ ret_val = e1000_get_phy_id_82575(hw); /* Verify phy id and set remaining function pointers */ switch (phy->id) { case M88E1543_E_PHY_ID: case M88E1512_E_PHY_ID: case I347AT4_E_PHY_ID: case M88E1112_E_PHY_ID: case M88E1340M_E_PHY_ID: case M88E1111_I_PHY_ID: phy->type = e1000_phy_m88; phy->ops.check_polarity = e1000_check_polarity_m88; phy->ops.get_info = e1000_get_phy_info_m88; if (phy->id == I347AT4_E_PHY_ID || phy->id == M88E1112_E_PHY_ID || phy->id == M88E1340M_E_PHY_ID) phy->ops.get_cable_length = e1000_get_cable_length_m88_gen2; else if (phy->id == M88E1543_E_PHY_ID || phy->id == M88E1512_E_PHY_ID) phy->ops.get_cable_length = e1000_get_cable_length_m88_gen2; else phy->ops.get_cable_length = e1000_get_cable_length_m88; phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; /* Check if this PHY is configured for media swap. */ if (phy->id == M88E1112_E_PHY_ID) { u16 data; ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 2); if (ret_val) goto out; ret_val = phy->ops.read_reg(hw, E1000_M88E1112_MAC_CTRL_1, &data); if (ret_val) goto out; data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >> E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT; if (data == E1000_M88E1112_AUTO_COPPER_SGMII || data == E1000_M88E1112_AUTO_COPPER_BASEX) hw->mac.ops.check_for_link = e1000_check_for_link_media_swap; } if (phy->id == M88E1512_E_PHY_ID) { ret_val = e1000_initialize_M88E1512_phy(hw); if (ret_val) goto out; } + if (phy->id == M88E1543_E_PHY_ID) { + ret_val = e1000_initialize_M88E1543_phy(hw); + if (ret_val) + goto out; + } break; case IGP03E1000_E_PHY_ID: case IGP04E1000_E_PHY_ID: phy->type = e1000_phy_igp_3; phy->ops.check_polarity = e1000_check_polarity_igp; phy->ops.get_info = e1000_get_phy_info_igp; phy->ops.get_cable_length = e1000_get_cable_length_igp_2; phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp; phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82575; phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic; break; case I82580_I_PHY_ID: case I350_I_PHY_ID: phy->type = e1000_phy_82580; phy->ops.check_polarity = e1000_check_polarity_82577; phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_82577; phy->ops.get_cable_length = e1000_get_cable_length_82577; phy->ops.get_info = e1000_get_phy_info_82577; phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580; phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580; break; case I210_I_PHY_ID: phy->type = e1000_phy_i210; phy->ops.check_polarity = e1000_check_polarity_m88; phy->ops.get_info = e1000_get_phy_info_m88; phy->ops.get_cable_length = e1000_get_cable_length_m88_gen2; phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580; phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580; phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; break; default: ret_val = -E1000_ERR_PHY; goto out; } out: return ret_val; } /** * e1000_init_nvm_params_82575 - Init NVM func ptrs. * @hw: pointer to the HW structure **/ s32 e1000_init_nvm_params_82575(struct e1000_hw *hw) { struct e1000_nvm_info *nvm = &hw->nvm; u32 eecd = E1000_READ_REG(hw, E1000_EECD); u16 size; DEBUGFUNC("e1000_init_nvm_params_82575"); size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> E1000_EECD_SIZE_EX_SHIFT); /* * Added to a constant, "size" becomes the left-shift value * for setting word_size.
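*
* Worked example (editorial): if the EECD size field reads 3, the
* += below yields size = 3 + NVM_WORD_SIZE_BASE_SHIFT and
* word_size = 1 << size; with the customary base shift of 6 that
* is a 512-word (1 KB) EEPROM.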
*/ size += NVM_WORD_SIZE_BASE_SHIFT; /* Just in case size is out of range, cap it to the largest * EEPROM size supported */ if (size > 15) size = 15; nvm->word_size = 1 << size; if (hw->mac.type < e1000_i210) { nvm->opcode_bits = 8; nvm->delay_usec = 1; switch (nvm->override) { case e1000_nvm_override_spi_large: nvm->page_size = 32; nvm->address_bits = 16; break; case e1000_nvm_override_spi_small: nvm->page_size = 8; nvm->address_bits = 8; break; default: nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; break; } if (nvm->word_size == (1 << 15)) nvm->page_size = 128; nvm->type = e1000_nvm_eeprom_spi; } else { nvm->type = e1000_nvm_flash_hw; } /* Function Pointers */ nvm->ops.acquire = e1000_acquire_nvm_82575; nvm->ops.release = e1000_release_nvm_82575; if (nvm->word_size < (1 << 15)) nvm->ops.read = e1000_read_nvm_eerd; else nvm->ops.read = e1000_read_nvm_spi; nvm->ops.write = e1000_write_nvm_spi; nvm->ops.validate = e1000_validate_nvm_checksum_generic; nvm->ops.update = e1000_update_nvm_checksum_generic; nvm->ops.valid_led_default = e1000_valid_led_default_82575; /* override generic family function pointers for specific descendants */ switch (hw->mac.type) { case e1000_82580: nvm->ops.validate = e1000_validate_nvm_checksum_82580; nvm->ops.update = e1000_update_nvm_checksum_82580; break; case e1000_i350: case e1000_i354: nvm->ops.validate = e1000_validate_nvm_checksum_i350; nvm->ops.update = e1000_update_nvm_checksum_i350; break; default: break; } return E1000_SUCCESS; } /** * e1000_init_mac_params_82575 - Init MAC func ptrs. * @hw: pointer to the HW structure **/ static s32 e1000_init_mac_params_82575(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; DEBUGFUNC("e1000_init_mac_params_82575"); /* Derives media type */ e1000_get_media_type_82575(hw); /* Set mta register count */ mac->mta_reg_count = 128; /* Set uta register count */ mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128; /* Set rar entry count */ mac->rar_entry_count = E1000_RAR_ENTRIES_82575; if (mac->type == e1000_82576) mac->rar_entry_count = E1000_RAR_ENTRIES_82576; if (mac->type == e1000_82580) mac->rar_entry_count = E1000_RAR_ENTRIES_82580; if (mac->type == e1000_i350 || mac->type == e1000_i354) mac->rar_entry_count = E1000_RAR_ENTRIES_I350; /* Enable EEE default settings for EEE supported devices */ if (mac->type >= e1000_i350) dev_spec->eee_disable = FALSE; /* Allow a single clear of the SW semaphore on I210 and newer */ if (mac->type >= e1000_i210) dev_spec->clear_semaphore_once = TRUE; /* Set if part includes ASF firmware */ mac->asf_firmware_present = TRUE; /* FWSM register */ mac->has_fwsm = TRUE; /* ARC supported; valid only if manageability features are enabled. */ mac->arc_subsystem_valid = !!(E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK); /* Function pointers */ /* bus type/speed/width */ mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic; /* reset */ if (mac->type >= e1000_82580) mac->ops.reset_hw = e1000_reset_hw_82580; else mac->ops.reset_hw = e1000_reset_hw_82575; /* hw initialization */ if ((mac->type == e1000_i210) || (mac->type == e1000_i211)) mac->ops.init_hw = e1000_init_hw_i210; else mac->ops.init_hw = e1000_init_hw_82575; /* link setup */ mac->ops.setup_link = e1000_setup_link_generic; /* physical interface link setup */ mac->ops.setup_physical_interface = (hw->phy.media_type == e1000_media_type_copper) ? 
e1000_setup_copper_link_82575 : e1000_setup_serdes_link_82575; /* physical interface shutdown */ mac->ops.shutdown_serdes = e1000_shutdown_serdes_link_82575; /* physical interface power up */ mac->ops.power_up_serdes = e1000_power_up_serdes_link_82575; /* check for link */ mac->ops.check_for_link = e1000_check_for_link_82575; /* read mac address */ mac->ops.read_mac_addr = e1000_read_mac_addr_82575; /* configure collision distance */ mac->ops.config_collision_dist = e1000_config_collision_dist_82575; /* multicast address update */ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic; if (hw->mac.type == e1000_i350 || mac->type == e1000_i354) { /* writing VFTA */ mac->ops.write_vfta = e1000_write_vfta_i350; /* clearing VFTA */ mac->ops.clear_vfta = e1000_clear_vfta_i350; } else { /* writing VFTA */ mac->ops.write_vfta = e1000_write_vfta_generic; /* clearing VFTA */ mac->ops.clear_vfta = e1000_clear_vfta_generic; } if (hw->mac.type >= e1000_82580) mac->ops.validate_mdi_setting = e1000_validate_mdi_setting_crossover_generic; /* ID LED init */ mac->ops.id_led_init = e1000_id_led_init_generic; /* blink LED */ mac->ops.blink_led = e1000_blink_led_generic; /* setup LED */ mac->ops.setup_led = e1000_setup_led_generic; /* cleanup LED */ mac->ops.cleanup_led = e1000_cleanup_led_generic; /* turn on/off LED */ mac->ops.led_on = e1000_led_on_generic; mac->ops.led_off = e1000_led_off_generic; /* clear hardware counters */ mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82575; /* link info */ mac->ops.get_link_up_info = e1000_get_link_up_info_82575; /* acquire SW_FW sync */ mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_82575; mac->ops.release_swfw_sync = e1000_release_swfw_sync_82575; if (mac->type >= e1000_i210) { mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_i210; mac->ops.release_swfw_sync = e1000_release_swfw_sync_i210; } /* set lan id for port to determine which phy lock to use */ hw->mac.ops.set_lan_id(hw); return E1000_SUCCESS; } /** * e1000_init_function_pointers_82575 - Init func ptrs. * @hw: pointer to the HW structure * * Called to initialize all function pointers and parameters. **/ void e1000_init_function_pointers_82575(struct e1000_hw *hw) { DEBUGFUNC("e1000_init_function_pointers_82575"); hw->mac.ops.init_params = e1000_init_mac_params_82575; hw->nvm.ops.init_params = e1000_init_nvm_params_82575; hw->phy.ops.init_params = e1000_init_phy_params_82575; hw->mbx.ops.init_params = e1000_init_mbx_params_pf; } /** * e1000_acquire_phy_82575 - Acquire rights to access PHY * @hw: pointer to the HW structure * * Acquire access rights to the correct PHY. **/ static s32 e1000_acquire_phy_82575(struct e1000_hw *hw) { u16 mask = E1000_SWFW_PHY0_SM; DEBUGFUNC("e1000_acquire_phy_82575"); if (hw->bus.func == E1000_FUNC_1) mask = E1000_SWFW_PHY1_SM; else if (hw->bus.func == E1000_FUNC_2) mask = E1000_SWFW_PHY2_SM; else if (hw->bus.func == E1000_FUNC_3) mask = E1000_SWFW_PHY3_SM; return hw->mac.ops.acquire_swfw_sync(hw, mask); } /** * e1000_release_phy_82575 - Release rights to access PHY * @hw: pointer to the HW structure * * A wrapper to release access rights to the correct PHY. 
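 * (Editor's note) This is the counterpart of e1000_acquire_phy_82575();
 * both derive the same per-port mask (E1000_SWFW_PHY0_SM..E1000_SWFW_PHY3_SM)
 * from hw->bus.func, so the lock released always matches the one acquired.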
**/ static void e1000_release_phy_82575(struct e1000_hw *hw) { u16 mask = E1000_SWFW_PHY0_SM; DEBUGFUNC("e1000_release_phy_82575"); if (hw->bus.func == E1000_FUNC_1) mask = E1000_SWFW_PHY1_SM; else if (hw->bus.func == E1000_FUNC_2) mask = E1000_SWFW_PHY2_SM; else if (hw->bus.func == E1000_FUNC_3) mask = E1000_SWFW_PHY3_SM; hw->mac.ops.release_swfw_sync(hw, mask); } /** * e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * * Reads the PHY register at offset using the serial gigabit media independent * interface and stores the retrieved information in data. **/ static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, u16 *data) { s32 ret_val = -E1000_ERR_PARAM; DEBUGFUNC("e1000_read_phy_reg_sgmii_82575"); if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { DEBUGOUT1("PHY Address %u is out of range\n", offset); goto out; } ret_val = hw->phy.ops.acquire(hw); if (ret_val) goto out; ret_val = e1000_read_phy_reg_i2c(hw, offset, data); hw->phy.ops.release(hw); out: return ret_val; } /** * e1000_write_phy_reg_sgmii_82575 - Write PHY register using sgmii * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * * Writes the data to PHY register at the offset using the serial gigabit * media independent interface. **/ static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, u16 data) { s32 ret_val = -E1000_ERR_PARAM; DEBUGFUNC("e1000_write_phy_reg_sgmii_82575"); if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { DEBUGOUT1("PHY Address %d is out of range\n", offset); goto out; } ret_val = hw->phy.ops.acquire(hw); if (ret_val) goto out; ret_val = e1000_write_phy_reg_i2c(hw, offset, data); hw->phy.ops.release(hw); out: return ret_val; } /** * e1000_get_phy_id_82575 - Retrieve PHY addr and id * @hw: pointer to the HW structure * * Retrieves the PHY address and ID for both PHYs which do and do not use * the sgmii interface. **/ static s32 e1000_get_phy_id_82575(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val = E1000_SUCCESS; u16 phy_id; u32 ctrl_ext; u32 mdic; DEBUGFUNC("e1000_get_phy_id_82575"); /* some i354 devices need an extra read for phy id */ if (hw->mac.type == e1000_i354) e1000_get_phy_id(hw); /* * For SGMII PHYs, we try the list of possible addresses until * we find one that works. For non-SGMII PHYs * (e.g. integrated copper PHYs), an address of 1 should * work. The result of this function should mean phy->phy_addr * and phy->id are set correctly. */ if (!e1000_sgmii_active_82575(hw)) { phy->addr = 1; ret_val = e1000_get_phy_id(hw); goto out; } if (e1000_sgmii_uses_mdio_82575(hw)) { switch (hw->mac.type) { case e1000_82575: case e1000_82576: mdic = E1000_READ_REG(hw, E1000_MDIC); mdic &= E1000_MDIC_PHY_MASK; phy->addr = mdic >> E1000_MDIC_PHY_SHIFT; break; case e1000_82580: case e1000_i350: case e1000_i354: case e1000_i210: case e1000_i211: mdic = E1000_READ_REG(hw, E1000_MDICNFG); mdic &= E1000_MDICNFG_PHY_MASK; phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT; break; default: ret_val = -E1000_ERR_PHY; goto out; break; } ret_val = e1000_get_phy_id(hw); goto out; } /* Power on sgmii phy if it is disabled */ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA); E1000_WRITE_FLUSH(hw); msec_delay(300); /* * The address field in the I2CCMD register is 3 bits and 0 is invalid.
* Therefore, we need to test 1-7 */ for (phy->addr = 1; phy->addr < 8; phy->addr++) { ret_val = e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id); if (ret_val == E1000_SUCCESS) { DEBUGOUT2("Vendor ID 0x%08X read at address %u\n", phy_id, phy->addr); /* * At the time of this writing, the M88 part is * the only supported SGMII PHY product. */ if (phy_id == M88_VENDOR) break; } else { DEBUGOUT1("PHY address %u was unreadable\n", phy->addr); } } /* A valid PHY type couldn't be found. */ if (phy->addr == 8) { phy->addr = 0; ret_val = -E1000_ERR_PHY; } else { ret_val = e1000_get_phy_id(hw); } /* restore previous sfp cage power state */ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); out: return ret_val; } /** * e1000_phy_hw_reset_sgmii_82575 - Performs a PHY reset * @hw: pointer to the HW structure * * Resets the PHY using the serial gigabit media independent interface. **/ static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; struct e1000_phy_info *phy = &hw->phy; DEBUGFUNC("e1000_phy_hw_reset_sgmii_82575"); /* * This isn't a TRUE "hard" reset, but is the only reset * available to us at this time. */ DEBUGOUT("Soft resetting SGMII attached PHY...\n"); if (!(hw->phy.ops.write_reg)) goto out; /* * SFP documentation requires the following to configure the SFP module * to work on SGMII. No further documentation is given. */ ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084); if (ret_val) goto out; ret_val = hw->phy.ops.commit(hw); if (ret_val) goto out; if (phy->id == M88E1512_E_PHY_ID) ret_val = e1000_initialize_M88E1512_phy(hw); out: return ret_val; } /** * e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state * @hw: pointer to the HW structure * @active: TRUE to enable LPLU, FALSE to disable * * Sets the LPLU D0 state according to the active flag. When * activating LPLU this function also disables smart speed * and vice versa. LPLU will not be activated unless the * device autonegotiation advertisement meets standards of * either 10 or 10/100 or 10/100/1000 at all duplexes. * This is a function pointer entry point only called by * PHY setup routines. **/ static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val = E1000_SUCCESS; u16 data; DEBUGFUNC("e1000_set_d0_lplu_state_82575"); if (!(hw->phy.ops.read_reg)) goto out; ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); if (ret_val) goto out; if (active) { data |= IGP02E1000_PM_D0_LPLU; ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, data); if (ret_val) goto out; /* When LPLU is enabled, we should disable SmartSpeed */ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &data); data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) goto out; } else { data &= ~IGP02E1000_PM_D0_LPLU; ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, data); /* * LPLU and SmartSpeed are mutually exclusive. LPLU is used * during Dx states where the power conservation is most * important. During driver activity we should enable * SmartSpeed, so performance is maintained.
*/ if (phy->smart_speed == e1000_smart_speed_on) { ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) goto out; data |= IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) goto out; } else if (phy->smart_speed == e1000_smart_speed_off) { ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) goto out; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) goto out; } } out: return ret_val; } /** * e1000_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state * @hw: pointer to the HW structure * @active: TRUE to enable LPLU, FALSE to disable * * Sets the LPLU D0 state according to the active flag. When * activating LPLU this function also disables smart speed * and vice versa. LPLU will not be activated unless the * device autonegotiation advertisement meets standards of * either 10 or 10/100 or 10/100/1000 at all duplexes. * This is a function pointer entry point only called by * PHY setup routines. **/ static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active) { struct e1000_phy_info *phy = &hw->phy; u32 data; DEBUGFUNC("e1000_set_d0_lplu_state_82580"); data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); if (active) { data |= E1000_82580_PM_D0_LPLU; /* When LPLU is enabled, we should disable SmartSpeed */ data &= ~E1000_82580_PM_SPD; } else { data &= ~E1000_82580_PM_D0_LPLU; /* * LPLU and SmartSpeed are mutually exclusive. LPLU is used * during Dx states where the power conservation is most * important. During driver activity we should enable * SmartSpeed, so performance is maintained. */ if (phy->smart_speed == e1000_smart_speed_on) data |= E1000_82580_PM_SPD; else if (phy->smart_speed == e1000_smart_speed_off) data &= ~E1000_82580_PM_SPD; } E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data); return E1000_SUCCESS; } /** * e1000_set_d3_lplu_state_82580 - Sets low power link up state for D3 * @hw: pointer to the HW structure * @active: boolean used to enable/disable lplu * * Success returns 0, Failure returns 1 * * The low power link up (lplu) state is set to the power management level D3 * and SmartSpeed is disabled when active is TRUE, else clear lplu for D3 * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU * is used during Dx states where the power conservation is most important. * During driver activity, SmartSpeed should be enabled so performance is * maintained. **/ s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active) { struct e1000_phy_info *phy = &hw->phy; u32 data; DEBUGFUNC("e1000_set_d3_lplu_state_82580"); data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); if (!active) { data &= ~E1000_82580_PM_D3_LPLU; /* * LPLU and SmartSpeed are mutually exclusive. LPLU is used * during Dx states where the power conservation is most * important. During driver activity we should enable * SmartSpeed, so performance is maintained. 
*/ if (phy->smart_speed == e1000_smart_speed_on) data |= E1000_82580_PM_SPD; else if (phy->smart_speed == e1000_smart_speed_off) data &= ~E1000_82580_PM_SPD; } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { data |= E1000_82580_PM_D3_LPLU; /* When LPLU is enabled, we should disable SmartSpeed */ data &= ~E1000_82580_PM_SPD; } E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data); return E1000_SUCCESS; } /** * e1000_acquire_nvm_82575 - Request for access to EEPROM * @hw: pointer to the HW structure * * Acquire the necessary semaphores for exclusive access to the EEPROM. * Set the EEPROM access request bit and wait for EEPROM access grant bit. * Return successful if access grant bit set, else clear the request for * EEPROM access and return -E1000_ERR_NVM (-1). **/ static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; DEBUGFUNC("e1000_acquire_nvm_82575"); ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); if (ret_val) goto out; /* * Check whether a previous access left an error * that this access may hook on */ if (hw->mac.type == e1000_i350) { u32 eecd = E1000_READ_REG(hw, E1000_EECD); if (eecd & (E1000_EECD_BLOCKED | E1000_EECD_ABORT | E1000_EECD_TIMEOUT)) { /* Clear all access error flags */ E1000_WRITE_REG(hw, E1000_EECD, eecd | E1000_EECD_ERROR_CLR); DEBUGOUT("Nvm bit banging access error detected and cleared.\n"); } } if (hw->mac.type == e1000_82580) { u32 eecd = E1000_READ_REG(hw, E1000_EECD); if (eecd & E1000_EECD_BLOCKED) { /* Clear access error flag */ E1000_WRITE_REG(hw, E1000_EECD, eecd | E1000_EECD_BLOCKED); DEBUGOUT("Nvm bit banging access error detected and cleared.\n"); } } ret_val = e1000_acquire_nvm_generic(hw); if (ret_val) e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); out: return ret_val; } /** * e1000_release_nvm_82575 - Release exclusive access to EEPROM * @hw: pointer to the HW structure * * Stop any current commands to the EEPROM and clear the EEPROM request bit, * then release the semaphores acquired. **/ static void e1000_release_nvm_82575(struct e1000_hw *hw) { DEBUGFUNC("e1000_release_nvm_82575"); e1000_release_nvm_generic(hw); e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); } /** * e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore * @hw: pointer to the HW structure * @mask: specifies which semaphore to acquire * * Acquire the SW/FW semaphore to access the PHY or NVM. The mask * will also specify which port we're acquiring the lock for.
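 *
 * Illustrative pairing (editor's sketch, mirroring the NVM helpers above):
 *
 *	if (e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM) ==
 *	    E1000_SUCCESS) {
 *		... touch the shared EEPROM resource ...
 *		e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
 *	}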
**/ static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask) { u32 swfw_sync; u32 swmask = mask; u32 fwmask = mask << 16; s32 ret_val = E1000_SUCCESS; s32 i = 0, timeout = 200; DEBUGFUNC("e1000_acquire_swfw_sync_82575"); while (i < timeout) { if (e1000_get_hw_semaphore_generic(hw)) { ret_val = -E1000_ERR_SWFW_SYNC; goto out; } swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); if (!(swfw_sync & (fwmask | swmask))) break; /* * Firmware currently using resource (fwmask) * or other software thread using resource (swmask) */ e1000_put_hw_semaphore_generic(hw); msec_delay_irq(5); i++; } if (i == timeout) { DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n"); ret_val = -E1000_ERR_SWFW_SYNC; goto out; } swfw_sync |= swmask; E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); e1000_put_hw_semaphore_generic(hw); out: return ret_val; } /** * e1000_release_swfw_sync_82575 - Release SW/FW semaphore * @hw: pointer to the HW structure * @mask: specifies which semaphore to release * * Release the SW/FW semaphore used to access the PHY or NVM. The mask * will also specify which port we're releasing the lock for. **/ static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask) { u32 swfw_sync; DEBUGFUNC("e1000_release_swfw_sync_82575"); while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS) ; /* Empty */ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); swfw_sync &= ~mask; E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); e1000_put_hw_semaphore_generic(hw); } /** * e1000_get_cfg_done_82575 - Read config done bit * @hw: pointer to the HW structure * * Read the management control register for the config done bit for * completion status. NOTE: silicon which is EEPROM-less will fail trying * to read the config done bit, so the error is *ONLY* logged and * E1000_SUCCESS is returned. If we were to return an error, EEPROM-less * silicon would not be able to be reset or change link. **/ static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw) { s32 timeout = PHY_CFG_TIMEOUT; u32 mask = E1000_NVM_CFG_DONE_PORT_0; DEBUGFUNC("e1000_get_cfg_done_82575"); if (hw->bus.func == E1000_FUNC_1) mask = E1000_NVM_CFG_DONE_PORT_1; else if (hw->bus.func == E1000_FUNC_2) mask = E1000_NVM_CFG_DONE_PORT_2; else if (hw->bus.func == E1000_FUNC_3) mask = E1000_NVM_CFG_DONE_PORT_3; while (timeout) { if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask) break; msec_delay(1); timeout--; } if (!timeout) DEBUGOUT("MNG configuration cycle has not completed.\n"); /* If EEPROM is not marked present, init the PHY manually */ if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) && (hw->phy.type == e1000_phy_igp_3)) e1000_phy_init_script_igp3(hw); return E1000_SUCCESS; } /** * e1000_get_link_up_info_82575 - Get link speed/duplex info * @hw: pointer to the HW structure * @speed: stores the current speed * @duplex: stores the current duplex * * This is a wrapper function, if using the serial gigabit media independent * interface, use PCS to retrieve the link speed and duplex information. * Otherwise, use the generic function to get the link speed and duplex info.
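 *
 * Typical use through the ops table (editor's sketch):
 *
 *	u16 speed, duplex;
 *	if (hw->mac.ops.get_link_up_info(hw, &speed, &duplex) ==
 *	    E1000_SUCCESS)
 *		DEBUGOUT2("link: %u Mbps, duplex code %u\n", speed, duplex);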
**/ static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, u16 *duplex) { s32 ret_val; DEBUGFUNC("e1000_get_link_up_info_82575"); if (hw->phy.media_type != e1000_media_type_copper) ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, speed, duplex); else ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex); return ret_val; } /** * e1000_check_for_link_82575 - Check for link * @hw: pointer to the HW structure * * If sgmii is enabled, then use the pcs register to determine link, otherwise * use the generic interface for determining link. **/ static s32 e1000_check_for_link_82575(struct e1000_hw *hw) { s32 ret_val; u16 speed, duplex; DEBUGFUNC("e1000_check_for_link_82575"); if (hw->phy.media_type != e1000_media_type_copper) { ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed, &duplex); /* * Use this flag to determine if link needs to be checked or * not. If we have link clear the flag so that we do not * continue to check for link. */ hw->mac.get_link_status = !hw->mac.serdes_has_link; /* * Configure Flow Control now that Auto-Neg has completed. * First, we need to restore the desired flow control * settings because we may have had to re-autoneg with a * different link partner. */ ret_val = e1000_config_fc_after_link_up_generic(hw); if (ret_val) DEBUGOUT("Error configuring flow control\n"); } else { ret_val = e1000_check_for_copper_link_generic(hw); } return ret_val; } /** * e1000_check_for_link_media_swap - Check which M88E1112 interface linked * @hw: pointer to the HW structure * * Poll the M88E1112 interfaces to see which interface achieved link. */ static s32 e1000_check_for_link_media_swap(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 data; u8 port = 0; DEBUGFUNC("e1000_check_for_link_media_swap"); - /* Check the copper medium. */ + /* Check for copper. */ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); if (ret_val) return ret_val; ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); if (ret_val) return ret_val; if (data & E1000_M88E1112_STATUS_LINK) port = E1000_MEDIA_PORT_COPPER; - /* Check the other medium. */ + /* Check for other. */ ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1); if (ret_val) return ret_val; ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); if (ret_val) return ret_val; - /* reset page to 0 */ - ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); - if (ret_val) - return ret_val; - if (data & E1000_M88E1112_STATUS_LINK) port = E1000_MEDIA_PORT_OTHER; /* Determine if a swap needs to happen. 
*/ if (port && (hw->dev_spec._82575.media_port != port)) { hw->dev_spec._82575.media_port = port; hw->dev_spec._82575.media_changed = TRUE; + } + + if (port == E1000_MEDIA_PORT_COPPER) { + /* reset page to 0 */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; + e1000_check_for_link_82575(hw); } else { - ret_val = e1000_check_for_link_82575(hw); + e1000_check_for_link_82575(hw); + /* reset page to 0 */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; } return E1000_SUCCESS; } /** * e1000_power_up_serdes_link_82575 - Power up the serdes link after shutdown * @hw: pointer to the HW structure **/ static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw) { u32 reg; DEBUGFUNC("e1000_power_up_serdes_link_82575"); if ((hw->phy.media_type != e1000_media_type_internal_serdes) && !e1000_sgmii_active_82575(hw)) return; /* Enable PCS to turn on link */ reg = E1000_READ_REG(hw, E1000_PCS_CFG0); reg |= E1000_PCS_CFG_PCS_EN; E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg); /* Power up the laser */ reg = E1000_READ_REG(hw, E1000_CTRL_EXT); reg &= ~E1000_CTRL_EXT_SDP3_DATA; E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); /* flush the write to verify completion */ E1000_WRITE_FLUSH(hw); msec_delay(1); } /** * e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex * @hw: pointer to the HW structure * @speed: stores the current speed * @duplex: stores the current duplex * * Using the physical coding sub-layer (PCS), retrieve the current speed and * duplex, then store the values in the pointers provided. **/ static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed, u16 *duplex) { struct e1000_mac_info *mac = &hw->mac; u32 pcs; u32 status; DEBUGFUNC("e1000_get_pcs_speed_and_duplex_82575"); /* * Read the PCS Status register for link state. For non-copper mode, * the status register is not accurate. The PCS status register is * used instead. */ pcs = E1000_READ_REG(hw, E1000_PCS_LSTAT); /* * The link up bit determines when link is up on autoneg. */ if (pcs & E1000_PCS_LSTS_LINK_OK) { mac->serdes_has_link = TRUE; /* Detect and store PCS speed */ if (pcs & E1000_PCS_LSTS_SPEED_1000) *speed = SPEED_1000; else if (pcs & E1000_PCS_LSTS_SPEED_100) *speed = SPEED_100; else *speed = SPEED_10; /* Detect and store PCS duplex */ if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) *duplex = FULL_DUPLEX; else *duplex = HALF_DUPLEX; /* Check if it is an I354 2.5Gb backplane connection. */ if (mac->type == e1000_i354) { status = E1000_READ_REG(hw, E1000_STATUS); if ((status & E1000_STATUS_2P5_SKU) && !(status & E1000_STATUS_2P5_SKU_OVER)) { *speed = SPEED_2500; *duplex = FULL_DUPLEX; DEBUGOUT("2500 Mbs, "); DEBUGOUT("Full Duplex\n"); } } } else { mac->serdes_has_link = FALSE; *speed = 0; *duplex = 0; } return E1000_SUCCESS; } /** * e1000_shutdown_serdes_link_82575 - Remove link during power down * @hw: pointer to the HW structure * * On serdes interfaces, shut down the SFP module and PCS on driver * unload when management pass-through is not enabled.
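 * (Editor's note) This is the counterpart of
 * e1000_power_up_serdes_link_82575(); it is intentionally a no-op when
 * management pass-through is enabled so manageability traffic keeps flowing.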
**/ void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw) { u32 reg; DEBUGFUNC("e1000_shutdown_serdes_link_82575"); if ((hw->phy.media_type != e1000_media_type_internal_serdes) && !e1000_sgmii_active_82575(hw)) return; if (!e1000_enable_mng_pass_thru(hw)) { /* Disable PCS to turn off link */ reg = E1000_READ_REG(hw, E1000_PCS_CFG0); reg &= ~E1000_PCS_CFG_PCS_EN; E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg); /* shutdown the laser */ reg = E1000_READ_REG(hw, E1000_CTRL_EXT); reg |= E1000_CTRL_EXT_SDP3_DATA; E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); /* flush the write to verify completion */ E1000_WRITE_FLUSH(hw); msec_delay(1); } return; } /** * e1000_reset_hw_82575 - Reset hardware * @hw: pointer to the HW structure * * This resets the hardware into a known state. **/ static s32 e1000_reset_hw_82575(struct e1000_hw *hw) { u32 ctrl; s32 ret_val; DEBUGFUNC("e1000_reset_hw_82575"); /* * Prevent the PCI-E bus from sticking if there is no TLP connection * on the last TLP read/write transaction when MAC is reset. */ ret_val = e1000_disable_pcie_master_generic(hw); if (ret_val) DEBUGOUT("PCI-E Master disable polling has failed.\n"); /* set the completion timeout for interface */ ret_val = e1000_set_pcie_completion_timeout(hw); if (ret_val) DEBUGOUT("PCI-E Set completion timeout has failed.\n"); DEBUGOUT("Masking off all interrupts\n"); E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); E1000_WRITE_REG(hw, E1000_RCTL, 0); E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); E1000_WRITE_FLUSH(hw); msec_delay(10); ctrl = E1000_READ_REG(hw, E1000_CTRL); DEBUGOUT("Issuing a global reset to MAC\n"); E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); ret_val = e1000_get_auto_rd_done_generic(hw); if (ret_val) { /* * When auto config read does not complete, do not * return with an error. This can happen in situations * where there is no eeprom and prevents getting link. */ DEBUGOUT("Auto Read Done did not complete\n"); } /* If EEPROM is not present, run manual init scripts */ if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES)) e1000_reset_init_script_82575(hw); /* Clear any pending interrupt events. */ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); E1000_READ_REG(hw, E1000_ICR); /* Install any alternate MAC address into RAR0 */ ret_val = e1000_check_alt_mac_addr_generic(hw); return ret_val; } /** * e1000_init_hw_82575 - Initialize hardware * @hw: pointer to the HW structure * * This inits the hardware readying it for operation. **/ s32 e1000_init_hw_82575(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; s32 ret_val; u16 i, rar_count = mac->rar_entry_count; DEBUGFUNC("e1000_init_hw_82575"); /* Initialize identification LED */ ret_val = mac->ops.id_led_init(hw); if (ret_val) { DEBUGOUT("Error initializing identification LED\n"); /* This is not fatal and we should not stop init due to this */ } /* Disabling VLAN filtering */ DEBUGOUT("Initializing the IEEE VLAN\n"); mac->ops.clear_vfta(hw); /* Setup the receive address */ e1000_init_rx_addrs_generic(hw, rar_count); /* Zero out the Multicast HASH table */ DEBUGOUT("Zeroing the MTA\n"); for (i = 0; i < mac->mta_reg_count; i++) E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); /* Zero out the Unicast HASH table */ DEBUGOUT("Zeroing the UTA\n"); for (i = 0; i < mac->uta_reg_count; i++) E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0); /* Setup link and flow control */ ret_val = mac->ops.setup_link(hw); /* Set the default MTU size */ hw->dev_spec._82575.mtu = 1500; /* * Clear all of the statistics registers (clear on read). 
It is * important that we do this after we have tried to establish link * because the symbol error count will increment wildly if there * is no link. */ e1000_clear_hw_cntrs_82575(hw); return ret_val; } /** * e1000_setup_copper_link_82575 - Configure copper link settings * @hw: pointer to the HW structure * * Configures the link for auto-neg or forced speed and duplex. Then we check * for link; once link is established, calls to configure collision distance * and flow control are made. **/ static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw) { u32 ctrl; s32 ret_val; u32 phpm_reg; DEBUGFUNC("e1000_setup_copper_link_82575"); ctrl = E1000_READ_REG(hw, E1000_CTRL); ctrl |= E1000_CTRL_SLU; ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); E1000_WRITE_REG(hw, E1000_CTRL, ctrl); /* Clear Go Link Disconnect bit on supported devices */ switch (hw->mac.type) { case e1000_82580: case e1000_i350: case e1000_i210: case e1000_i211: phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); phpm_reg &= ~E1000_82580_PM_GO_LINKD; E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg); break; default: break; } ret_val = e1000_setup_serdes_link_82575(hw); if (ret_val) goto out; if (e1000_sgmii_active_82575(hw)) { /* allow time for the SFP cage to power up the phy */ msec_delay(300); ret_val = hw->phy.ops.reset(hw); if (ret_val) { DEBUGOUT("Error resetting the PHY.\n"); goto out; } } switch (hw->phy.type) { case e1000_phy_i210: case e1000_phy_m88: switch (hw->phy.id) { case I347AT4_E_PHY_ID: case M88E1112_E_PHY_ID: case M88E1340M_E_PHY_ID: case M88E1543_E_PHY_ID: case M88E1512_E_PHY_ID: case I210_I_PHY_ID: ret_val = e1000_copper_link_setup_m88_gen2(hw); break; default: ret_val = e1000_copper_link_setup_m88(hw); break; } break; case e1000_phy_igp_3: ret_val = e1000_copper_link_setup_igp(hw); break; case e1000_phy_82580: ret_val = e1000_copper_link_setup_82577(hw); break; default: ret_val = -E1000_ERR_PHY; break; } if (ret_val) goto out; ret_val = e1000_setup_copper_link_generic(hw); out: return ret_val; } /** * e1000_setup_serdes_link_82575 - Setup link for serdes * @hw: pointer to the HW structure * * Configure the physical coding sub-layer (PCS) link. The PCS link is * used on copper connections where the serialized gigabit media independent * interface (sgmii), or serdes fiber is being used. Configures the link * for auto-negotiation or forces speed/duplex. **/ static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw) { u32 ctrl_ext, ctrl_reg, reg, anadv_reg; bool pcs_autoneg; s32 ret_val = E1000_SUCCESS; u16 data; DEBUGFUNC("e1000_setup_serdes_link_82575"); if ((hw->phy.media_type != e1000_media_type_internal_serdes) && !e1000_sgmii_active_82575(hw)) return ret_val; /* * On the 82575, SerDes loopback mode persists until it is * explicitly turned off or a power cycle is performed. A read to * the register does not indicate its status. Therefore, we ensure * loopback mode is disabled during initialization.
*/ E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); /* power on the sfp cage if present */ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); ctrl_reg = E1000_READ_REG(hw, E1000_CTRL); ctrl_reg |= E1000_CTRL_SLU; /* set both sw defined pins on 82575/82576 */ if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1; reg = E1000_READ_REG(hw, E1000_PCS_LCTL); /* default pcs_autoneg to the same setting as mac autoneg */ pcs_autoneg = hw->mac.autoneg; switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { case E1000_CTRL_EXT_LINK_MODE_SGMII: /* sgmii mode lets the phy handle forcing speed/duplex */ pcs_autoneg = TRUE; /* autoneg time out should be disabled for SGMII mode */ reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT); break; case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: /* disable PCS autoneg and support parallel detect only */ pcs_autoneg = FALSE; /* fall through to default case */ default: if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) { ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data); if (ret_val) { DEBUGOUT("NVM Read Error\n"); return ret_val; } if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT) pcs_autoneg = FALSE; } /* * non-SGMII modes only support a speed of 1000/Full for the * link so it is best to just force the MAC and let the pcs * link either autoneg or be forced to 1000/Full */ ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD | E1000_CTRL_FD | E1000_CTRL_FRCDPX; /* set speed of 1000/Full if speed/duplex is forced */ reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL; break; } E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg); /* * New SerDes mode allows for forcing speed or autonegotiating speed * at 1gb. Autoneg should be the default set by most drivers. This is * the mode that will be compatible with older link partners and * switches. However, both are supported by the hardware and some * drivers/tools. */ reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP | E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK); if (pcs_autoneg) { /* Set PCS register for autoneg */ reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */ E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */ /* Disable force flow control for autoneg */ reg &= ~E1000_PCS_LCTL_FORCE_FCTRL; /* Configure flow control advertisement for autoneg */ anadv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV); anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE); switch (hw->fc.requested_mode) { case e1000_fc_full: case e1000_fc_rx_pause: anadv_reg |= E1000_TXCW_ASM_DIR; anadv_reg |= E1000_TXCW_PAUSE; break; case e1000_fc_tx_pause: anadv_reg |= E1000_TXCW_ASM_DIR; break; default: break; } E1000_WRITE_REG(hw, E1000_PCS_ANADV, anadv_reg); DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); } else { /* Set PCS register for forced link */ reg |= E1000_PCS_LCTL_FSD; /* Force Speed */ /* Force flow control for forced link */ reg |= E1000_PCS_LCTL_FORCE_FCTRL; DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); } E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg); if (!pcs_autoneg && !e1000_sgmii_active_82575(hw)) e1000_force_mac_fc_generic(hw); return ret_val; } /** * e1000_get_media_type_82575 - derives current media type. * @hw: pointer to the HW structure * * The media type is chosen reflecting a few settings. * The following are taken into account: * - link mode set in the current port Init Control Word #3 * - current link mode settings in CSR register * - MDIO vs.
I2C PHY control interface chosen * - SFP module media type **/ static s32 e1000_get_media_type_82575(struct e1000_hw *hw) { struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; s32 ret_val = E1000_SUCCESS; u32 ctrl_ext = 0; u32 link_mode = 0; /* Set internal phy as default */ dev_spec->sgmii_active = FALSE; dev_spec->module_plugged = FALSE; /* Get CSR setting */ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); /* extract link mode setting */ link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK; switch (link_mode) { case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: hw->phy.media_type = e1000_media_type_internal_serdes; break; case E1000_CTRL_EXT_LINK_MODE_GMII: hw->phy.media_type = e1000_media_type_copper; break; case E1000_CTRL_EXT_LINK_MODE_SGMII: /* Get phy control interface type set (MDIO vs. I2C) */ if (e1000_sgmii_uses_mdio_82575(hw)) { hw->phy.media_type = e1000_media_type_copper; dev_spec->sgmii_active = TRUE; break; } /* fall through for I2C based SGMII */ case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: /* read media type from SFP EEPROM */ ret_val = e1000_set_sfp_media_type_82575(hw); if ((ret_val != E1000_SUCCESS) || (hw->phy.media_type == e1000_media_type_unknown)) { /* * If media type was not identified then return media * type defined by the CTRL_EXT settings. */ hw->phy.media_type = e1000_media_type_internal_serdes; if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) { hw->phy.media_type = e1000_media_type_copper; dev_spec->sgmii_active = TRUE; } break; } /* do not change link mode for 100BaseFX */ if (dev_spec->eth_flags.e100_base_fx) break; /* change current link mode setting */ ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK; if (hw->phy.media_type == e1000_media_type_copper) ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII; else ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); break; } return ret_val; } /** * e1000_set_sfp_media_type_82575 - derives SFP module media type. * @hw: pointer to the HW structure * * The media type is chosen based on the SFP module * compatibility flags retrieved from the SFP ID EEPROM.
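 *
 * Flag-to-media mapping applied below (editor's summary):
 *	1000BASE-LX or 1000BASE-SX -> internal serdes
 *	100BASE-FX                 -> internal serdes, sgmii_active
 *	1000BASE-T                 -> copper, sgmii_active
 *	anything else              -> unknown media type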
**/ static s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw) { s32 ret_val = E1000_ERR_CONFIG; u32 ctrl_ext = 0; struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; struct sfp_e1000_flags *eth_flags = &dev_spec->eth_flags; u8 tranceiver_type = 0; s32 timeout = 3; /* Turn I2C interface ON and power on sfp cage */ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA); E1000_WRITE_FLUSH(hw); /* Read SFP module data */ while (timeout) { ret_val = e1000_read_sfp_data_byte(hw, E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET), &tranceiver_type); if (ret_val == E1000_SUCCESS) break; msec_delay(100); timeout--; } if (ret_val != E1000_SUCCESS) goto out; ret_val = e1000_read_sfp_data_byte(hw, E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET), (u8 *)eth_flags); if (ret_val != E1000_SUCCESS) goto out; /* Check if there is some SFP module plugged and powered */ if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) || (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) { dev_spec->module_plugged = TRUE; if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) { hw->phy.media_type = e1000_media_type_internal_serdes; } else if (eth_flags->e100_base_fx) { dev_spec->sgmii_active = TRUE; hw->phy.media_type = e1000_media_type_internal_serdes; } else if (eth_flags->e1000_base_t) { dev_spec->sgmii_active = TRUE; hw->phy.media_type = e1000_media_type_copper; } else { hw->phy.media_type = e1000_media_type_unknown; DEBUGOUT("PHY module has not been recognized\n"); goto out; } } else { hw->phy.media_type = e1000_media_type_unknown; } ret_val = E1000_SUCCESS; out: /* Restore I2C interface setting */ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); return ret_val; } /** * e1000_valid_led_default_82575 - Verify a valid default LED config * @hw: pointer to the HW structure * @data: pointer to the NVM (EEPROM) * * Read the EEPROM for the current default LED configuration. If the * LED configuration is not valid, set to a valid LED configuration. **/ static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data) { s32 ret_val; DEBUGFUNC("e1000_valid_led_default_82575"); ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); if (ret_val) { DEBUGOUT("NVM Read Error\n"); goto out; } if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { switch (hw->phy.media_type) { case e1000_media_type_internal_serdes: *data = ID_LED_DEFAULT_82575_SERDES; break; case e1000_media_type_copper: default: *data = ID_LED_DEFAULT; break; } } out: return ret_val; } /** * e1000_sgmii_active_82575 - Return sgmii state * @hw: pointer to the HW structure * * 82575 silicon has a serialized gigabit media independent interface (sgmii) * which can be enabled for use in the embedded applications. Simply * return the current state of the sgmii interface. **/ static bool e1000_sgmii_active_82575(struct e1000_hw *hw) { struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; return dev_spec->sgmii_active; } /** * e1000_reset_init_script_82575 - Inits HW defaults after reset * @hw: pointer to the HW structure * * Inits recommended HW defaults after a reset when there is no EEPROM * detected. This is only for the 82575. 
**/ static s32 e1000_reset_init_script_82575(struct e1000_hw *hw) { DEBUGFUNC("e1000_reset_init_script_82575"); if (hw->mac.type == e1000_82575) { DEBUGOUT("Running reset init script for 82575\n"); /* SerDes configuration via SERDESCTRL */ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x00, 0x0C); e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x01, 0x78); e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x1B, 0x23); e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x23, 0x15); /* CCM configuration via CCMCTL register */ e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x14, 0x00); e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x10, 0x00); /* PCIe lanes configuration */ e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x00, 0xEC); e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x61, 0xDF); e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x34, 0x05); e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x2F, 0x81); /* PCIe PLL Configuration */ e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x02, 0x47); e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x14, 0x00); e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x10, 0x00); } return E1000_SUCCESS; } /** * e1000_read_mac_addr_82575 - Read device MAC address * @hw: pointer to the HW structure **/ static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw) { s32 ret_val; DEBUGFUNC("e1000_read_mac_addr_82575"); /* * If there's an alternate MAC address place it in RAR0 * so that it will override the Si installed default perm * address. */ ret_val = e1000_check_alt_mac_addr_generic(hw); if (ret_val) goto out; ret_val = e1000_read_mac_addr_generic(hw); out: return ret_val; } /** * e1000_config_collision_dist_82575 - Configure collision distance * @hw: pointer to the HW structure * * Configures the collision distance to the default value and is used * during link setup. **/ static void e1000_config_collision_dist_82575(struct e1000_hw *hw) { u32 tctl_ext; DEBUGFUNC("e1000_config_collision_dist_82575"); tctl_ext = E1000_READ_REG(hw, E1000_TCTL_EXT); tctl_ext &= ~E1000_TCTL_EXT_COLD; tctl_ext |= E1000_COLLISION_DISTANCE << E1000_TCTL_EXT_COLD_SHIFT; E1000_WRITE_REG(hw, E1000_TCTL_EXT, tctl_ext); E1000_WRITE_FLUSH(hw); } /** * e1000_power_down_phy_copper_82575 - Remove link during PHY power down * @hw: pointer to the HW structure * * In the case of a PHY power down to save power, or to turn off link during a * driver unload, or wake on lan is not enabled, remove the link. **/ static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; if (!(phy->ops.check_reset_block)) return; /* If the management interface is not enabled, then power down */ if (!(e1000_enable_mng_pass_thru(hw) || phy->ops.check_reset_block(hw))) e1000_power_down_phy_copper(hw); return; } /** * e1000_clear_hw_cntrs_82575 - Clear device specific hardware counters * @hw: pointer to the HW structure * * Clears the hardware counters by reading the counter registers. 
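 * (Editor's note) These statistics registers are clear-on-read, so a
 * single E1000_READ_REG() of each register is enough to zero it; the
 * return values are deliberately discarded.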
**/ static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw) { DEBUGFUNC("e1000_clear_hw_cntrs_82575"); e1000_clear_hw_cntrs_base_generic(hw); E1000_READ_REG(hw, E1000_PRC64); E1000_READ_REG(hw, E1000_PRC127); E1000_READ_REG(hw, E1000_PRC255); E1000_READ_REG(hw, E1000_PRC511); E1000_READ_REG(hw, E1000_PRC1023); E1000_READ_REG(hw, E1000_PRC1522); E1000_READ_REG(hw, E1000_PTC64); E1000_READ_REG(hw, E1000_PTC127); E1000_READ_REG(hw, E1000_PTC255); E1000_READ_REG(hw, E1000_PTC511); E1000_READ_REG(hw, E1000_PTC1023); E1000_READ_REG(hw, E1000_PTC1522); E1000_READ_REG(hw, E1000_ALGNERRC); E1000_READ_REG(hw, E1000_RXERRC); E1000_READ_REG(hw, E1000_TNCRS); E1000_READ_REG(hw, E1000_CEXTERR); E1000_READ_REG(hw, E1000_TSCTC); E1000_READ_REG(hw, E1000_TSCTFC); E1000_READ_REG(hw, E1000_MGTPRC); E1000_READ_REG(hw, E1000_MGTPDC); E1000_READ_REG(hw, E1000_MGTPTC); E1000_READ_REG(hw, E1000_IAC); E1000_READ_REG(hw, E1000_ICRXOC); E1000_READ_REG(hw, E1000_ICRXPTC); E1000_READ_REG(hw, E1000_ICRXATC); E1000_READ_REG(hw, E1000_ICTXPTC); E1000_READ_REG(hw, E1000_ICTXATC); E1000_READ_REG(hw, E1000_ICTXQEC); E1000_READ_REG(hw, E1000_ICTXQMTC); E1000_READ_REG(hw, E1000_ICRXDMTC); E1000_READ_REG(hw, E1000_CBTMPC); E1000_READ_REG(hw, E1000_HTDPMC); E1000_READ_REG(hw, E1000_CBRMPC); E1000_READ_REG(hw, E1000_RPTHC); E1000_READ_REG(hw, E1000_HGPTC); E1000_READ_REG(hw, E1000_HTCBDPC); E1000_READ_REG(hw, E1000_HGORCL); E1000_READ_REG(hw, E1000_HGORCH); E1000_READ_REG(hw, E1000_HGOTCL); E1000_READ_REG(hw, E1000_HGOTCH); E1000_READ_REG(hw, E1000_LENERRS); /* This register should not be read in copper configurations */ if ((hw->phy.media_type == e1000_media_type_internal_serdes) || e1000_sgmii_active_82575(hw)) E1000_READ_REG(hw, E1000_SCVPC); } /** * e1000_rx_fifo_flush_82575 - Clean rx fifo after Rx enable * @hw: pointer to the HW structure * * After Rx enable, if manageability is enabled then there is likely some * bad data at the start of the fifo and possibly in the DMA fifo. This * function clears the fifos and flushes any packets that came in as rx was * being enabled. **/ void e1000_rx_fifo_flush_82575(struct e1000_hw *hw) { u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled; int i, ms_wait; - DEBUGFUNC("e1000_rx_fifo_workaround_82575"); + DEBUGFUNC("e1000_rx_fifo_flush_82575"); + + /* disable IPv6 options as per hardware errata */ + rfctl = E1000_READ_REG(hw, E1000_RFCTL); + rfctl |= E1000_RFCTL_IPV6_EX_DIS; + E1000_WRITE_REG(hw, E1000_RFCTL, rfctl); + if (hw->mac.type != e1000_82575 || !(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN)) return; /* Disable all Rx queues */ for (i = 0; i < 4; i++) { rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i)); E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE); } /* Poll all queues to verify they have shut down */ for (ms_wait = 0; ms_wait < 10; ms_wait++) { msec_delay(1); rx_enabled = 0; for (i = 0; i < 4; i++) rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i)); if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE)) break; } if (ms_wait == 10) DEBUGOUT("Queue disable timed out after 10ms\n"); /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all * incoming packets are rejected. 
Set enable and wait 2ms so that * any packet that was coming in as RCTL.EN was set is flushed */ - rfctl = E1000_READ_REG(hw, E1000_RFCTL); E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF); rlpml = E1000_READ_REG(hw, E1000_RLPML); E1000_WRITE_REG(hw, E1000_RLPML, 0); rctl = E1000_READ_REG(hw, E1000_RCTL); temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP); temp_rctl |= E1000_RCTL_LPE; E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl); E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN); E1000_WRITE_FLUSH(hw); msec_delay(2); /* Enable Rx queues that were previously enabled and restore our * previous state */ for (i = 0; i < 4; i++) E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]); E1000_WRITE_REG(hw, E1000_RCTL, rctl); E1000_WRITE_FLUSH(hw); E1000_WRITE_REG(hw, E1000_RLPML, rlpml); E1000_WRITE_REG(hw, E1000_RFCTL, rfctl); /* Flush receive errors generated by workaround */ E1000_READ_REG(hw, E1000_ROC); E1000_READ_REG(hw, E1000_RNBC); E1000_READ_REG(hw, E1000_MPC); } /** * e1000_set_pcie_completion_timeout - set pci-e completion timeout * @hw: pointer to the HW structure * * The defaults for 82575 and 82576 should be in the range of 50us to 50ms, * however the hardware default for these parts is 500us to 1ms which is less * than the 10ms recommended by the pci-e spec. To address this we need to * increase the value to either 10ms to 200ms for capability version 1 config, * or 16ms to 55ms for version 2. **/ static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw) { u32 gcr = E1000_READ_REG(hw, E1000_GCR); s32 ret_val = E1000_SUCCESS; u16 pcie_devctl2; /* only take action if timeout value is defaulted to 0 */ if (gcr & E1000_GCR_CMPL_TMOUT_MASK) goto out; /* * if the capabilities version is type 1 we can write the * timeout of 10ms to 200ms through the GCR register */ if (!(gcr & E1000_GCR_CAP_VER2)) { gcr |= E1000_GCR_CMPL_TMOUT_10ms; goto out; } /* * for version 2 capabilities we need to write the config space * directly in order to set the completion timeout value in the * 16ms to 55ms range */ ret_val = e1000_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, &pcie_devctl2); if (ret_val) goto out; pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms; ret_val = e1000_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, &pcie_devctl2); out: /* disable completion timeout resend */ gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND; E1000_WRITE_REG(hw, E1000_GCR, gcr); return ret_val; } /** * e1000_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing * @hw: pointer to the hardware struct * @enable: state to enter, either enabled or disabled * @pf: Physical Function pool - do not set anti-spoofing for the PF * * enables/disables L2 switch anti-spoofing functionality.
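 *
 * Example call (editor's sketch): enable MAC/VLAN anti-spoofing for all
 * pools while leaving the PF in pool 0 exempt:
 *
 *	e1000_vmdq_set_anti_spoofing_pf(hw, TRUE, 0);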
**/ void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) { u32 reg_val, reg_offset; switch (hw->mac.type) { case e1000_82576: reg_offset = E1000_DTXSWC; break; case e1000_i350: case e1000_i354: reg_offset = E1000_TXSWC; break; default: return; } reg_val = E1000_READ_REG(hw, reg_offset); if (enable) { reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK | E1000_DTXSWC_VLAN_SPOOF_MASK); /* The PF can spoof - it has to in order to * support emulation mode NICs */ reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS)); } else { reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | E1000_DTXSWC_VLAN_SPOOF_MASK); } E1000_WRITE_REG(hw, reg_offset, reg_val); } /** * e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback * @hw: pointer to the hardware struct * @enable: state to enter, either enabled or disabled * * enables/disables L2 switch loopback functionality. **/ void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable) { u32 dtxswc; switch (hw->mac.type) { case e1000_82576: dtxswc = E1000_READ_REG(hw, E1000_DTXSWC); if (enable) dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; else dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc); break; case e1000_i350: case e1000_i354: dtxswc = E1000_READ_REG(hw, E1000_TXSWC); if (enable) dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; else dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; E1000_WRITE_REG(hw, E1000_TXSWC, dtxswc); break; default: /* Currently no other hardware supports loopback */ break; } } /** * e1000_vmdq_set_replication_pf - enable or disable vmdq replication * @hw: pointer to the hardware struct * @enable: state to enter, either enabled or disabled * * enables/disables replication of packets across multiple pools. **/ void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable) { u32 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL); if (enable) vt_ctl |= E1000_VT_CTL_VM_REPL_EN; else vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN; E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl); } /** * e1000_read_phy_reg_82580 - Read 82580 MDI control register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * * Reads the MDI control register in the PHY at offset and stores the * information read to data. **/ static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) { s32 ret_val; DEBUGFUNC("e1000_read_phy_reg_82580"); ret_val = hw->phy.ops.acquire(hw); if (ret_val) goto out; ret_val = e1000_read_phy_reg_mdic(hw, offset, data); hw->phy.ops.release(hw); out: return ret_val; } /** * e1000_write_phy_reg_82580 - Write 82580 MDI control register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write to register at offset * * Writes data to MDI control register in the PHY at offset. **/ static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) { s32 ret_val; DEBUGFUNC("e1000_write_phy_reg_82580"); ret_val = hw->phy.ops.acquire(hw); if (ret_val) goto out; ret_val = e1000_write_phy_reg_mdic(hw, offset, data); hw->phy.ops.release(hw); out: return ret_val; } /** * e1000_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits * @hw: pointer to the HW structure * * This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on * the values found in the EEPROM. This addresses an issue in which these * bits are not restored from EEPROM after reset.
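 * (Editor's note) The replacement values come from the port's
 * INIT_CONTROL3 NVM word; its EXT_MDIO and COM_MDIO bits are mirrored
 * into E1000_MDICNFG below.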
**/ static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; u32 mdicnfg; u16 nvm_data = 0; DEBUGFUNC("e1000_reset_mdicnfg_82580"); if (hw->mac.type != e1000_82580) goto out; if (!e1000_sgmii_active_82575(hw)) goto out; ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, &nvm_data); if (ret_val) { DEBUGOUT("NVM Read Error\n"); goto out; } mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG); if (nvm_data & NVM_WORD24_EXT_MDIO) mdicnfg |= E1000_MDICNFG_EXT_MDIO; if (nvm_data & NVM_WORD24_COM_MDIO) mdicnfg |= E1000_MDICNFG_COM_MDIO; E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg); out: return ret_val; } /** * e1000_reset_hw_82580 - Reset hardware * @hw: pointer to the HW structure * * This resets the function or the entire device (all ports, etc.) * to a known state. **/ static s32 e1000_reset_hw_82580(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; /* BH SW mailbox bit in SW_FW_SYNC */ u16 swmbsw_mask = E1000_SW_SYNCH_MB; u32 ctrl; bool global_device_reset = hw->dev_spec._82575.global_device_reset; DEBUGFUNC("e1000_reset_hw_82580"); hw->dev_spec._82575.global_device_reset = FALSE; /* 82580 does not reliably do global_device_reset due to hw errata */ if (hw->mac.type == e1000_82580) global_device_reset = FALSE; /* Get current control state. */ ctrl = E1000_READ_REG(hw, E1000_CTRL); /* * Prevent the PCI-E bus from sticking if there is no TLP connection * on the last TLP read/write transaction when MAC is reset. */ ret_val = e1000_disable_pcie_master_generic(hw); if (ret_val) DEBUGOUT("PCI-E Master disable polling has failed.\n"); DEBUGOUT("Masking off all interrupts\n"); E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); E1000_WRITE_REG(hw, E1000_RCTL, 0); E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); E1000_WRITE_FLUSH(hw); msec_delay(10); /* Determine whether or not a global dev reset is requested */ if (global_device_reset && hw->mac.ops.acquire_swfw_sync(hw, swmbsw_mask)) global_device_reset = FALSE; if (global_device_reset && !(E1000_READ_REG(hw, E1000_STATUS) & E1000_STAT_DEV_RST_SET)) ctrl |= E1000_CTRL_DEV_RST; else ctrl |= E1000_CTRL_RST; E1000_WRITE_REG(hw, E1000_CTRL, ctrl); switch (hw->device_id) { case E1000_DEV_ID_DH89XXCC_SGMII: break; default: E1000_WRITE_FLUSH(hw); break; } /* Add delay to ensure DEV_RST or RST has time to complete */ msec_delay(5); ret_val = e1000_get_auto_rd_done_generic(hw); if (ret_val) { /* * When auto config read does not complete, do not * return with an error. This can happen in situations * where there is no eeprom and prevents getting link. */ DEBUGOUT("Auto Read Done did not complete\n"); } /* clear global device reset status bit */ E1000_WRITE_REG(hw, E1000_STATUS, E1000_STAT_DEV_RST_SET); /* Clear any pending interrupt events. */ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); E1000_READ_REG(hw, E1000_ICR); ret_val = e1000_reset_mdicnfg_82580(hw); if (ret_val) DEBUGOUT("Could not reset MDICNFG based on EEPROM\n"); /* Install any alternate MAC address into RAR0 */ ret_val = e1000_check_alt_mac_addr_generic(hw); /* Release semaphore */ if (global_device_reset) hw->mac.ops.release_swfw_sync(hw, swmbsw_mask); return ret_val; } /** * e1000_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual Rx PBA size * @data: data received by reading RXPBS register * * The 82580 uses a table based approach for packet buffer allocation sizes.
* This function converts the retrieved value into the correct table value * 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7 * 0x0 36 72 144 1 2 4 8 16 * 0x8 35 70 140 rsv rsv rsv rsv rsv */ u16 e1000_rxpbs_adjust_82580(u32 data) { u16 ret_val = 0; if (data < E1000_82580_RXPBS_TABLE_SIZE) ret_val = e1000_82580_rxpbs_table[data]; return ret_val; } /** * e1000_validate_nvm_checksum_with_offset - Validate EEPROM * checksum * @hw: pointer to the HW structure * @offset: offset in words of the checksum protected region * * Calculates the EEPROM checksum by reading/adding each word of the EEPROM * and then verifies that the sum of the EEPROM is equal to 0xBABA. **/ s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) { s32 ret_val = E1000_SUCCESS; u16 checksum = 0; u16 i, nvm_data; DEBUGFUNC("e1000_validate_nvm_checksum_with_offset"); for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) { ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); if (ret_val) { DEBUGOUT("NVM Read Error\n"); goto out; } checksum += nvm_data; } if (checksum != (u16) NVM_SUM) { DEBUGOUT("NVM Checksum Invalid\n"); ret_val = -E1000_ERR_NVM; goto out; } out: return ret_val; } /** * e1000_update_nvm_checksum_with_offset - Update EEPROM * checksum * @hw: pointer to the HW structure * @offset: offset in words of the checksum protected region * * Updates the EEPROM checksum by reading/adding each word of the EEPROM * up to the checksum. Then calculates the EEPROM checksum and writes the * value to the EEPROM. **/ s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) { s32 ret_val; u16 checksum = 0; u16 i, nvm_data; DEBUGFUNC("e1000_update_nvm_checksum_with_offset"); for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) { ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); if (ret_val) { DEBUGOUT("NVM Read Error while updating checksum.\n"); goto out; } checksum += nvm_data; } checksum = (u16) NVM_SUM - checksum; ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1, &checksum); if (ret_val) DEBUGOUT("NVM Write Error while updating checksum.\n"); out: return ret_val; } /** * e1000_validate_nvm_checksum_82580 - Validate EEPROM checksum * @hw: pointer to the HW structure * * Calculates the EEPROM section checksum by reading/adding each word of * the EEPROM and then verifies that the sum of the EEPROM is * equal to 0xBABA. **/ static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw) { s32 ret_val; u16 eeprom_regions_count = 1; u16 j, nvm_data; u16 nvm_offset; DEBUGFUNC("e1000_validate_nvm_checksum_82580"); ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); if (ret_val) { DEBUGOUT("NVM Read Error\n"); goto out; } if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) { /* if the checksum compatibility bit is set, validate checksums * for all 4 ports. */ eeprom_regions_count = 4; } for (j = 0; j < eeprom_regions_count; j++) { nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); ret_val = e1000_validate_nvm_checksum_with_offset(hw, nvm_offset); if (ret_val != E1000_SUCCESS) goto out; } out: return ret_val; } /** * e1000_update_nvm_checksum_82580 - Update EEPROM checksum * @hw: pointer to the HW structure * * Updates the EEPROM section checksums for all 4 ports by reading/adding * each word of the EEPROM up to the checksum. Then calculates the EEPROM * checksum and writes the value to the EEPROM.
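 *
 * Worked example of the arithmetic (editor's note): if the words of a
 * protected region sum to 0xA000, the checksum word written is
 * (u16)NVM_SUM - 0xA000 = 0xBABA - 0xA000 = 0x1ABA, so a later
 * validation pass that sums every word including the checksum again
 * yields 0xBABA.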
**/ static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw) { s32 ret_val; u16 j, nvm_data; u16 nvm_offset; DEBUGFUNC("e1000_update_nvm_checksum_82580"); ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); if (ret_val) { DEBUGOUT("NVM Read Error while updating checksum compatibility bit.\n"); goto out; } if (!(nvm_data & NVM_COMPATIBILITY_BIT_MASK)) { /* set compatibility bit to validate checksums appropriately */ nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK; ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); if (ret_val) { DEBUGOUT("NVM Write Error while updating checksum compatibility bit.\n"); goto out; } } for (j = 0; j < 4; j++) { nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset); if (ret_val) goto out; } out: return ret_val; } /** * e1000_validate_nvm_checksum_i350 - Validate EEPROM checksum * @hw: pointer to the HW structure * * Calculates the EEPROM section checksum by reading/adding each word of * the EEPROM and then verifies that the sum of the EEPROM is * equal to 0xBABA. **/ static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; u16 j; u16 nvm_offset; DEBUGFUNC("e1000_validate_nvm_checksum_i350"); for (j = 0; j < 4; j++) { nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); ret_val = e1000_validate_nvm_checksum_with_offset(hw, nvm_offset); if (ret_val != E1000_SUCCESS) goto out; } out: return ret_val; } /** * e1000_update_nvm_checksum_i350 - Update EEPROM checksum * @hw: pointer to the HW structure * * Updates the EEPROM section checksums for all 4 ports by reading/adding * each word of the EEPROM up to the checksum. Then calculates the EEPROM * checksum and writes the value to the EEPROM. **/ static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; u16 j; u16 nvm_offset; DEBUGFUNC("e1000_update_nvm_checksum_i350"); for (j = 0; j < 4; j++) { nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset); if (ret_val != E1000_SUCCESS) goto out; } out: return ret_val; } /** * __e1000_access_emi_reg - Read/write EMI register * @hw: pointer to the HW structure * @addr: EMI address to program * @data: pointer to value to read/write from/to the EMI address * @read: boolean flag to indicate read or write **/ static s32 __e1000_access_emi_reg(struct e1000_hw *hw, u16 address, u16 *data, bool read) { s32 ret_val; DEBUGFUNC("__e1000_access_emi_reg"); ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address); if (ret_val) return ret_val; if (read) ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data); else ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data); return ret_val; } /** * e1000_read_emi_reg - Read Extended Management Interface register * @hw: pointer to the HW structure * @addr: EMI address to program * @data: value to be read from the EMI address **/ s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data) { DEBUGFUNC("e1000_read_emi_reg"); return __e1000_access_emi_reg(hw, addr, data, TRUE); } /** * e1000_initialize_M88E1512_phy - Initialize M88E1512 PHY * @hw: pointer to the HW structure * - * Initialize Marverl 1512 to work correctly with Avoton. + * Initialize Marvell 1512 to work correctly with Avoton. **/ s32 e1000_initialize_M88E1512_phy(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val = E1000_SUCCESS; DEBUGFUNC("e1000_initialize_M88E1512_phy"); /* Check if this is correct PHY. 
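Only the M88E1512 needs this initialization sequence; any other PHY id returns immediately.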
*/ if (phy->id != M88E1512_E_PHY_ID) goto out; /* Switch to PHY page 0xFF. */ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF); if (ret_val) goto out; ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B); if (ret_val) goto out; ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144); if (ret_val) goto out; ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28); if (ret_val) goto out; ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146); if (ret_val) goto out; ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233); if (ret_val) goto out; ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D); if (ret_val) goto out; ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xCC0C); if (ret_val) goto out; ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159); if (ret_val) goto out; /* Switch to PHY page 0xFB. */ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB); if (ret_val) goto out; ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x000D); if (ret_val) goto out; /* Switch to PHY page 0x12. */ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12); if (ret_val) goto out; /* Change mode to SGMII-to-Copper */ ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001); if (ret_val) goto out; /* Return the PHY to page 0. */ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); if (ret_val) goto out; ret_val = phy->ops.commit(hw); if (ret_val) { DEBUGOUT("Error committing the PHY changes\n"); return ret_val; } msec_delay(1000); out: return ret_val; } /** + * e1000_initialize_M88E1543_phy - Initialize M88E1543 PHY + * @hw: pointer to the HW structure + * + * Initialize Marvell 1543 to work correctly with Avoton. + **/ +s32 e1000_initialize_M88E1543_phy(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_initialize_M88E1543_phy"); + + /* Check if this is correct PHY. */ + if (phy->id != M88E1543_E_PHY_ID) + goto out; + + /* Switch to PHY page 0xFF. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xDC0C); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159); + if (ret_val) + goto out; + + /* Switch to PHY page 0xFB. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0xC00D); + if (ret_val) + goto out; + + /* Switch to PHY page 0x12. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12); + if (ret_val) + goto out; + + /* Change mode to SGMII-to-Copper */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001); + if (ret_val) + goto out; + + /* Switch to PHY page 1. 
*/ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x1); + if (ret_val) + goto out; + + /* Change mode to 1000BASE-X/SGMII and autoneg enable; reset */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_FIBER_CTRL, 0x9140); + if (ret_val) + goto out; + + /* Return the PHY to page 0. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); + if (ret_val) + goto out; + + ret_val = phy->ops.commit(hw); + if (ret_val) { + DEBUGOUT("Error committing the PHY changes\n"); + return ret_val; + } + + msec_delay(1000); +out: + return ret_val; +} + +/** * e1000_set_eee_i350 - Enable/disable EEE support * @hw: pointer to the HW structure + * @adv1G: boolean flag enabling 1G EEE advertisement + * @adv100M: boolean flag enabling 100M EEE advertisement * * Enable/disable EEE based on the setting in the dev_spec structure. * **/ -s32 e1000_set_eee_i350(struct e1000_hw *hw) +s32 e1000_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M) { u32 ipcnfg, eeer; DEBUGFUNC("e1000_set_eee_i350"); if ((hw->mac.type < e1000_i350) || (hw->phy.media_type != e1000_media_type_copper)) goto out; ipcnfg = E1000_READ_REG(hw, E1000_IPCNFG); eeer = E1000_READ_REG(hw, E1000_EEER); /* enable or disable per user setting */ if (!(hw->dev_spec._82575.eee_disable)) { u32 eee_su = E1000_READ_REG(hw, E1000_EEE_SU); - ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN); + if (adv100M) + ipcnfg |= E1000_IPCNFG_EEE_100M_AN; + else + ipcnfg &= ~E1000_IPCNFG_EEE_100M_AN; + + if (adv1G) + ipcnfg |= E1000_IPCNFG_EEE_1G_AN; + else + ipcnfg &= ~E1000_IPCNFG_EEE_1G_AN; + eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | E1000_EEER_LPI_FC); /* This bit should not be set in normal operation. */ if (eee_su & E1000_EEE_SU_LPI_CLK_STP) DEBUGOUT("LPI Clock Stop Bit should not be set!\n"); } else { ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN); eeer &= ~(E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN | E1000_EEER_LPI_FC); } E1000_WRITE_REG(hw, E1000_IPCNFG, ipcnfg); E1000_WRITE_REG(hw, E1000_EEER, eeer); E1000_READ_REG(hw, E1000_IPCNFG); E1000_READ_REG(hw, E1000_EEER); out: return E1000_SUCCESS; } /** * e1000_set_eee_i354 - Enable/disable EEE support * @hw: pointer to the HW structure + * @adv1G: boolean flag enabling 1G EEE advertisement + * @adv100M: boolean flag enabling 100M EEE advertisement * * Enable/disable EEE legacy mode based on the setting in the dev_spec structure. * **/ -s32 e1000_set_eee_i354(struct e1000_hw *hw) +s32 e1000_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val = E1000_SUCCESS; u16 phy_data; DEBUGFUNC("e1000_set_eee_i354"); if ((hw->phy.media_type != e1000_media_type_copper) || ((phy->id != M88E1543_E_PHY_ID) && (phy->id != M88E1512_E_PHY_ID))) goto out; if (!hw->dev_spec._82575.eee_disable) { /* Switch to PHY page 18. */ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18); if (ret_val) goto out; ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1, &phy_data); if (ret_val) goto out; phy_data |= E1000_M88E1543_EEE_CTRL_1_MS; ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1, phy_data); if (ret_val) goto out; /* Return the PHY to page 0. */ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0); if (ret_val) goto out; /* Turn on EEE advertisement.
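The per-speed advertisement bits are then set or cleared according to the adv1G and adv100M arguments.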
*/ ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, E1000_EEE_ADV_DEV_I354, &phy_data); if (ret_val) goto out; - phy_data |= E1000_EEE_ADV_100_SUPPORTED | - E1000_EEE_ADV_1000_SUPPORTED; + if (adv100M) + phy_data |= E1000_EEE_ADV_100_SUPPORTED; + else + phy_data &= ~E1000_EEE_ADV_100_SUPPORTED; + + if (adv1G) + phy_data |= E1000_EEE_ADV_1000_SUPPORTED; + else + phy_data &= ~E1000_EEE_ADV_1000_SUPPORTED; + ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, E1000_EEE_ADV_DEV_I354, phy_data); } else { /* Turn off EEE advertisement. */ ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, E1000_EEE_ADV_DEV_I354, &phy_data); if (ret_val) goto out; phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED | E1000_EEE_ADV_1000_SUPPORTED); ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354, E1000_EEE_ADV_DEV_I354, phy_data); } out: return ret_val; } /** * e1000_get_eee_status_i354 - Get EEE status * @hw: pointer to the HW structure * @status: EEE status * * Get EEE status by inferring it from whether Tx or Rx LPI indications have * been received. **/ s32 e1000_get_eee_status_i354(struct e1000_hw *hw, bool *status) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val = E1000_SUCCESS; u16 phy_data; DEBUGFUNC("e1000_get_eee_status_i354"); /* Check if EEE is supported on this device. */ if ((hw->phy.media_type != e1000_media_type_copper) || ((phy->id != M88E1543_E_PHY_ID) && (phy->id != M88E1512_E_PHY_ID))) goto out; ret_val = e1000_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354, E1000_PCS_STATUS_DEV_I354, &phy_data); if (ret_val) goto out; *status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD | E1000_PCS_STATUS_RX_LPI_RCVD) ? TRUE : FALSE; out: return ret_val; } /* Due to a hw erratum, if the host tries to configure the VFTA register * while performing queries from the BMC or DMA, then the VFTA in some * cases won't be written. */ /** * e1000_clear_vfta_i350 - Clear VLAN filter table * @hw: pointer to the HW structure * * Clears the register array which contains the VLAN filter table by * setting all the values to 0. **/ void e1000_clear_vfta_i350(struct e1000_hw *hw) { u32 offset; int i; DEBUGFUNC("e1000_clear_vfta_i350"); for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { for (i = 0; i < 10; i++) E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0); E1000_WRITE_FLUSH(hw); } } /** * e1000_write_vfta_i350 - Write value to VLAN filter table * @hw: pointer to the HW structure * @offset: register offset in VLAN filter table * @value: register value written to VLAN filter table * * Writes value at the given offset in the register array which stores * the VLAN filter table.
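* The write is repeated ten times to work around the VFTA erratum noted above.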
**/ void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value) { int i; DEBUGFUNC("e1000_write_vfta_i350"); for (i = 0; i < 10; i++) E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); E1000_WRITE_FLUSH(hw); } /** * e1000_set_i2c_bb - Enable I2C bit-bang * @hw: pointer to the HW structure * * Enable I2C bit-bang interface * **/ s32 e1000_set_i2c_bb(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; u32 ctrl_ext, i2cparams; DEBUGFUNC("e1000_set_i2c_bb"); ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); ctrl_ext |= E1000_CTRL_I2C_ENA; E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); E1000_WRITE_FLUSH(hw); i2cparams = E1000_READ_REG(hw, E1000_I2CPARAMS); i2cparams |= E1000_I2CBB_EN; i2cparams |= E1000_I2C_DATA_OE_N; i2cparams |= E1000_I2C_CLK_OE_N; E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cparams); E1000_WRITE_FLUSH(hw); return ret_val; } /** * e1000_read_i2c_byte_generic - Reads an 8-bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to read * @dev_addr: device address * @data: value read * * Performs byte read operation over I2C interface at * a specified device address. **/ s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data) { s32 status = E1000_SUCCESS; u32 max_retry = 10; u32 retry = 1; u16 swfw_mask = 0; bool nack = TRUE; DEBUGFUNC("e1000_read_i2c_byte_generic"); swfw_mask = E1000_SWFW_PHY0_SM; do { if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS) { status = E1000_ERR_SWFW_SYNC; goto read_byte_out; } e1000_i2c_start(hw); /* Device Address and write indication */ status = e1000_clock_out_i2c_byte(hw, dev_addr); if (status != E1000_SUCCESS) goto fail; status = e1000_get_i2c_ack(hw); if (status != E1000_SUCCESS) goto fail; status = e1000_clock_out_i2c_byte(hw, byte_offset); if (status != E1000_SUCCESS) goto fail; status = e1000_get_i2c_ack(hw); if (status != E1000_SUCCESS) goto fail; e1000_i2c_start(hw); /* Device Address and read indication */ status = e1000_clock_out_i2c_byte(hw, (dev_addr | 0x1)); if (status != E1000_SUCCESS) goto fail; status = e1000_get_i2c_ack(hw); if (status != E1000_SUCCESS) goto fail; status = e1000_clock_in_i2c_byte(hw, data); if (status != E1000_SUCCESS) goto fail; status = e1000_clock_out_i2c_bit(hw, nack); if (status != E1000_SUCCESS) goto fail; e1000_i2c_stop(hw); break; fail: hw->mac.ops.release_swfw_sync(hw, swfw_mask); msec_delay(100); e1000_i2c_bus_clear(hw); retry++; if (retry < max_retry) DEBUGOUT("I2C byte read error - Retrying.\n"); else DEBUGOUT("I2C byte read error.\n"); } while (retry < max_retry); hw->mac.ops.release_swfw_sync(hw, swfw_mask); read_byte_out: return status; } /** * e1000_write_i2c_byte_generic - Writes an 8-bit word over I2C * @hw: pointer to hardware structure * @byte_offset: byte offset to write * @dev_addr: device address * @data: value to write * * Performs byte write operation over I2C interface at * a specified device address.
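* On a failed transfer the bus is cleared before the error is returned.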
**/ s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr, u8 data) { s32 status = E1000_SUCCESS; u32 max_retry = 1; u32 retry = 0; u16 swfw_mask = 0; DEBUGFUNC("e1000_write_i2c_byte_generic"); swfw_mask = E1000_SWFW_PHY0_SM; if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS) { status = E1000_ERR_SWFW_SYNC; goto write_byte_out; } do { e1000_i2c_start(hw); status = e1000_clock_out_i2c_byte(hw, dev_addr); if (status != E1000_SUCCESS) goto fail; status = e1000_get_i2c_ack(hw); if (status != E1000_SUCCESS) goto fail; status = e1000_clock_out_i2c_byte(hw, byte_offset); if (status != E1000_SUCCESS) goto fail; status = e1000_get_i2c_ack(hw); if (status != E1000_SUCCESS) goto fail; status = e1000_clock_out_i2c_byte(hw, data); if (status != E1000_SUCCESS) goto fail; status = e1000_get_i2c_ack(hw); if (status != E1000_SUCCESS) goto fail; e1000_i2c_stop(hw); break; fail: e1000_i2c_bus_clear(hw); retry++; if (retry < max_retry) DEBUGOUT("I2C byte write error - Retrying.\n"); else DEBUGOUT("I2C byte write error.\n"); } while (retry < max_retry); hw->mac.ops.release_swfw_sync(hw, swfw_mask); write_byte_out: return status; } /** * e1000_i2c_start - Sets I2C start condition * @hw: pointer to hardware structure * * Sets I2C start condition (High -> Low on SDA while SCL is High) **/ static void e1000_i2c_start(struct e1000_hw *hw) { u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); DEBUGFUNC("e1000_i2c_start"); /* Start condition must begin with data and clock high */ e1000_set_i2c_data(hw, &i2cctl, 1); e1000_raise_i2c_clk(hw, &i2cctl); /* Setup time for start condition (4.7us) */ usec_delay(E1000_I2C_T_SU_STA); e1000_set_i2c_data(hw, &i2cctl, 0); /* Hold time for start condition (4us) */ usec_delay(E1000_I2C_T_HD_STA); e1000_lower_i2c_clk(hw, &i2cctl); /* Minimum low period of clock is 4.7 us */ usec_delay(E1000_I2C_T_LOW); } /** * e1000_i2c_stop - Sets I2C stop condition * @hw: pointer to hardware structure * * Sets I2C stop condition (Low -> High on SDA while SCL is High) **/ static void e1000_i2c_stop(struct e1000_hw *hw) { u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); DEBUGFUNC("e1000_i2c_stop"); /* Stop condition must begin with data low and clock high */ e1000_set_i2c_data(hw, &i2cctl, 0); e1000_raise_i2c_clk(hw, &i2cctl); /* Setup time for stop condition (4us) */ usec_delay(E1000_I2C_T_SU_STO); e1000_set_i2c_data(hw, &i2cctl, 1); /* bus free time between stop and start (4.7us)*/ usec_delay(E1000_I2C_T_BUF); } /** * e1000_clock_in_i2c_byte - Clocks in one byte via I2C * @hw: pointer to hardware structure * @data: data byte to clock in * * Clocks in one byte data via I2C data/clock **/ static s32 e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data) { s32 i; bool bit = 0; DEBUGFUNC("e1000_clock_in_i2c_byte"); *data = 0; for (i = 7; i >= 0; i--) { e1000_clock_in_i2c_bit(hw, &bit); *data |= bit << i; } return E1000_SUCCESS; } /** * e1000_clock_out_i2c_byte - Clocks out one byte via I2C * @hw: pointer to hardware structure * @data: data byte clocked out * * Clocks out one byte data via I2C data/clock **/ static s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data) { s32 status = E1000_SUCCESS; s32 i; u32 i2cctl; bool bit = 0; DEBUGFUNC("e1000_clock_out_i2c_byte"); for (i = 7; i >= 0; i--) { bit = (data >> i) & 0x1; status = e1000_clock_out_i2c_bit(hw, bit); if (status != E1000_SUCCESS) break; } /* Release SDA line (set high) */ i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); i2cctl |= E1000_I2C_DATA_OE_N; E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cctl); 
E1000_WRITE_FLUSH(hw); return status; } /** * e1000_get_i2c_ack - Polls for I2C ACK * @hw: pointer to hardware structure * * Polls the I2C data line for an ACK from the slave device **/ static s32 e1000_get_i2c_ack(struct e1000_hw *hw) { s32 status = E1000_SUCCESS; u32 i = 0; u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); u32 timeout = 10; bool ack = TRUE; DEBUGFUNC("e1000_get_i2c_ack"); e1000_raise_i2c_clk(hw, &i2cctl); /* Minimum high period of clock is 4us */ usec_delay(E1000_I2C_T_HIGH); /* Wait until SCL returns high */ for (i = 0; i < timeout; i++) { usec_delay(1); i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); if (i2cctl & E1000_I2C_CLK_IN) break; } if (!(i2cctl & E1000_I2C_CLK_IN)) return E1000_ERR_I2C; ack = e1000_get_i2c_data(&i2cctl); if (ack) { DEBUGOUT("I2C ack was not received.\n"); status = E1000_ERR_I2C; } e1000_lower_i2c_clk(hw, &i2cctl); /* Minimum low period of clock is 4.7 us */ usec_delay(E1000_I2C_T_LOW); return status; } /** * e1000_clock_in_i2c_bit - Clocks in one bit via I2C data/clock * @hw: pointer to hardware structure * @data: read data value * * Clocks in one bit via I2C data/clock **/ static s32 e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data) { u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); DEBUGFUNC("e1000_clock_in_i2c_bit"); e1000_raise_i2c_clk(hw, &i2cctl); /* Minimum high period of clock is 4us */ usec_delay(E1000_I2C_T_HIGH); i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); *data = e1000_get_i2c_data(&i2cctl); e1000_lower_i2c_clk(hw, &i2cctl); /* Minimum low period of clock is 4.7 us */ usec_delay(E1000_I2C_T_LOW); return E1000_SUCCESS; } /** * e1000_clock_out_i2c_bit - Clocks out one bit via I2C data/clock * @hw: pointer to hardware structure * @data: data value to write * * Clocks out one bit via I2C data/clock **/ static s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data) { s32 status; u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); DEBUGFUNC("e1000_clock_out_i2c_bit"); status = e1000_set_i2c_data(hw, &i2cctl, data); if (status == E1000_SUCCESS) { e1000_raise_i2c_clk(hw, &i2cctl); /* Minimum high period of clock is 4us */ usec_delay(E1000_I2C_T_HIGH); e1000_lower_i2c_clk(hw, &i2cctl); /* Minimum low period of clock is 4.7 us. * This also takes care of the data hold time.
*/ usec_delay(E1000_I2C_T_LOW); } else { status = E1000_ERR_I2C; DEBUGOUT1("I2C data was not set to %X\n", data); } return status; } /** * e1000_raise_i2c_clk - Raises the I2C SCL clock * @hw: pointer to hardware structure * @i2cctl: Current value of I2CCTL register * * Raises the I2C clock line '0'->'1' **/ static void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl) { DEBUGFUNC("e1000_raise_i2c_clk"); *i2cctl |= E1000_I2C_CLK_OUT; *i2cctl &= ~E1000_I2C_CLK_OE_N; E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl); E1000_WRITE_FLUSH(hw); /* SCL rise time (1000ns) */ usec_delay(E1000_I2C_T_RISE); } /** * e1000_lower_i2c_clk - Lowers the I2C SCL clock * @hw: pointer to hardware structure * @i2cctl: Current value of I2CCTL register * * Lowers the I2C clock line '1'->'0' **/ static void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl) { DEBUGFUNC("e1000_lower_i2c_clk"); *i2cctl &= ~E1000_I2C_CLK_OUT; *i2cctl &= ~E1000_I2C_CLK_OE_N; E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl); E1000_WRITE_FLUSH(hw); /* SCL fall time (300ns) */ usec_delay(E1000_I2C_T_FALL); } /** * e1000_set_i2c_data - Sets the I2C data bit * @hw: pointer to hardware structure * @i2cctl: Current value of I2CCTL register * @data: I2C data value (0 or 1) to set * * Sets the I2C data bit **/ static s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data) { s32 status = E1000_SUCCESS; DEBUGFUNC("e1000_set_i2c_data"); if (data) *i2cctl |= E1000_I2C_DATA_OUT; else *i2cctl &= ~E1000_I2C_DATA_OUT; *i2cctl &= ~E1000_I2C_DATA_OE_N; *i2cctl |= E1000_I2C_CLK_OE_N; E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl); E1000_WRITE_FLUSH(hw); /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ usec_delay(E1000_I2C_T_RISE + E1000_I2C_T_FALL + E1000_I2C_T_SU_DATA); *i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); if (data != e1000_get_i2c_data(i2cctl)) { status = E1000_ERR_I2C; DEBUGOUT1("Error - I2C data was not set to %X.\n", data); } return status; } /** * e1000_get_i2c_data - Reads the I2C SDA data bit * @hw: pointer to hardware structure * @i2cctl: Current value of I2CCTL register * * Returns the I2C data bit value **/ static bool e1000_get_i2c_data(u32 *i2cctl) { bool data; DEBUGFUNC("e1000_get_i2c_data"); if (*i2cctl & E1000_I2C_DATA_IN) data = 1; else data = 0; return data; } /** * e1000_i2c_bus_clear - Clears the I2C bus * @hw: pointer to hardware structure * * Clears the I2C bus by sending nine clock pulses. * Used when data line is stuck low. **/ void e1000_i2c_bus_clear(struct e1000_hw *hw) { u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); u32 i; DEBUGFUNC("e1000_i2c_bus_clear"); e1000_i2c_start(hw); e1000_set_i2c_data(hw, &i2cctl, 1); for (i = 0; i < 9; i++) { e1000_raise_i2c_clk(hw, &i2cctl); /* Min high period of clock is 4us */ usec_delay(E1000_I2C_T_HIGH); e1000_lower_i2c_clk(hw, &i2cctl); /* Min low period of clock is 4.7us*/ usec_delay(E1000_I2C_T_LOW); } e1000_i2c_start(hw); /* Put the i2c bus back to default state */ e1000_i2c_stop(hw); } Index: stable/10/sys/dev/e1000/e1000_82575.h =================================================================== --- stable/10/sys/dev/e1000/e1000_82575.h (revision 296054) +++ stable/10/sys/dev/e1000/e1000_82575.h (revision 296055) @@ -1,521 +1,522 @@ /****************************************************************************** Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. 
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _E1000_82575_H_ #define _E1000_82575_H_ #define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \ (ID_LED_DEF1_DEF2 << 8) | \ (ID_LED_DEF1_DEF2 << 4) | \ (ID_LED_OFF1_ON2)) /* * Receive Address Register Count * Number of high/low register pairs in the RAR. The RAR (Receive Address * Registers) holds the directed and multicast addresses that we monitor. * These entries are also used for MAC-based filtering. */ /* * For 82576, there are an additional set of RARs that begin at an offset * separate from the first set of RARs. 
*/ #define E1000_RAR_ENTRIES_82575 16 #define E1000_RAR_ENTRIES_82576 24 #define E1000_RAR_ENTRIES_82580 24 #define E1000_RAR_ENTRIES_I350 32 #define E1000_SW_SYNCH_MB 0x00000100 #define E1000_STAT_DEV_RST_SET 0x00100000 #define E1000_CTRL_DEV_RST 0x20000000 #ifdef E1000_BIT_FIELDS struct e1000_adv_data_desc { __le64 buffer_addr; /* Address of the descriptor's data buffer */ union { u32 data; struct { u32 datalen:16; /* Data buffer length */ u32 rsvd:4; u32 dtyp:4; /* Descriptor type */ u32 dcmd:8; /* Descriptor command */ } config; } lower; union { u32 data; struct { u32 status:4; /* Descriptor status */ u32 idx:4; u32 popts:6; /* Packet Options */ u32 paylen:18; /* Payload length */ } options; } upper; }; #define E1000_TXD_DTYP_ADV_C 0x2 /* Advanced Context Descriptor */ #define E1000_TXD_DTYP_ADV_D 0x3 /* Advanced Data Descriptor */ #define E1000_ADV_TXD_CMD_DEXT 0x20 /* Descriptor extension (0 = legacy) */ #define E1000_ADV_TUCMD_IPV4 0x2 /* IP Packet Type: 1=IPv4 */ #define E1000_ADV_TUCMD_IPV6 0x0 /* IP Packet Type: 0=IPv6 */ #define E1000_ADV_TUCMD_L4T_UDP 0x0 /* L4 Packet TYPE of UDP */ #define E1000_ADV_TUCMD_L4T_TCP 0x4 /* L4 Packet TYPE of TCP */ #define E1000_ADV_TUCMD_MKRREQ 0x10 /* Indicates markers are required */ #define E1000_ADV_DCMD_EOP 0x1 /* End of Packet */ #define E1000_ADV_DCMD_IFCS 0x2 /* Insert FCS (Ethernet CRC) */ #define E1000_ADV_DCMD_RS 0x8 /* Report Status */ #define E1000_ADV_DCMD_VLE 0x40 /* Add VLAN tag */ #define E1000_ADV_DCMD_TSE 0x80 /* TCP Seg enable */ /* Extended Device Control */ #define E1000_CTRL_EXT_NSICR 0x00000001 /* Disable Intr Clear all on read */ struct e1000_adv_context_desc { union { u32 ip_config; struct { u32 iplen:9; u32 maclen:7; u32 vlan_tag:16; } fields; } ip_setup; u32 seq_num; union { u64 l4_config; struct { u32 mkrloc:9; u32 tucmd:11; u32 dtyp:4; u32 adv:8; u32 rsvd:4; u32 idx:4; u32 l4len:8; u32 mss:16; } fields; } l4_setup; }; #endif /* SRRCTL bit definitions */ #define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ #define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 #define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ #define E1000_SRRCTL_DESCTYPE_LEGACY 0x00000000 #define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 #define E1000_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 #define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 #define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION 0x06000000 #define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 #define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 #define E1000_SRRCTL_TIMESTAMP 0x40000000 #define E1000_SRRCTL_DROP_EN 0x80000000 #define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F #define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 #define E1000_TX_HEAD_WB_ENABLE 0x1 #define E1000_TX_SEQNUM_WB_ENABLE 0x2 #define E1000_MRQC_ENABLE_RSS_4Q 0x00000002 #define E1000_MRQC_ENABLE_VMDQ 0x00000003 #define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005 #define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 #define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 #define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 #define E1000_MRQC_ENABLE_RSS_8Q 0x00000002 #define E1000_VMRCTL_MIRROR_PORT_SHIFT 8 #define E1000_VMRCTL_MIRROR_DSTPORT_MASK (7 << \ E1000_VMRCTL_MIRROR_PORT_SHIFT) #define E1000_VMRCTL_POOL_MIRROR_ENABLE (1 << 0) #define E1000_VMRCTL_UPLINK_MIRROR_ENABLE (1 << 1) #define E1000_VMRCTL_DOWNLINK_MIRROR_ENABLE (1 << 2) #define E1000_EICR_TX_QUEUE ( \ E1000_EICR_TX_QUEUE0 | \ E1000_EICR_TX_QUEUE1 | \ E1000_EICR_TX_QUEUE2 | \ E1000_EICR_TX_QUEUE3) #define E1000_EICR_RX_QUEUE ( \ E1000_EICR_RX_QUEUE0 | \ 
E1000_EICR_RX_QUEUE1 | \ E1000_EICR_RX_QUEUE2 | \ E1000_EICR_RX_QUEUE3) #define E1000_EIMS_RX_QUEUE E1000_EICR_RX_QUEUE #define E1000_EIMS_TX_QUEUE E1000_EICR_TX_QUEUE #define EIMS_ENABLE_MASK ( \ E1000_EIMS_RX_QUEUE | \ E1000_EIMS_TX_QUEUE | \ E1000_EIMS_TCP_TIMER | \ E1000_EIMS_OTHER) /* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ #define E1000_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ #define E1000_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ #define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ #define E1000_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */ #define E1000_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */ #define E1000_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */ #define E1000_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */ #define E1000_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */ #define E1000_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */ #define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ /* Receive Descriptor - Advanced */ union e1000_adv_rx_desc { struct { __le64 pkt_addr; /* Packet buffer address */ __le64 hdr_addr; /* Header buffer address */ } read; struct { struct { union { __le32 data; struct { __le16 pkt_info; /*RSS type, Pkt type*/ /* Split Header, header buffer len */ __le16 hdr_info; } hs_rss; } lo_dword; union { __le32 rss; /* RSS Hash */ struct { __le16 ip_id; /* IP id */ __le16 csum; /* Packet Checksum */ } csum_ip; } hi_dword; } lower; struct { __le32 status_error; /* ext status/error */ __le16 length; /* Packet length */ __le16 vlan; /* VLAN tag */ } upper; } wb; /* writeback */ }; #define E1000_RXDADV_RSSTYPE_MASK 0x0000000F #define E1000_RXDADV_RSSTYPE_SHIFT 12 #define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 #define E1000_RXDADV_HDRBUFLEN_SHIFT 5 #define E1000_RXDADV_SPLITHEADER_EN 0x00001000 #define E1000_RXDADV_SPH 0x8000 #define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */ #define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ #define E1000_RXDADV_ERR_HBO 0x00800000 /* RSS Hash results */ #define E1000_RXDADV_RSSTYPE_NONE 0x00000000 #define E1000_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 #define E1000_RXDADV_RSSTYPE_IPV4 0x00000002 #define E1000_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 #define E1000_RXDADV_RSSTYPE_IPV6_EX 0x00000004 #define E1000_RXDADV_RSSTYPE_IPV6 0x00000005 #define E1000_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 #define E1000_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 #define E1000_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 #define E1000_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 /* RSS Packet Types as indicated in the receive descriptor */ #define E1000_RXDADV_PKTTYPE_ILMASK 0x000000F0 #define E1000_RXDADV_PKTTYPE_TLMASK 0x00000F00 #define E1000_RXDADV_PKTTYPE_NONE 0x00000000 #define E1000_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPV4 hdr present */ #define E1000_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPV4 hdr + extensions */ #define E1000_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPV6 hdr present */ #define E1000_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPV6 hdr + extensions */ #define E1000_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */ #define E1000_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ #define E1000_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ #define E1000_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ #define E1000_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */ #define E1000_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */ #define E1000_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */ #define 
E1000_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */ #define E1000_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */ #define E1000_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */ /* LinkSec results */ /* Security Processing bit Indication */ #define E1000_RXDADV_LNKSEC_STATUS_SECP 0x00020000 #define E1000_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000 #define E1000_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000 #define E1000_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000 #define E1000_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000 #define E1000_RXDADV_IPSEC_STATUS_SECP 0x00020000 #define E1000_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000 #define E1000_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000 #define E1000_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000 #define E1000_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED 0x18000000 /* Transmit Descriptor - Advanced */ union e1000_adv_tx_desc { struct { __le64 buffer_addr; /* Address of descriptor's data buf */ __le32 cmd_type_len; __le32 olinfo_status; } read; struct { __le64 rsvd; /* Reserved */ __le32 nxtseq_seed; __le32 status; } wb; }; /* Adv Transmit Descriptor Config Masks */ #define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ #define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ #define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ #define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ #define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ #define E1000_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ #define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ #define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ #define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ #define E1000_ADVTXD_MAC_LINKSEC 0x00040000 /* Apply LinkSec on pkt */ #define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp pkt */ #define E1000_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED prsnt in WB */ #define E1000_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ #define E1000_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ #define E1000_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ #define E1000_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ /* 1st & Last TSO-full iSCSI PDU*/ #define E1000_ADVTXD_POPTS_ISCO_FULL 0x00001800 #define E1000_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ #define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ /* Context descriptors */ struct e1000_adv_tx_context_desc { __le32 vlan_macip_lens; __le32 seqnum_seed; __le32 type_tucmd_mlhl; __le32 mss_l4len_idx; }; #define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ #define E1000_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ #define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ #define E1000_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ #define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ #define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ #define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ #define E1000_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ /* IPSec Encrypt Enable for ESP */ #define E1000_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000 /* Req requires Markers and CRC */ #define E1000_ADVTXD_TUCMD_MKRREQ 0x00002000 #define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ #define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ /* Adv ctxt IPSec SA IDX mask */ #define 
E1000_ADVTXD_IPSEC_SA_INDEX_MASK 0x000000FF /* Adv ctxt IPSec ESP len mask */ #define E1000_ADVTXD_IPSEC_ESP_LEN_MASK 0x000000FF /* Additional Transmit Descriptor Control definitions */ #define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */ #define E1000_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wbk flushing */ /* Tx Queue Arbitration Priority 0=low, 1=high */ #define E1000_TXDCTL_PRIORITY 0x08000000 /* Additional Receive Descriptor Control definitions */ #define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */ #define E1000_RXDCTL_SWFLSH 0x04000000 /* Rx Desc. wbk flushing */ /* Direct Cache Access (DCA) definitions */ #define E1000_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */ #define E1000_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */ #define E1000_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */ #define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ #define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ #define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ #define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header ena */ #define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload ena */ #define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx Desc Relax Order */ #define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ #define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ #define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */ #define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ #define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */ #define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */ #define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */ #define E1000_DCA_TXCTRL_CPUID_SHIFT_82576 24 /* Tx CPUID */ #define E1000_DCA_RXCTRL_CPUID_SHIFT_82576 24 /* Rx CPUID */ /* Additional interrupt register bit definitions */ #define E1000_ICR_LSECPNS 0x00000020 /* PN threshold - server */ #define E1000_IMS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */ #define E1000_ICS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */ /* ETQF register bit definitions */ #define E1000_ETQF_FILTER_ENABLE (1 << 26) #define E1000_ETQF_IMM_INT (1 << 29) #define E1000_ETQF_1588 (1 << 30) #define E1000_ETQF_QUEUE_ENABLE (1 << 31) /* * ETQF filter list: one static filter per filter consumer. This is * to avoid filter collisions later. Add new filters * here!! 
* * Current filters: * EAPOL 802.1x (0x888e): Filter 0 */ #define E1000_ETQF_FILTER_EAPOL 0 #define E1000_FTQF_VF_BP 0x00008000 #define E1000_FTQF_1588_TIME_STAMP 0x08000000 #define E1000_FTQF_MASK 0xF0000000 #define E1000_FTQF_MASK_PROTO_BP 0x10000000 #define E1000_FTQF_MASK_SOURCE_ADDR_BP 0x20000000 #define E1000_FTQF_MASK_DEST_ADDR_BP 0x40000000 #define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000 #define E1000_NVM_APME_82575 0x0400 #define MAX_NUM_VFS 7 #define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof cntrl */ #define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof cntrl */ #define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */ #define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8 #define E1000_DTXSWC_LLE_SHIFT 16 #define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */ /* Easy defines for setting default pool, would normally be left a zero */ #define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7 #define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT) /* Other useful VMD_CTL register defines */ #define E1000_VT_CTL_IGNORE_MAC (1 << 28) #define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29) #define E1000_VT_CTL_VM_REPL_EN (1 << 30) /* Per VM Offload register setup */ #define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */ #define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */ #define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */ #define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */ #define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */ #define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */ #define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */ #define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */ #define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ #define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */ #define E1000_VMOLR_VPE 0x00800000 /* VLAN promiscuous enable */ #define E1000_VMOLR_UPE 0x20000000 /* Unicast promiscuous enable */ #define E1000_DVMOLR_HIDVLAN 0x20000000 /* Vlan hiding enable */ #define E1000_DVMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ #define E1000_DVMOLR_STRCRC 0x80000000 /* CRC stripping enable */ #define E1000_PBRWAC_WALPB 0x00000007 /* Wrap around event on LAN Rx PB */ #define E1000_PBRWAC_PBE 0x00000008 /* Rx packet buffer empty */ #define E1000_VLVF_ARRAY_SIZE 32 #define E1000_VLVF_VLANID_MASK 0x00000FFF #define E1000_VLVF_POOLSEL_SHIFT 12 #define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT) #define E1000_VLVF_LVLAN 0x00100000 #define E1000_VLVF_VLANID_ENABLE 0x80000000 #define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ #define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */ #define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ #define E1000_IOVCTL 0x05BBC #define E1000_IOVCTL_REUSE_VFQ 0x00000001 #define E1000_RPLOLR_STRVLAN 0x40000000 #define E1000_RPLOLR_STRCRC 0x80000000 #define E1000_TCTL_EXT_COLD 0x000FFC00 #define E1000_TCTL_EXT_COLD_SHIFT 10 #define E1000_DTXCTL_8023LL 0x0004 #define E1000_DTXCTL_VLAN_ADDED 0x0008 #define E1000_DTXCTL_OOS_ENABLE 0x0010 #define E1000_DTXCTL_MDP_EN 0x0020 #define E1000_DTXCTL_SPOOF_INT 0x0040 #define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT (1 << 14) #define ALL_QUEUES 0xFFFF /* Rx packet buffer size defines */ #define E1000_RXPBS_SIZE_MASK_82576 0x0000007F void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable); void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool
enable, int pf); void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable); s32 e1000_init_nvm_params_82575(struct e1000_hw *hw); s32 e1000_init_hw_82575(struct e1000_hw *hw); enum e1000_promisc_type { e1000_promisc_disabled = 0, /* all promisc modes disabled */ e1000_promisc_unicast = 1, /* unicast promiscuous enabled */ e1000_promisc_multicast = 2, /* multicast promiscuous enabled */ e1000_promisc_enabled = 3, /* both uni and multicast promisc */ e1000_num_promisc_types }; void e1000_vfta_set_vf(struct e1000_hw *, u16, bool); void e1000_rlpml_set_vf(struct e1000_hw *, u16); s32 e1000_promisc_set_vf(struct e1000_hw *, enum e1000_promisc_type type); u16 e1000_rxpbs_adjust_82580(u32 data); s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data); -s32 e1000_set_eee_i350(struct e1000_hw *); -s32 e1000_set_eee_i354(struct e1000_hw *); +s32 e1000_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M); +s32 e1000_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M); s32 e1000_get_eee_status_i354(struct e1000_hw *, bool *); s32 e1000_initialize_M88E1512_phy(struct e1000_hw *hw); +s32 e1000_initialize_M88E1543_phy(struct e1000_hw *hw); /* I2C SDA and SCL timing parameters for standard mode */ #define E1000_I2C_T_HD_STA 4 #define E1000_I2C_T_LOW 5 #define E1000_I2C_T_HIGH 4 #define E1000_I2C_T_SU_STA 5 #define E1000_I2C_T_HD_DATA 5 #define E1000_I2C_T_SU_DATA 1 #define E1000_I2C_T_RISE 1 #define E1000_I2C_T_FALL 1 #define E1000_I2C_T_SU_STO 4 #define E1000_I2C_T_BUF 5 s32 e1000_set_i2c_bb(struct e1000_hw *hw); s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data); s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr, u8 data); void e1000_i2c_bus_clear(struct e1000_hw *hw); #endif /* _E1000_82575_H_ */ Index: stable/10/sys/dev/e1000/e1000_api.c =================================================================== --- stable/10/sys/dev/e1000/e1000_api.c (revision 296054) +++ stable/10/sys/dev/e1000/e1000_api.c (revision 296055) @@ -1,1364 +1,1372 @@ /****************************************************************************** Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #include "e1000_api.h" /** * e1000_init_mac_params - Initialize MAC function pointers * @hw: pointer to the HW structure * * This function initializes the function pointers for the MAC * set of functions. Called by drivers or by e1000_setup_init_funcs. **/ s32 e1000_init_mac_params(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; if (hw->mac.ops.init_params) { ret_val = hw->mac.ops.init_params(hw); if (ret_val) { DEBUGOUT("MAC Initialization Error\n"); goto out; } } else { DEBUGOUT("mac.init_mac_params was NULL\n"); ret_val = -E1000_ERR_CONFIG; } out: return ret_val; } /** * e1000_init_nvm_params - Initialize NVM function pointers * @hw: pointer to the HW structure * * This function initializes the function pointers for the NVM * set of functions. Called by drivers or by e1000_setup_init_funcs. **/ s32 e1000_init_nvm_params(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; if (hw->nvm.ops.init_params) { ret_val = hw->nvm.ops.init_params(hw); if (ret_val) { DEBUGOUT("NVM Initialization Error\n"); goto out; } } else { DEBUGOUT("nvm.init_nvm_params was NULL\n"); ret_val = -E1000_ERR_CONFIG; } out: return ret_val; } /** * e1000_init_phy_params - Initialize PHY function pointers * @hw: pointer to the HW structure * * This function initializes the function pointers for the PHY * set of functions. Called by drivers or by e1000_setup_init_funcs. **/ s32 e1000_init_phy_params(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; if (hw->phy.ops.init_params) { ret_val = hw->phy.ops.init_params(hw); if (ret_val) { DEBUGOUT("PHY Initialization Error\n"); goto out; } } else { DEBUGOUT("phy.init_phy_params was NULL\n"); ret_val = -E1000_ERR_CONFIG; } out: return ret_val; } /** * e1000_init_mbx_params - Initialize mailbox function pointers * @hw: pointer to the HW structure * * This function initializes the function pointers for the mailbox * set of functions. Called by drivers or by e1000_setup_init_funcs. **/ s32 e1000_init_mbx_params(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; if (hw->mbx.ops.init_params) { ret_val = hw->mbx.ops.init_params(hw); if (ret_val) { DEBUGOUT("Mailbox Initialization Error\n"); goto out; } } else { DEBUGOUT("mbx.init_mbx_params was NULL\n"); ret_val = -E1000_ERR_CONFIG; } out: return ret_val; } /** * e1000_set_mac_type - Sets MAC type * @hw: pointer to the HW structure * * This function sets the mac type of the adapter based on the * device ID stored in the hw structure. * MUST BE FIRST FUNCTION CALLED (explicitly or through * e1000_setup_init_funcs()).
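* Unrecognized device IDs fail with -E1000_ERR_MAC_INIT.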
**/ s32 e1000_set_mac_type(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; s32 ret_val = E1000_SUCCESS; DEBUGFUNC("e1000_set_mac_type"); switch (hw->device_id) { case E1000_DEV_ID_82542: mac->type = e1000_82542; break; case E1000_DEV_ID_82543GC_FIBER: case E1000_DEV_ID_82543GC_COPPER: mac->type = e1000_82543; break; case E1000_DEV_ID_82544EI_COPPER: case E1000_DEV_ID_82544EI_FIBER: case E1000_DEV_ID_82544GC_COPPER: case E1000_DEV_ID_82544GC_LOM: mac->type = e1000_82544; break; case E1000_DEV_ID_82540EM: case E1000_DEV_ID_82540EM_LOM: case E1000_DEV_ID_82540EP: case E1000_DEV_ID_82540EP_LOM: case E1000_DEV_ID_82540EP_LP: mac->type = e1000_82540; break; case E1000_DEV_ID_82545EM_COPPER: case E1000_DEV_ID_82545EM_FIBER: mac->type = e1000_82545; break; case E1000_DEV_ID_82545GM_COPPER: case E1000_DEV_ID_82545GM_FIBER: case E1000_DEV_ID_82545GM_SERDES: mac->type = e1000_82545_rev_3; break; case E1000_DEV_ID_82546EB_COPPER: case E1000_DEV_ID_82546EB_FIBER: case E1000_DEV_ID_82546EB_QUAD_COPPER: mac->type = e1000_82546; break; case E1000_DEV_ID_82546GB_COPPER: case E1000_DEV_ID_82546GB_FIBER: case E1000_DEV_ID_82546GB_SERDES: case E1000_DEV_ID_82546GB_PCIE: case E1000_DEV_ID_82546GB_QUAD_COPPER: case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: mac->type = e1000_82546_rev_3; break; case E1000_DEV_ID_82541EI: case E1000_DEV_ID_82541EI_MOBILE: case E1000_DEV_ID_82541ER_LOM: mac->type = e1000_82541; break; case E1000_DEV_ID_82541ER: case E1000_DEV_ID_82541GI: case E1000_DEV_ID_82541GI_LF: case E1000_DEV_ID_82541GI_MOBILE: mac->type = e1000_82541_rev_2; break; case E1000_DEV_ID_82547EI: case E1000_DEV_ID_82547EI_MOBILE: mac->type = e1000_82547; break; case E1000_DEV_ID_82547GI: mac->type = e1000_82547_rev_2; break; case E1000_DEV_ID_82571EB_COPPER: case E1000_DEV_ID_82571EB_FIBER: case E1000_DEV_ID_82571EB_SERDES: case E1000_DEV_ID_82571EB_SERDES_DUAL: case E1000_DEV_ID_82571EB_SERDES_QUAD: case E1000_DEV_ID_82571EB_QUAD_COPPER: case E1000_DEV_ID_82571PT_QUAD_COPPER: case E1000_DEV_ID_82571EB_QUAD_FIBER: case E1000_DEV_ID_82571EB_QUAD_COPPER_LP: mac->type = e1000_82571; break; case E1000_DEV_ID_82572EI: case E1000_DEV_ID_82572EI_COPPER: case E1000_DEV_ID_82572EI_FIBER: case E1000_DEV_ID_82572EI_SERDES: mac->type = e1000_82572; break; case E1000_DEV_ID_82573E: case E1000_DEV_ID_82573E_IAMT: case E1000_DEV_ID_82573L: mac->type = e1000_82573; break; case E1000_DEV_ID_82574L: case E1000_DEV_ID_82574LA: mac->type = e1000_82574; break; case E1000_DEV_ID_82583V: mac->type = e1000_82583; break; case E1000_DEV_ID_80003ES2LAN_COPPER_DPT: case E1000_DEV_ID_80003ES2LAN_SERDES_DPT: case E1000_DEV_ID_80003ES2LAN_COPPER_SPT: case E1000_DEV_ID_80003ES2LAN_SERDES_SPT: mac->type = e1000_80003es2lan; break; case E1000_DEV_ID_ICH8_IFE: case E1000_DEV_ID_ICH8_IFE_GT: case E1000_DEV_ID_ICH8_IFE_G: case E1000_DEV_ID_ICH8_IGP_M: case E1000_DEV_ID_ICH8_IGP_M_AMT: case E1000_DEV_ID_ICH8_IGP_AMT: case E1000_DEV_ID_ICH8_IGP_C: case E1000_DEV_ID_ICH8_82567V_3: mac->type = e1000_ich8lan; break; case E1000_DEV_ID_ICH9_IFE: case E1000_DEV_ID_ICH9_IFE_GT: case E1000_DEV_ID_ICH9_IFE_G: case E1000_DEV_ID_ICH9_IGP_M: case E1000_DEV_ID_ICH9_IGP_M_AMT: case E1000_DEV_ID_ICH9_IGP_M_V: case E1000_DEV_ID_ICH9_IGP_AMT: case E1000_DEV_ID_ICH9_BM: case E1000_DEV_ID_ICH9_IGP_C: case E1000_DEV_ID_ICH10_R_BM_LM: case E1000_DEV_ID_ICH10_R_BM_LF: case E1000_DEV_ID_ICH10_R_BM_V: mac->type = e1000_ich9lan; break; case E1000_DEV_ID_ICH10_D_BM_LM: case E1000_DEV_ID_ICH10_D_BM_LF: case E1000_DEV_ID_ICH10_D_BM_V: mac->type = e1000_ich10lan; 
break; case E1000_DEV_ID_PCH_D_HV_DM: case E1000_DEV_ID_PCH_D_HV_DC: case E1000_DEV_ID_PCH_M_HV_LM: case E1000_DEV_ID_PCH_M_HV_LC: mac->type = e1000_pchlan; break; case E1000_DEV_ID_PCH2_LV_LM: case E1000_DEV_ID_PCH2_LV_V: mac->type = e1000_pch2lan; break; case E1000_DEV_ID_PCH_LPT_I217_LM: case E1000_DEV_ID_PCH_LPT_I217_V: case E1000_DEV_ID_PCH_LPTLP_I218_LM: case E1000_DEV_ID_PCH_LPTLP_I218_V: case E1000_DEV_ID_PCH_I218_LM2: case E1000_DEV_ID_PCH_I218_V2: case E1000_DEV_ID_PCH_I218_LM3: case E1000_DEV_ID_PCH_I218_V3: mac->type = e1000_pch_lpt; break; + case E1000_DEV_ID_PCH_SPT_I219_LM: + case E1000_DEV_ID_PCH_SPT_I219_V: + case E1000_DEV_ID_PCH_SPT_I219_LM2: + case E1000_DEV_ID_PCH_SPT_I219_V2: + case E1000_DEV_ID_PCH_LBG_I219_LM3: + mac->type = e1000_pch_spt; + break; case E1000_DEV_ID_82575EB_COPPER: case E1000_DEV_ID_82575EB_FIBER_SERDES: case E1000_DEV_ID_82575GB_QUAD_COPPER: mac->type = e1000_82575; break; case E1000_DEV_ID_82576: case E1000_DEV_ID_82576_FIBER: case E1000_DEV_ID_82576_SERDES: case E1000_DEV_ID_82576_QUAD_COPPER: case E1000_DEV_ID_82576_QUAD_COPPER_ET2: case E1000_DEV_ID_82576_NS: case E1000_DEV_ID_82576_NS_SERDES: case E1000_DEV_ID_82576_SERDES_QUAD: mac->type = e1000_82576; break; case E1000_DEV_ID_82580_COPPER: case E1000_DEV_ID_82580_FIBER: case E1000_DEV_ID_82580_SERDES: case E1000_DEV_ID_82580_SGMII: case E1000_DEV_ID_82580_COPPER_DUAL: case E1000_DEV_ID_82580_QUAD_FIBER: case E1000_DEV_ID_DH89XXCC_SGMII: case E1000_DEV_ID_DH89XXCC_SERDES: case E1000_DEV_ID_DH89XXCC_BACKPLANE: case E1000_DEV_ID_DH89XXCC_SFP: mac->type = e1000_82580; break; case E1000_DEV_ID_I350_COPPER: case E1000_DEV_ID_I350_FIBER: case E1000_DEV_ID_I350_SERDES: case E1000_DEV_ID_I350_SGMII: case E1000_DEV_ID_I350_DA4: mac->type = e1000_i350; break; case E1000_DEV_ID_I210_COPPER_FLASHLESS: case E1000_DEV_ID_I210_SERDES_FLASHLESS: case E1000_DEV_ID_I210_COPPER: case E1000_DEV_ID_I210_COPPER_OEM1: case E1000_DEV_ID_I210_COPPER_IT: case E1000_DEV_ID_I210_FIBER: case E1000_DEV_ID_I210_SERDES: case E1000_DEV_ID_I210_SGMII: mac->type = e1000_i210; break; case E1000_DEV_ID_I211_COPPER: mac->type = e1000_i211; break; case E1000_DEV_ID_82576_VF: case E1000_DEV_ID_82576_VF_HV: mac->type = e1000_vfadapt; break; case E1000_DEV_ID_I350_VF: case E1000_DEV_ID_I350_VF_HV: mac->type = e1000_vfadapt_i350; break; case E1000_DEV_ID_I354_BACKPLANE_1GBPS: case E1000_DEV_ID_I354_SGMII: case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS: mac->type = e1000_i354; break; default: /* Should never have loaded on this device */ ret_val = -E1000_ERR_MAC_INIT; break; } return ret_val; } /** * e1000_setup_init_funcs - Initializes function pointers * @hw: pointer to the HW structure * @init_device: TRUE will initialize the rest of the function pointers * getting the device ready for use. FALSE will only set * MAC type and the function pointers for the other init * functions. Passing FALSE will not generate any hardware * reads or writes. * * This function must be called by a driver in order to use the rest * of the 'shared' code files. Called by drivers only. **/ s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device) { s32 ret_val; /* Can't do much good without knowing the MAC type. */ ret_val = e1000_set_mac_type(hw); if (ret_val) { DEBUGOUT("ERROR: MAC type could not be set properly.\n"); goto out; } if (!hw->hw_addr) { DEBUGOUT("ERROR: Registers not mapped\n"); ret_val = -E1000_ERR_CONFIG; goto out; } /* * Init function pointers to generic implementations. 
We do this first * allowing a driver module to override it afterward. */ e1000_init_mac_ops_generic(hw); e1000_init_phy_ops_generic(hw); e1000_init_nvm_ops_generic(hw); e1000_init_mbx_ops_generic(hw); /* * Set up the init function pointers. These are functions within the * adapter family file that set up function pointers for the rest of * the functions in that family. */ switch (hw->mac.type) { case e1000_82542: e1000_init_function_pointers_82542(hw); break; case e1000_82543: case e1000_82544: e1000_init_function_pointers_82543(hw); break; case e1000_82540: case e1000_82545: case e1000_82545_rev_3: case e1000_82546: case e1000_82546_rev_3: e1000_init_function_pointers_82540(hw); break; case e1000_82541: case e1000_82541_rev_2: case e1000_82547: case e1000_82547_rev_2: e1000_init_function_pointers_82541(hw); break; case e1000_82571: case e1000_82572: case e1000_82573: case e1000_82574: case e1000_82583: e1000_init_function_pointers_82571(hw); break; case e1000_80003es2lan: e1000_init_function_pointers_80003es2lan(hw); break; case e1000_ich8lan: case e1000_ich9lan: case e1000_ich10lan: case e1000_pchlan: case e1000_pch2lan: case e1000_pch_lpt: + case e1000_pch_spt: e1000_init_function_pointers_ich8lan(hw); break; case e1000_82575: case e1000_82576: case e1000_82580: case e1000_i350: case e1000_i354: e1000_init_function_pointers_82575(hw); break; case e1000_i210: case e1000_i211: e1000_init_function_pointers_i210(hw); break; case e1000_vfadapt: e1000_init_function_pointers_vf(hw); break; case e1000_vfadapt_i350: e1000_init_function_pointers_vf(hw); break; default: DEBUGOUT("Hardware not supported\n"); ret_val = -E1000_ERR_CONFIG; break; } /* * Initialize the rest of the function pointers. These require some * register reads/writes in some cases. */ if (!(ret_val) && init_device) { ret_val = e1000_init_mac_params(hw); if (ret_val) goto out; ret_val = e1000_init_nvm_params(hw); if (ret_val) goto out; ret_val = e1000_init_phy_params(hw); if (ret_val) goto out; ret_val = e1000_init_mbx_params(hw); if (ret_val) goto out; } out: return ret_val; } /** * e1000_get_bus_info - Obtain bus information for adapter * @hw: pointer to the HW structure * * This obtains information about the HW bus to which the * adapter is attached and stores it in the hw structure. This is a * function pointer entry point called by drivers. **/ s32 e1000_get_bus_info(struct e1000_hw *hw) { if (hw->mac.ops.get_bus_info) return hw->mac.ops.get_bus_info(hw); return E1000_SUCCESS; } /** * e1000_clear_vfta - Clear VLAN filter table * @hw: pointer to the HW structure * * This clears the VLAN filter table on the adapter. This is a function * pointer entry point called by drivers. **/ void e1000_clear_vfta(struct e1000_hw *hw) { if (hw->mac.ops.clear_vfta) hw->mac.ops.clear_vfta(hw); } /** * e1000_write_vfta - Write value to VLAN filter table * @hw: pointer to the HW structure * @offset: the 32-bit offset at which to write the value. * @value: the 32-bit value to write at location offset. * * This writes a 32-bit value to a 32-bit offset in the VLAN filter * table. This is a function pointer entry point called by drivers. **/ void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) { if (hw->mac.ops.write_vfta) hw->mac.ops.write_vfta(hw, offset, value); } /** * e1000_update_mc_addr_list - Update Multicast addresses * @hw: pointer to the HW structure * @mc_addr_list: array of multicast addresses to program * @mc_addr_count: number of multicast addresses to program * * Updates the Multicast Table Array.
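 * For example, with two addresses packed back to back in one buffer
 * (an illustrative sketch; the addresses are placeholders):
 *	u8 mc[12] = { 0x01, 0x00, 0x5E, 0x00, 0x00, 0x01,
 *		      0x01, 0x00, 0x5E, 0x00, 0x00, 0xFB };
 *	e1000_update_mc_addr_list(hw, mc, 2);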
* The caller must have a packed mc_addr_list of multicast addresses. **/ void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, u32 mc_addr_count) { if (hw->mac.ops.update_mc_addr_list) hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count); } /** * e1000_force_mac_fc - Force MAC flow control * @hw: pointer to the HW structure * * Force the MAC's flow control settings. Currently no func pointer exists * and all implementations are handled in the generic version of this * function. **/ s32 e1000_force_mac_fc(struct e1000_hw *hw) { return e1000_force_mac_fc_generic(hw); } /** * e1000_check_for_link - Check/Store link connection * @hw: pointer to the HW structure * * This checks the link condition of the adapter and stores the * results in the hw->mac structure. This is a function pointer entry * point called by drivers. **/ s32 e1000_check_for_link(struct e1000_hw *hw) { if (hw->mac.ops.check_for_link) return hw->mac.ops.check_for_link(hw); return -E1000_ERR_CONFIG; } /** * e1000_check_mng_mode - Check management mode * @hw: pointer to the HW structure * * This checks if the adapter has manageability enabled. * This is a function pointer entry point called by drivers. **/ bool e1000_check_mng_mode(struct e1000_hw *hw) { if (hw->mac.ops.check_mng_mode) return hw->mac.ops.check_mng_mode(hw); return FALSE; } /** * e1000_mng_write_dhcp_info - Writes DHCP info to host interface * @hw: pointer to the HW structure * @buffer: pointer to the host interface * @length: size of the buffer * * Writes the DHCP information to the host interface. **/ s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length) { return e1000_mng_write_dhcp_info_generic(hw, buffer, length); } /** * e1000_reset_hw - Reset hardware * @hw: pointer to the HW structure * * This resets the hardware into a known state. This is a function pointer * entry point called by drivers. **/ s32 e1000_reset_hw(struct e1000_hw *hw) { if (hw->mac.ops.reset_hw) return hw->mac.ops.reset_hw(hw); return -E1000_ERR_CONFIG; } /** * e1000_init_hw - Initialize hardware * @hw: pointer to the HW structure * * This inits the hardware readying it for operation. This is a function * pointer entry point called by drivers. **/ s32 e1000_init_hw(struct e1000_hw *hw) { if (hw->mac.ops.init_hw) return hw->mac.ops.init_hw(hw); return -E1000_ERR_CONFIG; } /** * e1000_setup_link - Configures link and flow control * @hw: pointer to the HW structure * * This configures link and flow control settings for the adapter. This * is a function pointer entry point called by drivers. While modules can * also call this, they probably call their own version of this function. **/ s32 e1000_setup_link(struct e1000_hw *hw) { if (hw->mac.ops.setup_link) return hw->mac.ops.setup_link(hw); return -E1000_ERR_CONFIG; } /** * e1000_get_speed_and_duplex - Returns current speed and duplex * @hw: pointer to the HW structure * @speed: pointer to a 16-bit value to store the speed * @duplex: pointer to a 16-bit value to store the duplex. * * This returns the speed and duplex of the adapter in the two 'out' * variables passed in. This is a function pointer entry point called * by drivers. 
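 * A minimal call sketch (assuming link is already up):
 *	u16 speed, duplex;
 *	if (e1000_get_speed_and_duplex(hw, &speed, &duplex) == E1000_SUCCESS)
 *		DEBUGOUT2("link: %u Mbps, duplex %u\n", speed, duplex);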
**/ s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex) { if (hw->mac.ops.get_link_up_info) return hw->mac.ops.get_link_up_info(hw, speed, duplex); return -E1000_ERR_CONFIG; } /** * e1000_setup_led - Configures SW controllable LED * @hw: pointer to the HW structure * * This prepares the SW controllable LED for use and saves the current state * of the LED so it can later be restored. This is a function pointer entry * point called by drivers. **/ s32 e1000_setup_led(struct e1000_hw *hw) { if (hw->mac.ops.setup_led) return hw->mac.ops.setup_led(hw); return E1000_SUCCESS; } /** * e1000_cleanup_led - Restores SW controllable LED * @hw: pointer to the HW structure * * This restores the SW controllable LED to the value saved off by * e1000_setup_led. This is a function pointer entry point called by drivers. **/ s32 e1000_cleanup_led(struct e1000_hw *hw) { if (hw->mac.ops.cleanup_led) return hw->mac.ops.cleanup_led(hw); return E1000_SUCCESS; } /** * e1000_blink_led - Blink SW controllable LED * @hw: pointer to the HW structure * * This starts the adapter LED blinking. The LED should be set up first * and cleaned up after. This is a function pointer entry point called by * drivers. **/ s32 e1000_blink_led(struct e1000_hw *hw) { if (hw->mac.ops.blink_led) return hw->mac.ops.blink_led(hw); return E1000_SUCCESS; } /** * e1000_id_led_init - store LED configurations in SW * @hw: pointer to the HW structure * * Initializes the LED config in SW. This is a function pointer entry point * called by drivers. **/ s32 e1000_id_led_init(struct e1000_hw *hw) { if (hw->mac.ops.id_led_init) return hw->mac.ops.id_led_init(hw); return E1000_SUCCESS; } /** * e1000_led_on - Turn on SW controllable LED * @hw: pointer to the HW structure * * Turns the SW defined LED on. This is a function pointer entry point * called by drivers. **/ s32 e1000_led_on(struct e1000_hw *hw) { if (hw->mac.ops.led_on) return hw->mac.ops.led_on(hw); return E1000_SUCCESS; } /** * e1000_led_off - Turn off SW controllable LED * @hw: pointer to the HW structure * * Turns the SW defined LED off. This is a function pointer entry point * called by drivers. **/ s32 e1000_led_off(struct e1000_hw *hw) { if (hw->mac.ops.led_off) return hw->mac.ops.led_off(hw); return E1000_SUCCESS; } /** * e1000_reset_adaptive - Reset adaptive IFS * @hw: pointer to the HW structure * * Resets the adaptive IFS. Currently no func pointer exists and all * implementations are handled in the generic version of this function. **/ void e1000_reset_adaptive(struct e1000_hw *hw) { e1000_reset_adaptive_generic(hw); } /** * e1000_update_adaptive - Update adaptive IFS * @hw: pointer to the HW structure * * Updates the adaptive IFS. Currently no func pointer exists and all * implementations are handled in the generic version of this function. **/ void e1000_update_adaptive(struct e1000_hw *hw) { e1000_update_adaptive_generic(hw); } /** * e1000_disable_pcie_master - Disable PCI-Express master access * @hw: pointer to the HW structure * * Disables PCI-Express master access and verifies there are no pending * requests. Currently no func pointer exists and all implementations are * handled in the generic version of this function. **/ s32 e1000_disable_pcie_master(struct e1000_hw *hw) { return e1000_disable_pcie_master_generic(hw); } /** * e1000_config_collision_dist - Configure collision distance * @hw: pointer to the HW structure * * Configures the collision distance to the default value and is used * during link setup.
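 * The generic implementation effectively performs the following (a
 * sketch mirroring the E1000_COLLISION_DISTANCE and E1000_COLD_SHIFT
 * defines from e1000_defines.h):
 *	u32 tctl = E1000_READ_REG(hw, E1000_TCTL);
 *	tctl &= ~E1000_TCTL_COLD;
 *	tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
 *	E1000_WRITE_REG(hw, E1000_TCTL, tctl);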
**/ void e1000_config_collision_dist(struct e1000_hw *hw) { if (hw->mac.ops.config_collision_dist) hw->mac.ops.config_collision_dist(hw); } /** * e1000_rar_set - Sets a receive address register * @hw: pointer to the HW structure * @addr: address to set the RAR to * @index: the RAR to set * * Sets a Receive Address Register (RAR) to the specified address. **/ int e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) { if (hw->mac.ops.rar_set) return hw->mac.ops.rar_set(hw, addr, index); return E1000_SUCCESS; } /** * e1000_validate_mdi_setting - Ensures valid MDI/MDIX SW state * @hw: pointer to the HW structure * * Ensures that the MDI/MDIX SW state is valid. **/ s32 e1000_validate_mdi_setting(struct e1000_hw *hw) { if (hw->mac.ops.validate_mdi_setting) return hw->mac.ops.validate_mdi_setting(hw); return E1000_SUCCESS; } /** * e1000_hash_mc_addr - Determines address location in multicast table * @hw: pointer to the HW structure * @mc_addr: Multicast address to hash. * * This hashes an address to determine its location in the multicast * table. Currently no func pointer exists and all implementations * are handled in the generic version of this function. **/ u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) { return e1000_hash_mc_addr_generic(hw, mc_addr); } /** * e1000_enable_tx_pkt_filtering - Enable packet filtering on TX * @hw: pointer to the HW structure * * Enables packet filtering on transmit packets if manageability is enabled * and host interface is enabled. * Currently no func pointer exists and all implementations are handled in the * generic version of this function. **/ bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw) { return e1000_enable_tx_pkt_filtering_generic(hw); } /** * e1000_mng_host_if_write - Writes to the manageability host interface * @hw: pointer to the HW structure * @buffer: pointer to the host interface buffer * @length: size of the buffer * @offset: location in the buffer to write to * @sum: sum of the data (not checksum) * * This function writes the buffer content at the given offset on the host * interface. It also takes alignment into account so that the writes are * done in the most efficient way, and accumulates the checksum of the data * in the *sum parameter. **/ s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length, u16 offset, u8 *sum) { return e1000_mng_host_if_write_generic(hw, buffer, length, offset, sum); } /** * e1000_mng_write_cmd_header - Writes manageability command header * @hw: pointer to the HW structure * @hdr: pointer to the host interface command header * * Writes the command header after performing the checksum calculation. **/ s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, struct e1000_host_mng_command_header *hdr) { return e1000_mng_write_cmd_header_generic(hw, hdr); } /** * e1000_mng_enable_host_if - Checks whether the host interface is enabled * @hw: pointer to the HW structure * * Returns E1000_SUCCESS upon success, else E1000_ERR_HOST_INTERFACE_COMMAND * * This function checks whether the HOST IF is enabled for command operation * and also checks whether the previous command is completed. It busy-waits * if the previous command has not completed. **/ s32 e1000_mng_enable_host_if(struct e1000_hw *hw) { return e1000_mng_enable_host_if_generic(hw); } /** * e1000_set_obff_timer - Set Optimized Buffer Flush/Fill timer * @hw: pointer to the HW structure * @itr: u32 indicating itr value * * Set the OBFF timer based on the given interrupt rate.
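 * A guarded call sketch (the 8000 here is a placeholder itr value):
 *	if (e1000_set_obff_timer(hw, 8000) != E1000_SUCCESS)
 *		DEBUGOUT("Error setting OBFF timer\n");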
**/ s32 e1000_set_obff_timer(struct e1000_hw *hw, u32 itr) { if (hw->mac.ops.set_obff_timer) return hw->mac.ops.set_obff_timer(hw, itr); return E1000_SUCCESS; } /** * e1000_check_reset_block - Verifies PHY can be reset * @hw: pointer to the HW structure * * Checks if the PHY is in a state that can be reset or if manageability * has it tied up. This is a function pointer entry point called by drivers. **/ s32 e1000_check_reset_block(struct e1000_hw *hw) { if (hw->phy.ops.check_reset_block) return hw->phy.ops.check_reset_block(hw); return E1000_SUCCESS; } /** * e1000_read_phy_reg - Reads PHY register * @hw: pointer to the HW structure * @offset: the register to read * @data: the buffer to store the 16-bit read. * * Reads the PHY register and returns the value in data. * This is a function pointer entry point called by drivers. **/ s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data) { if (hw->phy.ops.read_reg) return hw->phy.ops.read_reg(hw, offset, data); return E1000_SUCCESS; } /** * e1000_write_phy_reg - Writes PHY register * @hw: pointer to the HW structure * @offset: the register to write * @data: the value to write. * * Writes the PHY register at offset with the value in data. * This is a function pointer entry point called by drivers. **/ s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data) { if (hw->phy.ops.write_reg) return hw->phy.ops.write_reg(hw, offset, data); return E1000_SUCCESS; } /** * e1000_release_phy - Generic release PHY * @hw: pointer to the HW structure * * Returns immediately if the silicon family does not require a semaphore * when accessing the PHY. **/ void e1000_release_phy(struct e1000_hw *hw) { if (hw->phy.ops.release) hw->phy.ops.release(hw); } /** * e1000_acquire_phy - Generic acquire PHY * @hw: pointer to the HW structure * * Returns success if the silicon family does not require a semaphore when * accessing the PHY. **/ s32 e1000_acquire_phy(struct e1000_hw *hw) { if (hw->phy.ops.acquire) return hw->phy.ops.acquire(hw); return E1000_SUCCESS; } /** * e1000_cfg_on_link_up - Configure PHY upon link up * @hw: pointer to the HW structure **/ s32 e1000_cfg_on_link_up(struct e1000_hw *hw) { if (hw->phy.ops.cfg_on_link_up) return hw->phy.ops.cfg_on_link_up(hw); return E1000_SUCCESS; } /** * e1000_read_kmrn_reg - Reads register using Kumeran interface * @hw: pointer to the HW structure * @offset: the register to read * @data: the location to store the 16-bit value read. * * Reads a register out of the Kumeran interface. Currently no func pointer * exists and all implementations are handled in the generic version of * this function. **/ s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data) { return e1000_read_kmrn_reg_generic(hw, offset, data); } /** * e1000_write_kmrn_reg - Writes register using Kumeran interface * @hw: pointer to the HW structure * @offset: the register to write * @data: the value to write. * * Writes a register to the Kumeran interface. Currently no func pointer * exists and all implementations are handled in the generic version of * this function. **/ s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data) { return e1000_write_kmrn_reg_generic(hw, offset, data); } /** * e1000_get_cable_length - Retrieves cable length estimation * @hw: pointer to the HW structure * * This function estimates the cable length and stores the estimates in * hw->phy.min_length and hw->phy.max_length. This is a function pointer * entry point called by drivers.
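 * For example (illustrative only; lengths are reported in meters, per
 * the PHY cable length tables):
 *	if (e1000_get_cable_length(hw) == E1000_SUCCESS)
 *		DEBUGOUT2("cable length: %u-%u m\n",
 *			  hw->phy.min_length, hw->phy.max_length);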
**/ s32 e1000_get_cable_length(struct e1000_hw *hw) { if (hw->phy.ops.get_cable_length) return hw->phy.ops.get_cable_length(hw); return E1000_SUCCESS; } /** * e1000_get_phy_info - Retrieves PHY information from registers * @hw: pointer to the HW structure * * This function gets some information from various PHY registers and * populates hw->phy values with it. This is a function pointer entry * point called by drivers. **/ s32 e1000_get_phy_info(struct e1000_hw *hw) { if (hw->phy.ops.get_info) return hw->phy.ops.get_info(hw); return E1000_SUCCESS; } /** * e1000_phy_hw_reset - Hard PHY reset * @hw: pointer to the HW structure * * Performs a hard PHY reset. This is a function pointer entry point called * by drivers. **/ s32 e1000_phy_hw_reset(struct e1000_hw *hw) { if (hw->phy.ops.reset) return hw->phy.ops.reset(hw); return E1000_SUCCESS; } /** * e1000_phy_commit - Soft PHY reset * @hw: pointer to the HW structure * * Performs a soft PHY reset on those that apply. This is a function pointer * entry point called by drivers. **/ s32 e1000_phy_commit(struct e1000_hw *hw) { if (hw->phy.ops.commit) return hw->phy.ops.commit(hw); return E1000_SUCCESS; } /** * e1000_set_d0_lplu_state - Sets low power link up state for D0 * @hw: pointer to the HW structure * @active: boolean used to enable/disable lplu * * Success returns 0, Failure returns 1 * * The low power link up (lplu) state is set to the power management level D0 * and SmartSpeed is disabled when active is TRUE, else clear lplu for D0 * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU * is used during Dx states where the power conservation is most important. * During driver activity, SmartSpeed should be enabled so performance is * maintained. This is a function pointer entry point called by drivers. **/ s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active) { if (hw->phy.ops.set_d0_lplu_state) return hw->phy.ops.set_d0_lplu_state(hw, active); return E1000_SUCCESS; } /** * e1000_set_d3_lplu_state - Sets low power link up state for D3 * @hw: pointer to the HW structure * @active: boolean used to enable/disable lplu * * Success returns 0, Failure returns 1 * * The low power link up (lplu) state is set to the power management level D3 * and SmartSpeed is disabled when active is TRUE, else clear lplu for D3 * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU * is used during Dx states where the power conservation is most important. * During driver activity, SmartSpeed should be enabled so performance is * maintained. This is a function pointer entry point called by drivers. **/ s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) { if (hw->phy.ops.set_d3_lplu_state) return hw->phy.ops.set_d3_lplu_state(hw, active); return E1000_SUCCESS; } /** * e1000_read_mac_addr - Reads MAC address * @hw: pointer to the HW structure * * Reads the MAC address out of the adapter and stores it in the HW structure. * Currently no func pointer exists and all implementations are handled in the * generic version of this function. **/ s32 e1000_read_mac_addr(struct e1000_hw *hw) { if (hw->mac.ops.read_mac_addr) return hw->mac.ops.read_mac_addr(hw); return e1000_read_mac_addr_generic(hw); } /** * e1000_read_pba_string - Read device part number string * @hw: pointer to the HW structure * @pba_num: pointer to device part number * @pba_num_size: size of part number buffer * * Reads the product board assembly (PBA) number from the EEPROM and stores * the value in pba_num. 
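 * For example (a sketch; the buffer size here is arbitrary):
 *	u8 pba[32];
 *	if (e1000_read_pba_string(hw, pba, sizeof(pba)) == E1000_SUCCESS)
 *		DEBUGOUT1("PBA number: %s\n", pba);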
* Currently no func pointer exists and all implementations are handled in the * generic version of this function. **/ s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size) { return e1000_read_pba_string_generic(hw, pba_num, pba_num_size); } /** * e1000_read_pba_length - Read device part number string length * @hw: pointer to the HW structure * @pba_num_size: size of part number buffer * * Reads the product board assembly (PBA) number length from the EEPROM and * stores the value in pba_num_size. * Currently no func pointer exists and all implementations are handled in the * generic version of this function. **/ s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size) { return e1000_read_pba_length_generic(hw, pba_num_size); } /** * e1000_validate_nvm_checksum - Verifies NVM (EEPROM) checksum * @hw: pointer to the HW structure * * Validates the NVM checksum is correct. This is a function pointer entry * point called by drivers. **/ s32 e1000_validate_nvm_checksum(struct e1000_hw *hw) { if (hw->nvm.ops.validate) return hw->nvm.ops.validate(hw); return -E1000_ERR_CONFIG; } /** * e1000_update_nvm_checksum - Updates NVM (EEPROM) checksum * @hw: pointer to the HW structure * * Updates the NVM checksum. Currently no func pointer exists and all * implementations are handled in the generic version of this function. **/ s32 e1000_update_nvm_checksum(struct e1000_hw *hw) { if (hw->nvm.ops.update) return hw->nvm.ops.update(hw); return -E1000_ERR_CONFIG; } /** * e1000_reload_nvm - Reloads EEPROM * @hw: pointer to the HW structure * * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the * extended control register. **/ void e1000_reload_nvm(struct e1000_hw *hw) { if (hw->nvm.ops.reload) hw->nvm.ops.reload(hw); } /** * e1000_read_nvm - Reads NVM (EEPROM) * @hw: pointer to the HW structure * @offset: the word offset to read * @words: number of 16-bit words to read * @data: pointer to the properly sized buffer for the data. * * Reads 16-bit chunks of data from the NVM (EEPROM). This is a function * pointer entry point called by drivers. **/ s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) { if (hw->nvm.ops.read) return hw->nvm.ops.read(hw, offset, words, data); return -E1000_ERR_CONFIG; } /** * e1000_write_nvm - Writes to NVM (EEPROM) * @hw: pointer to the HW structure * @offset: the word offset to write * @words: number of 16-bit words to write * @data: pointer to the properly sized buffer for the data. * * Writes 16-bit chunks of data to the NVM (EEPROM). This is a function * pointer entry point called by drivers. **/ s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) { if (hw->nvm.ops.write) return hw->nvm.ops.write(hw, offset, words, data); return E1000_SUCCESS; } /** * e1000_write_8bit_ctrl_reg - Writes 8bit Control register * @hw: pointer to the HW structure * @reg: 32bit register offset * @offset: the register to write * @data: the value to write. * * Writes the 8-bit control register at offset with the value in data. * This is a function pointer entry point called by drivers. **/ s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset, u8 data) { return e1000_write_8bit_ctrl_reg_generic(hw, reg, offset, data); } /** * e1000_power_up_phy - Restores link in case of PHY power down * @hw: pointer to the HW structure * * The phy may be powered down to save power, to turn off link when the * driver is unloaded, or wake on lan is not enabled (among others).
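 * A typical driver pairing (sketch; e1000_power_up_phy also re-runs
 * e1000_setup_link, as seen below):
 *	e1000_power_up_phy(hw);		/* resume/attach path */
 *	...
 *	e1000_power_down_phy(hw);	/* suspend/detach path */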
**/ void e1000_power_up_phy(struct e1000_hw *hw) { if (hw->phy.ops.power_up) hw->phy.ops.power_up(hw); e1000_setup_link(hw); } /** * e1000_power_down_phy - Power down PHY * @hw: pointer to the HW structure * * The phy may be powered down to save power, to turn off link when the * driver is unloaded, or wake on lan is not enabled (among others). **/ void e1000_power_down_phy(struct e1000_hw *hw) { if (hw->phy.ops.power_down) hw->phy.ops.power_down(hw); } /** * e1000_power_up_fiber_serdes_link - Power up serdes link * @hw: pointer to the HW structure * * Power on the optics and PCS. **/ void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw) { if (hw->mac.ops.power_up_serdes) hw->mac.ops.power_up_serdes(hw); } /** * e1000_shutdown_fiber_serdes_link - Remove link during power down * @hw: pointer to the HW structure * * Shutdown the optics and PCS on driver unload. **/ void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw) { if (hw->mac.ops.shutdown_serdes) hw->mac.ops.shutdown_serdes(hw); } Index: stable/10/sys/dev/e1000/e1000_defines.h =================================================================== --- stable/10/sys/dev/e1000/e1000_defines.h (revision 296054) +++ stable/10/sys/dev/e1000/e1000_defines.h (revision 296055) @@ -1,1472 +1,1479 @@ /****************************************************************************** Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/ /*$FreeBSD$*/ #ifndef _E1000_DEFINES_H_ #define _E1000_DEFINES_H_ /* Number of Transmit and Receive Descriptors must be a multiple of 8 */ #define REQ_TX_DESCRIPTOR_MULTIPLE 8 #define REQ_RX_DESCRIPTOR_MULTIPLE 8 /* Definitions for power management and wakeup registers */ /* Wake Up Control */ #define E1000_WUC_APME 0x00000001 /* APM Enable */ #define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ #define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */ #define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */ #define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */ /* Wake Up Filter Control */ #define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ #define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ #define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ #define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ #define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ #define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ #define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ #define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ /* Wake Up Status */ #define E1000_WUS_LNKC E1000_WUFC_LNKC #define E1000_WUS_MAG E1000_WUFC_MAG #define E1000_WUS_EX E1000_WUFC_EX #define E1000_WUS_MC E1000_WUFC_MC #define E1000_WUS_BC E1000_WUFC_BC /* Extended Device Control */ #define E1000_CTRL_EXT_LPCD 0x00000004 /* LCD Power Cycle Done */ #define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* SW Definable Pin 4 data */ #define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* SW Definable Pin 6 data */ #define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* SW Definable Pin 3 data */ /* SDP 4/5 (bits 8,9) are reserved in >= 82575 */ #define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */ #define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */ #define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* Direction of SDP3 0=in 1=out */ #define E1000_CTRL_EXT_FORCE_SMBUS 0x00000800 /* Force SMBus mode */ #define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ /* Physical Func Reset Done Indication */ #define E1000_CTRL_EXT_PFRSTD 0x00004000 #define E1000_CTRL_EXT_SDLPE 0X00040000 /* SerDes Low Power Enable */ #define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ #define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ #define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clk Gating */ #define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 /* Offset of the link mode field in Ctrl Ext register */ #define E1000_CTRL_EXT_LINK_MODE_OFFSET 22 #define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 #define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 #define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 #define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 #define E1000_CTRL_EXT_EIAME 0x01000000 #define E1000_CTRL_EXT_IRCA 0x00000001 #define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */ #define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */ #define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ #define E1000_CTRL_EXT_LSECCK 0x00001000 #define E1000_CTRL_EXT_PHYPDEN 0x00100000 #define E1000_I2CCMD_REG_ADDR_SHIFT 16 #define E1000_I2CCMD_PHY_ADDR_SHIFT 24 #define E1000_I2CCMD_OPCODE_READ 0x08000000 #define E1000_I2CCMD_OPCODE_WRITE 0x00000000 #define E1000_I2CCMD_READY 0x20000000 #define E1000_I2CCMD_ERROR 0x80000000 #define 
E1000_I2CCMD_SFP_DATA_ADDR(a) (0x0000 + (a)) #define E1000_I2CCMD_SFP_DIAG_ADDR(a) (0x0100 + (a)) #define E1000_MAX_SGMII_PHY_REG_ADDR 255 #define E1000_I2CCMD_PHY_TIMEOUT 200 #define E1000_IVAR_VALID 0x80 #define E1000_GPIE_NSICR 0x00000001 #define E1000_GPIE_MSIX_MODE 0x00000010 #define E1000_GPIE_EIAME 0x40000000 #define E1000_GPIE_PBA 0x80000000 /* Receive Descriptor bit definitions */ #define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ #define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ #define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ #define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ #define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ #define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ #define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ #define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ #define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */ #define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ #define E1000_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ #define E1000_RXD_ERR_CE 0x01 /* CRC Error */ #define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ #define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ #define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ #define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ #define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ #define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ #define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ #define E1000_RXDEXT_STATERR_TST 0x00000100 /* Time Stamp taken */ #define E1000_RXDEXT_STATERR_LB 0x00040000 #define E1000_RXDEXT_STATERR_CE 0x01000000 #define E1000_RXDEXT_STATERR_SE 0x02000000 #define E1000_RXDEXT_STATERR_SEQ 0x04000000 #define E1000_RXDEXT_STATERR_CXE 0x10000000 #define E1000_RXDEXT_STATERR_TCPE 0x20000000 #define E1000_RXDEXT_STATERR_IPE 0x40000000 #define E1000_RXDEXT_STATERR_RXE 0x80000000 /* mask to determine if packets should be dropped due to frame errors */ #define E1000_RXD_ERR_FRAME_ERR_MASK ( \ E1000_RXD_ERR_CE | \ E1000_RXD_ERR_SE | \ E1000_RXD_ERR_SEQ | \ E1000_RXD_ERR_CXE | \ E1000_RXD_ERR_RXE) /* Same mask, but for extended and packet split descriptors */ #define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ E1000_RXDEXT_STATERR_CE | \ E1000_RXDEXT_STATERR_SE | \ E1000_RXDEXT_STATERR_SEQ | \ E1000_RXDEXT_STATERR_CXE | \ E1000_RXDEXT_STATERR_RXE) #define E1000_MRQC_RSS_ENABLE_2Q 0x00000001 #define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 #define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 #define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 #define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 #define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000 #define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 #define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 #define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 /* Management Control */ #define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ #define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ #define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ #define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ #define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ /* Enable MAC address filtering */ #define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MNG packets to host memory */ #define E1000_MANC_EN_MNG2HOST 0x00200000 #define E1000_MANC2H_PORT_623 0x00000020 /* Port 0x26f */ #define E1000_MANC2H_PORT_664 0x00000040 /* Port 0x298 */ #define E1000_MDEF_PORT_623 0x00000800 /* Port 0x26f */ #define E1000_MDEF_PORT_664 0x00000400 /* Port 0x298 */ 
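/* Example: a receive path typically drops a completed descriptor whose
 * error bits intersect E1000_RXD_ERR_FRAME_ERR_MASK above (a sketch;
 * rxd is a hypothetical pointer to a legacy struct e1000_rx_desc and
 * drop_packet() is a placeholder):
 *
 *	if ((rxd->status & E1000_RXD_STAT_DD) &&
 *	    (rxd->errors & E1000_RXD_ERR_FRAME_ERR_MASK))
 *		drop_packet();
 */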
/* Receive Control */ #define E1000_RCTL_RST 0x00000001 /* Software reset */ #define E1000_RCTL_EN 0x00000002 /* enable */ #define E1000_RCTL_SBP 0x00000004 /* store bad packet */ #define E1000_RCTL_UPE 0x00000008 /* unicast promisc enable */ #define E1000_RCTL_MPE 0x00000010 /* multicast promisc enable */ #define E1000_RCTL_LPE 0x00000020 /* long packet enable */ #define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ #define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ #define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ #define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ #define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */ +#define E1000_RCTL_RDMTS_HEX 0x00010000 +#define E1000_RCTL_RDMTS1_HEX E1000_RCTL_RDMTS_HEX #define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ #define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ #define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ /* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ #define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */ #define E1000_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */ #define E1000_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */ #define E1000_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */ /* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ #define E1000_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */ #define E1000_RCTL_SZ_8192 0x00020000 /* Rx buffer size 8192 */ #define E1000_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */ #define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ #define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ #define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ #define E1000_RCTL_DPF 0x00400000 /* discard pause frames */ #define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ #define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ #define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ /* Use byte values for the following shift parameters * Usage: * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & * E1000_PSRCTL_BSIZE0_MASK) | * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & * E1000_PSRCTL_BSIZE1_MASK) | * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & * E1000_PSRCTL_BSIZE2_MASK) | * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) & * E1000_PSRCTL_BSIZE3_MASK)) * where value0 = [128..16256], default=256 * value1 = [1024..64512], default=4096 * value2 = [0..64512], default=4096 * value3 = [0..64512], default=0 */ #define E1000_PSRCTL_BSIZE0_MASK 0x0000007F #define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 #define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 #define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 #define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ #define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ #define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ #define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ /* SWFW_SYNC Definitions */ #define E1000_SWFW_EEP_SM 0x01 #define E1000_SWFW_PHY0_SM 0x02 #define E1000_SWFW_PHY1_SM 0x04 #define E1000_SWFW_CSR_SM 0x08 #define E1000_SWFW_PHY2_SM 0x20 #define E1000_SWFW_PHY3_SM 0x40 #define E1000_SWFW_SW_MNG_SM 0x400 /* Device Control */ #define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ #define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ #define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master reqs */ #define E1000_CTRL_LRST 0x00000008 /* Link reset.
0=normal,1=reset */ #define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ #define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ #define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ #define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ #define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ #define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ #define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ #define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ #define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ #define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */ #define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */ #define E1000_CTRL_MEHE 0x00080000 /* Memory Error Handling Enable */ #define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ #define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ #define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ #define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */ #define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */ #define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ #define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */ #define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */ #define E1000_CTRL_RST 0x04000000 /* Global reset */ #define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ #define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ #define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ #define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ #define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */ #define E1000_CTRL_MDIO_DIR E1000_CTRL_SWDPIO2 #define E1000_CTRL_MDIO E1000_CTRL_SWDPIN2 #define E1000_CTRL_MDC_DIR E1000_CTRL_SWDPIO3 #define E1000_CTRL_MDC E1000_CTRL_SWDPIN3 #define E1000_CONNSW_ENRGSRC 0x4 #define E1000_CONNSW_PHYSD 0x400 #define E1000_CONNSW_PHY_PDN 0x800 #define E1000_CONNSW_SERDESD 0x200 #define E1000_CONNSW_AUTOSENSE_CONF 0x2 #define E1000_CONNSW_AUTOSENSE_EN 0x1 #define E1000_PCS_CFG_PCS_EN 8 #define E1000_PCS_LCTL_FLV_LINK_UP 1 #define E1000_PCS_LCTL_FSV_10 0 #define E1000_PCS_LCTL_FSV_100 2 #define E1000_PCS_LCTL_FSV_1000 4 #define E1000_PCS_LCTL_FDV_FULL 8 #define E1000_PCS_LCTL_FSD 0x10 #define E1000_PCS_LCTL_FORCE_LINK 0x20 #define E1000_PCS_LCTL_FORCE_FCTRL 0x80 #define E1000_PCS_LCTL_AN_ENABLE 0x10000 #define E1000_PCS_LCTL_AN_RESTART 0x20000 #define E1000_PCS_LCTL_AN_TIMEOUT 0x40000 #define E1000_ENABLE_SERDES_LOOPBACK 0x0410 #define E1000_PCS_LSTS_LINK_OK 1 #define E1000_PCS_LSTS_SPEED_100 2 #define E1000_PCS_LSTS_SPEED_1000 4 #define E1000_PCS_LSTS_DUPLEX_FULL 8 #define E1000_PCS_LSTS_SYNK_OK 0x10 #define E1000_PCS_LSTS_AN_COMPLETE 0x10000 /* Device Status */ #define E1000_STATUS_FD 0x00000001 /* Duplex 0=half 1=full */ #define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ #define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ #define E1000_STATUS_FUNC_SHIFT 2 #define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ #define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ #define E1000_STATUS_SPEED_MASK 0x000000C0 #define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ #define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ #define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ #define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Compltn by NVM */ #define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */ #define 
E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master request status */ #define E1000_STATUS_PCI66 0x00000800 /* In 66MHz slot */ #define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ #define E1000_STATUS_2P5_SKU 0x00001000 /* Val of 2.5GBE SKU strap */ #define E1000_STATUS_2P5_SKU_OVER 0x00002000 /* Val of 2.5GBE SKU Over */ #define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */ #define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */ /* Constants used to interpret the masked PCI-X bus speed. */ #define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus spd 50-66MHz */ #define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus spd 66-100MHz */ #define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus spd 100-133MHz*/ #define SPEED_10 10 #define SPEED_100 100 #define SPEED_1000 1000 #define SPEED_2500 2500 #define HALF_DUPLEX 1 #define FULL_DUPLEX 2 #define PHY_FORCE_TIME 20 #define ADVERTISE_10_HALF 0x0001 #define ADVERTISE_10_FULL 0x0002 #define ADVERTISE_100_HALF 0x0004 #define ADVERTISE_100_FULL 0x0008 #define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ #define ADVERTISE_1000_FULL 0x0020 /* 1000/H is not supported, nor spec-compliant. */ #define E1000_ALL_SPEED_DUPLEX ( \ ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ ADVERTISE_100_FULL | ADVERTISE_1000_FULL) #define E1000_ALL_NOT_GIG ( \ ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ ADVERTISE_100_FULL) #define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) #define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) #define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) #define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX /* LED Control */ #define E1000_PHY_LED0_MODE_MASK 0x00000007 #define E1000_PHY_LED0_IVRT 0x00000008 #define E1000_PHY_LED0_MASK 0x0000001F #define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F #define E1000_LEDCTL_LED0_MODE_SHIFT 0 #define E1000_LEDCTL_LED0_IVRT 0x00000040 #define E1000_LEDCTL_LED0_BLINK 0x00000080 #define E1000_LEDCTL_MODE_LINK_UP 0x2 #define E1000_LEDCTL_MODE_LED_ON 0xE #define E1000_LEDCTL_MODE_LED_OFF 0xF /* Transmit Descriptor bit definitions */ #define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ #define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */ #define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ #define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ #define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ #define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ #define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ #define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ #define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ #define E1000_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */ #define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ #define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ #define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ #define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ #define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ #define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ #define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ #define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ #define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ #define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ #define E1000_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */ /* Transmit Control */ #define E1000_TCTL_EN 0x00000002 /* enable Tx */ #define E1000_TCTL_PSP 0x00000008
/* pad short packets */ #define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ #define E1000_TCTL_COLD 0x003ff000 /* collision distance */ #define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ #define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ /* Transmit Arbitration Count */ #define E1000_TARC0_ENABLE 0x00000400 /* Enable Tx Queue 0 */ /* SerDes Control */ #define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 #define E1000_SCTL_ENABLE_SERDES_LOOPBACK 0x0410 /* Receive Checksum Control */ #define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ #define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ #define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ #define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ #define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ /* Header split receive */ #define E1000_RFCTL_NFSW_DIS 0x00000040 #define E1000_RFCTL_NFSR_DIS 0x00000080 #define E1000_RFCTL_ACK_DIS 0x00001000 #define E1000_RFCTL_EXTEN 0x00008000 #define E1000_RFCTL_IPV6_EX_DIS 0x00010000 #define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 #define E1000_RFCTL_LEF 0x00040000 /* Collision related configuration parameters */ #define E1000_COLLISION_THRESHOLD 15 #define E1000_CT_SHIFT 4 #define E1000_COLLISION_DISTANCE 63 #define E1000_COLD_SHIFT 12 /* Default values for the transmit IPG register */ #define DEFAULT_82542_TIPG_IPGT 10 #define DEFAULT_82543_TIPG_IPGT_FIBER 9 #define DEFAULT_82543_TIPG_IPGT_COPPER 8 #define E1000_TIPG_IPGT_MASK 0x000003FF #define DEFAULT_82542_TIPG_IPGR1 2 #define DEFAULT_82543_TIPG_IPGR1 8 #define E1000_TIPG_IPGR1_SHIFT 10 #define DEFAULT_82542_TIPG_IPGR2 10 #define DEFAULT_82543_TIPG_IPGR2 6 #define DEFAULT_80003ES2LAN_TIPG_IPGR2 7 #define E1000_TIPG_IPGR2_SHIFT 20 /* Ethertype field values */ #define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ #define ETHERNET_FCS_SIZE 4 #define MAX_JUMBO_FRAME_SIZE 0x3F00 #define E1000_TX_PTR_GAP 0x1F /* Extended Configuration Control and Size */ #define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 #define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 #define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008 #define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 #define E1000_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080 #define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000 #define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16 #define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000 #define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16 #define E1000_PHY_CTRL_D0A_LPLU 0x00000002 #define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 #define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 #define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 #define E1000_KABGTXD_BGSQLBIAS 0x00050000 /* Low Power IDLE Control */ #define E1000_LPIC_LPIET_SHIFT 24 /* Low Power Idle Entry Time */ /* PBA constants */ #define E1000_PBA_8K 0x0008 /* 8KB */ #define E1000_PBA_10K 0x000A /* 10KB */ #define E1000_PBA_12K 0x000C /* 12KB */ #define E1000_PBA_14K 0x000E /* 14KB */ #define E1000_PBA_16K 0x0010 /* 16KB */ #define E1000_PBA_18K 0x0012 #define E1000_PBA_20K 0x0014 #define E1000_PBA_22K 0x0016 #define E1000_PBA_24K 0x0018 #define E1000_PBA_26K 0x001A #define E1000_PBA_30K 0x001E #define E1000_PBA_32K 0x0020 #define E1000_PBA_34K 0x0022 #define E1000_PBA_35K 0x0023 #define E1000_PBA_38K 0x0026 #define E1000_PBA_40K 0x0028 #define E1000_PBA_48K 0x0030 /* 48KB */ #define E1000_PBA_64K 0x0040 /* 64KB */ #define E1000_PBA_RXA_MASK 0xFFFF #define E1000_PBS_16K E1000_PBA_16K /* 
Uncorrectable/correctable ECC Error counts and enable bits */ #define E1000_PBECCSTS_CORR_ERR_CNT_MASK 0x000000FF #define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK 0x0000FF00 #define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT 8 #define E1000_PBECCSTS_ECC_ENABLE 0x00010000 #define IFS_MAX 80 #define IFS_MIN 40 #define IFS_RATIO 4 #define IFS_STEP 10 #define MIN_NUM_XMITS 1000 /* SW Semaphore Register */ #define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ #define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ #define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ #define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */ /* Interrupt Cause Read */ #define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ #define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */ #define E1000_ICR_LSC 0x00000004 /* Link Status Change */ #define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */ #define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */ #define E1000_ICR_RXO 0x00000040 /* Rx overrun */ #define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ #define E1000_ICR_VMMB 0x00000100 /* VM MB event */ #define E1000_ICR_RXCFG 0x00000400 /* Rx /c/ ordered set */ #define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ #define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */ #define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */ #define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ #define E1000_ICR_TXD_LOW 0x00008000 #define E1000_ICR_MNG 0x00040000 /* Manageability event */ #define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */ #define E1000_ICR_TS 0x00080000 /* Time Sync Interrupt */ #define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */ /* If this bit asserted, the driver should claim the interrupt */ #define E1000_ICR_INT_ASSERTED 0x80000000 #define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */ #define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */ #define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */ #define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */ #define E1000_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */ #define E1000_ICR_OTHER 0x01000000 /* Other Interrupts */ #define E1000_ICR_FER 0x00400000 /* Fatal Error */ #define E1000_ICR_THS 0x00800000 /* ICR.THS: Thermal Sensor Event*/ #define E1000_ICR_MDDET 0x10000000 /* Malicious Driver Detect */ #define E1000_ITR_MASK 0x000FFFFF /* ITR value bitfield */ #define E1000_ITR_MULT 256 /* ITR multiplier in nsec */ /* PBA ECC Register */ #define E1000_PBA_ECC_COUNTER_MASK 0xFFF00000 /* ECC counter mask */ #define E1000_PBA_ECC_COUNTER_SHIFT 20 /* ECC counter shift value */ #define E1000_PBA_ECC_CORR_EN 0x00000001 /* Enable ECC error correction */ #define E1000_PBA_ECC_STAT_CLR 0x00000002 /* Clear ECC error counter */ #define E1000_PBA_ECC_INT_EN 0x00000004 /* Enable ICR bit 5 on ECC error */ /* Extended Interrupt Cause Read */ #define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */ #define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */ #define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */ #define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */ #define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */ #define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */ #define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */ #define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */ #define E1000_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ #define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active
*/ /* TCP Timer */ #define E1000_TCPTIMER_KS 0x00000100 /* KickStart */ #define E1000_TCPTIMER_COUNT_ENABLE 0x00000200 /* Count Enable */ #define E1000_TCPTIMER_COUNT_FINISH 0x00000400 /* Count finish */ #define E1000_TCPTIMER_LOOP 0x00000800 /* Loop */ /* This defines the bits that are set in the Interrupt Mask * Set/Read Register. Each bit is documented below: * o RXT0 = Receiver Timer Interrupt (ring 0) * o TXDW = Transmit Descriptor Written Back * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) * o RXSEQ = Receive Sequence Error * o LSC = Link Status Change */ #define IMS_ENABLE_MASK ( \ E1000_IMS_RXT0 | \ E1000_IMS_TXDW | \ E1000_IMS_RXDMT0 | \ E1000_IMS_RXSEQ | \ E1000_IMS_LSC) /* Interrupt Mask Set */ #define E1000_IMS_TXDW E1000_ICR_TXDW /* Tx desc written back */ #define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ #define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ #define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */ #define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ #define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ #define E1000_IMS_RXO E1000_ICR_RXO /* Rx overrun */ #define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */ #define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW #define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */ #define E1000_IMS_TS E1000_ICR_TS /* Time Sync Interrupt */ #define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */ #define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */ #define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */ #define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */ #define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */ #define E1000_IMS_TXQ1 E1000_ICR_TXQ1 /* Tx Queue 1 Interrupt */ #define E1000_IMS_OTHER E1000_ICR_OTHER /* Other Interrupts */ #define E1000_IMS_FER E1000_ICR_FER /* Fatal Error */ #define E1000_IMS_THS E1000_ICR_THS /* ICR.TS: Thermal Sensor Event*/ #define E1000_IMS_MDDET E1000_ICR_MDDET /* Malicious Driver Detect */ /* Extended Interrupt Mask Set */ #define E1000_EIMS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */ #define E1000_EIMS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */ #define E1000_EIMS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */ #define E1000_EIMS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */ #define E1000_EIMS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */ #define E1000_EIMS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */ #define E1000_EIMS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */ #define E1000_EIMS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */ #define E1000_EIMS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */ #define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ /* Interrupt Cause Set */ #define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ #define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ #define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. 
threshold */ /* Extended Interrupt Cause Set */ #define E1000_EICS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */ #define E1000_EICS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */ #define E1000_EICS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */ #define E1000_EICS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */ #define E1000_EICS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */ #define E1000_EICS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */ #define E1000_EICS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */ #define E1000_EICS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */ #define E1000_EICS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */ #define E1000_EICS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ #define E1000_EITR_ITR_INT_MASK 0x0000FFFF /* E1000_EITR_CNT_IGNR is only for 82576 and newer */ #define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */ #define E1000_EITR_INTERVAL 0x00007FFC /* Transmit Descriptor Control */ #define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ #define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */ #define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */ #define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ #define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ #define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */ /* Enable the counting of descriptors still to be processed. */ #define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Flow Control Constants */ #define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 #define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 #define FLOW_CONTROL_TYPE 0x8808 /* 802.1q VLAN Packet Size */ #define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ #define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ /* Receive Address * Number of high/low register pairs in the RAR. The RAR (Receive Address * Registers) holds the directed and multicast addresses that we monitor. * Technically, we have 16 spots. However, we reserve one of these spots * (RAR[15]) for our directed address used by controllers with * manageability enabled, allowing us room for 15 multicast addresses. 
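 * For example, a driver normally programs its own station address into
 * the first entry (a sketch using the e1000_rar_set() entry point):
 *	e1000_rar_set(hw, hw->mac.addr, 0);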
*/ #define E1000_RAR_ENTRIES 15 #define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ #define E1000_RAL_MAC_ADDR_LEN 4 #define E1000_RAH_MAC_ADDR_LEN 2 #define E1000_RAH_QUEUE_MASK_82575 0x000C0000 #define E1000_RAH_POOL_1 0x00040000 /* Error Codes */ #define E1000_SUCCESS 0 #define E1000_ERR_NVM 1 #define E1000_ERR_PHY 2 #define E1000_ERR_CONFIG 3 #define E1000_ERR_PARAM 4 #define E1000_ERR_MAC_INIT 5 #define E1000_ERR_PHY_TYPE 6 #define E1000_ERR_RESET 9 #define E1000_ERR_MASTER_REQUESTS_PENDING 10 #define E1000_ERR_HOST_INTERFACE_COMMAND 11 #define E1000_BLK_PHY_RESET 12 #define E1000_ERR_SWFW_SYNC 13 #define E1000_NOT_IMPLEMENTED 14 #define E1000_ERR_MBX 15 #define E1000_ERR_INVALID_ARGUMENT 16 #define E1000_ERR_NO_SPACE 17 #define E1000_ERR_NVM_PBA_SECTION 18 #define E1000_ERR_I2C 19 #define E1000_ERR_INVM_VALUE_NOT_FOUND 20 /* Loop limit on how long we wait for auto-negotiation to complete */ #define FIBER_LINK_UP_LIMIT 50 #define COPPER_LINK_UP_LIMIT 10 #define PHY_AUTO_NEG_LIMIT 45 #define PHY_FORCE_LIMIT 20 /* Number of 100 microseconds we wait for PCI Express master disable */ #define MASTER_DISABLE_TIMEOUT 800 /* Number of milliseconds we wait for PHY configuration done after MAC reset */ #define PHY_CFG_TIMEOUT 100 /* Number of 2 milliseconds we wait for acquiring MDIO ownership. */ #define MDIO_OWNERSHIP_TIMEOUT 10 /* Number of milliseconds for NVM auto read done after MAC reset. */ #define AUTO_READ_DONE_TIMEOUT 10 /* Flow Control */ #define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ #define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ #define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ /* Transmit Configuration Word */ #define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ #define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ #define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ #define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ #define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ /* Receive Configuration Word */ #define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */ #define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ #define E1000_RXCW_C 0x20000000 /* Receive config */ #define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ #define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ #define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */ +/* HH Time Sync */ +#define E1000_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK 0x0000F000 /* max delay */ +#define E1000_TSYNCTXCTL_SYNC_COMP_ERR 0x20000000 /* sync err */ +#define E1000_TSYNCTXCTL_SYNC_COMP 0x40000000 /* sync complete */ +#define E1000_TSYNCTXCTL_START_SYNC 0x80000000 /* initiate sync */ + #define E1000_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */ #define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */ #define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00 #define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02 #define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 #define E1000_TSYNCRXCTL_TYPE_ALL 0x08 #define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A #define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */ #define E1000_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */ #define E1000_RXMTRL_PTP_V1_SYNC_MESSAGE 0x00000000 #define E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE 0x00010000 #define E1000_RXMTRL_PTP_V2_SYNC_MESSAGE 0x00000000 #define E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE 0x01000000 #define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF #define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00 #define 
E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01 #define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02 #define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03 #define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04 #define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00 #define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000 #define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100 #define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200 #define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300 #define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800 #define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900 #define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00 #define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00 #define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00 #define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00 #define E1000_TIMINCA_16NS_SHIFT 24 #define E1000_TIMINCA_INCPERIOD_SHIFT 24 #define E1000_TIMINCA_INCVALUE_MASK 0x00FFFFFF #define E1000_TSICR_TXTS 0x00000002 #define E1000_TSIM_TXTS 0x00000002 /* TUPLE Filtering Configuration */ #define E1000_TTQF_DISABLE_MASK 0xF0008000 /* TTQF Disable Mask */ #define E1000_TTQF_QUEUE_ENABLE 0x100 /* TTQF Queue Enable Bit */ #define E1000_TTQF_PROTOCOL_MASK 0xFF /* TTQF Protocol Mask */ /* TTQF TCP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */ #define E1000_TTQF_PROTOCOL_TCP 0x0 /* TTQF UDP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */ #define E1000_TTQF_PROTOCOL_UDP 0x1 /* TTQF SCTP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */ #define E1000_TTQF_PROTOCOL_SCTP 0x2 #define E1000_TTQF_PROTOCOL_SHIFT 5 /* TTQF Protocol Shift */ #define E1000_TTQF_QUEUE_SHIFT 16 /* TTQF Queue Shift */ #define E1000_TTQF_RX_QUEUE_MASK 0x70000 /* TTQF Queue Mask */ #define E1000_TTQF_MASK_ENABLE 0x10000000 /* TTQF Mask Enable Bit */ #define E1000_IMIR_CLEAR_MASK 0xF001FFFF /* IMIR Reg Clear Mask */ #define E1000_IMIR_PORT_BYPASS 0x20000 /* IMIR Port Bypass Bit */ #define E1000_IMIR_PRIORITY_SHIFT 29 /* IMIR Priority Shift */ #define E1000_IMIREXT_CLEAR_MASK 0x7FFFF /* IMIREXT Reg Clear Mask */ #define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */ #define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */ #define E1000_MDICNFG_PHY_MASK 0x03E00000 #define E1000_MDICNFG_PHY_SHIFT 21 #define E1000_MEDIA_PORT_COPPER 1 #define E1000_MEDIA_PORT_OTHER 2 #define E1000_M88E1112_AUTO_COPPER_SGMII 0x2 #define E1000_M88E1112_AUTO_COPPER_BASEX 0x3 #define E1000_M88E1112_STATUS_LINK 0x0004 /* Interface Link Bit */ #define E1000_M88E1112_MAC_CTRL_1 0x10 #define E1000_M88E1112_MAC_CTRL_1_MODE_MASK 0x0380 /* Mode Select */ #define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT 7 #define E1000_M88E1112_PAGE_ADDR 0x16 #define E1000_M88E1112_STATUS 0x01 #define E1000_THSTAT_LOW_EVENT 0x20000000 /* Low thermal threshold */ #define E1000_THSTAT_MID_EVENT 0x00200000 /* Mid thermal threshold */ #define E1000_THSTAT_HIGH_EVENT 0x00002000 /* High thermal threshold */ #define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */ #define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Spd Throttle Event */ /* I350 EEE defines */ #define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* IPCNFG EEE Ena 1G AN */ #define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* IPCNFG EEE Ena 100M AN */ #define E1000_EEER_TX_LPI_EN 0x00010000 /* EEER Tx LPI Enable */ #define E1000_EEER_RX_LPI_EN 0x00020000 /* EEER Rx LPI Enable */ #define E1000_EEER_LPI_FC 0x00040000 /* EEER Ena on Flow Cntrl */ /* EEE status */ #define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */ #define
E1000_EEER_RX_LPI_STATUS 0x40000000 /* Rx in LPI state */ #define E1000_EEER_TX_LPI_STATUS 0x80000000 /* Tx in LPI state */ #define E1000_EEE_LP_ADV_ADDR_I350 0x040F /* EEE LP Advertisement */ #define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */ #define E1000_M88E1543_EEE_CTRL_1 0x0 #define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */ +#define E1000_M88E1543_FIBER_CTRL 0x0 /* Fiber Control Register */ #define E1000_EEE_ADV_DEV_I354 7 #define E1000_EEE_ADV_ADDR_I354 60 #define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */ #define E1000_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */ #define E1000_PCS_STATUS_DEV_I354 3 #define E1000_PCS_STATUS_ADDR_I354 1 #define E1000_PCS_STATUS_RX_LPI_RCVD 0x0400 #define E1000_PCS_STATUS_TX_LPI_RCVD 0x0800 #define E1000_M88E1512_CFG_REG_1 0x0010 #define E1000_M88E1512_CFG_REG_2 0x0011 #define E1000_M88E1512_CFG_REG_3 0x0007 #define E1000_M88E1512_MODE 0x0014 #define E1000_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */ #define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */ #define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */ /* PCI Express Control */ #define E1000_GCR_RXD_NO_SNOOP 0x00000001 #define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 #define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 #define E1000_GCR_TXD_NO_SNOOP 0x00000008 #define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 #define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 #define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000 #define E1000_GCR_CMPL_TMOUT_10ms 0x00001000 #define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000 #define E1000_GCR_CAP_VER2 0x00040000 #define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ E1000_GCR_RXDSCW_NO_SNOOP | \ E1000_GCR_RXDSCR_NO_SNOOP | \ E1000_GCR_TXD_NO_SNOOP | \ E1000_GCR_TXDSCW_NO_SNOOP | \ E1000_GCR_TXDSCR_NO_SNOOP) #define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */ /* mPHY address control and data registers */ #define E1000_MPHY_ADDR_CTL 0x0024 /* Address Control Reg */ #define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000 #define E1000_MPHY_DATA 0x0E10 /* Data Register */ /* AFE CSR Offset for PCS CLK */ #define E1000_MPHY_PCS_CLK_REG_OFFSET 0x0004 /* Override for near end digital loopback. */ #define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10 /* PHY Control Register */ #define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ #define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ #define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ #define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ #define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ #define MII_CR_POWER_DOWN 0x0800 /* Power down */ #define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ #define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ #define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ #define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ #define MII_CR_SPEED_1000 0x0040 #define MII_CR_SPEED_100 0x2000 #define MII_CR_SPEED_10 0x0000 /* PHY Status Register */ #define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ #define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ #define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ #define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ #define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ #define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ #define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ #define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. 
status info in Reg 0x0F */ #define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ #define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ #define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ #define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ #define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ #define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ #define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ /* Autoneg Advertisement Register */ #define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ #define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ #define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ #define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ #define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ #define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ #define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ #define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ #define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ #define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ /* Link Partner Ability Register (Base Page) */ #define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */ #define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP 10T Half Dplx Capable */ #define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP 10T Full Dplx Capable */ #define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP 100TX Half Dplx Capable */ #define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP 100TX Full Dplx Capable */ #define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */ #define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ #define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asym Pause Direction bit */ #define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP detected Remote Fault */ #define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP rx'd link code word */ #define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */ /* Autoneg Expansion Register */ #define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */ #define NWAY_ER_PAGE_RXD 0x0002 /* New link code word page received */ #define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* Local device Next Page able */ #define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP Next Page able */ #define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* Parallel detection fault */ /* 1000BASE-T Control Register */ #define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */ #define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ #define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ /* 1=Repeater/switch device port 0=DTE device */ #define CR_1000T_REPEATER_DTE 0x0400 /* 1=Configure PHY as Master 0=Configure PHY as Slave */ #define CR_1000T_MS_VALUE 0x0800 /* 1=Master/Slave manual config value 0=Automatic Master/Slave config */ #define CR_1000T_MS_ENABLE 0x1000 #define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ #define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ #define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ #define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ #define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ /* 1000BASE-T Status Register */ #define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle err since last rd */ #define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asym pause direction bit */ #define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ #define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ #define SR_1000T_REMOTE_RX_STATUS 0x1000
/* Remote receiver OK */ #define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ #define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local Tx Master, 0=Slave */ #define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ #define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5 /* PHY 1000 MII Register/Bit Definitions */ /* PHY Registers defined by IEEE */ #define PHY_CONTROL 0x00 /* Control Register */ #define PHY_STATUS 0x01 /* Status Register */ #define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ #define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ #define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ #define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ #define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ #define PHY_NEXT_PAGE_TX 0x07 /* Next Page Tx */ #define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */ #define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ #define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ #define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ #define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */ /* NVM Control */ #define E1000_EECD_SK 0x00000001 /* NVM Clock */ #define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ #define E1000_EECD_DI 0x00000004 /* NVM Data In */ #define E1000_EECD_DO 0x00000008 /* NVM Data Out */ #define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ #define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ #define E1000_EECD_PRES 0x00000100 /* NVM Present */ #define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */ #define E1000_EECD_BLOCKED 0x00008000 /* Bit banging access blocked flag */ #define E1000_EECD_ABORT 0x00010000 /* NVM operation aborted flag */ #define E1000_EECD_TIMEOUT 0x00020000 /* NVM read operation timeout flag */ #define E1000_EECD_ERROR_CLR 0x00040000 /* NVM error status clear bit */ /* NVM Addressing bits based on type 0=small, 1=large */ #define E1000_EECD_ADDR_BITS 0x00000400 #define E1000_EECD_TYPE 0x00002000 /* NVM Type (1-SPI, 0-Microwire) */ -#ifndef E1000_NVM_GRANT_ATTEMPTS #define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ -#endif #define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ #define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ #define E1000_EECD_SIZE_EX_SHIFT 11 #define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ #define E1000_EECD_AUPDEN 0x00100000 /* Ena Auto FLASH update */ #define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ #define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES) #define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */ #define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done */ #define E1000_EECD_FLASH_DETECTED_I210 0x00080000 /* FLASH detected */ #define E1000_EECD_SEC1VAL_I210 0x02000000 /* Sector One Valid */ #define E1000_FLUDONE_ATTEMPTS 20000 #define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ #define E1000_I210_FIFO_SEL_RX 0x00 #define E1000_I210_FIFO_SEL_TX_QAV(_i) (0x02 + (_i)) #define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0) #define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06 #define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01 #define E1000_I210_FLASH_SECTOR_SIZE 0x1000 /* 4KB FLASH sector unit size */ /* Secure FLASH mode requires removing MSb */ #define E1000_I210_FW_PTR_MASK 0x7FFF /* Firmware code revision field word offset*/ #define E1000_I210_FW_VER_OFFSET 328 #define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write regs */ #define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ #define E1000_NVM_RW_REG_START 1 
/* Start operation */ #define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ #define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ #define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ #define E1000_FLASH_UPDATES 2000 /* NVM Word Offsets */ #define NVM_COMPAT 0x0003 #define NVM_ID_LED_SETTINGS 0x0004 #define NVM_SERDES_AMPLITUDE 0x0006 /* SERDES output amplitude */ #define NVM_PHY_CLASS_WORD 0x0007 #define E1000_I210_NVM_FW_MODULE_PTR 0x0010 #define E1000_I350_NVM_FW_MODULE_PTR 0x0051 #define NVM_FUTURE_INIT_WORD1 0x0019 #define NVM_MAC_ADDR 0x0000 #define NVM_SUB_DEV_ID 0x000B #define NVM_SUB_VEN_ID 0x000C #define NVM_DEV_ID 0x000D #define NVM_VEN_ID 0x000E #define NVM_INIT_CTRL_2 0x000F #define NVM_INIT_CTRL_4 0x0013 #define NVM_LED_1_CFG 0x001C #define NVM_LED_0_2_CFG 0x001F #define NVM_COMPAT_VALID_CSUM 0x0001 #define NVM_FUTURE_INIT_WORD1_VALID_CSUM 0x0040 #define NVM_INIT_CONTROL2_REG 0x000F #define NVM_INIT_CONTROL3_PORT_B 0x0014 #define NVM_INIT_3GIO_3 0x001A #define NVM_SWDEF_PINS_CTRL_PORT_0 0x0020 #define NVM_INIT_CONTROL3_PORT_A 0x0024 #define NVM_CFG 0x0012 #define NVM_ALT_MAC_ADDR_PTR 0x0037 #define NVM_CHECKSUM_REG 0x003F #define NVM_COMPATIBILITY_REG_3 0x0003 #define NVM_COMPATIBILITY_BIT_MASK 0x8000 #define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */ #define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */ #define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */ #define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */ #define NVM_82580_LAN_FUNC_OFFSET(a) ((a) ? (0x40 + (0x40 * (a))) : 0) /* Mask bits for fields in Word 0x24 of the NVM */ #define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */ #define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed extrnl */ /* Offset of Link Mode bits for 82575/82576 */ #define NVM_WORD24_LNK_MODE_OFFSET 8 /* Offset of Link Mode bits for 82580 up */ #define NVM_WORD24_82580_LNK_MODE_OFFSET 4 /* Mask bits for fields in Word 0x0f of the NVM */ #define NVM_WORD0F_PAUSE_MASK 0x3000 #define NVM_WORD0F_PAUSE 0x1000 #define NVM_WORD0F_ASM_DIR 0x2000 #define NVM_WORD0F_SWPDIO_EXT_MASK 0x00F0 /* Mask bits for fields in Word 0x1a of the NVM */ #define NVM_WORD1A_ASPM_MASK 0x000C /* Mask bits for fields in Word 0x03 of the EEPROM */ #define NVM_COMPAT_LOM 0x0800 /* length of string needed to store PBA number */ #define E1000_PBANUM_LENGTH 11 /* For checksumming, the sum of all words in the NVM should equal 0xBABA. 
*/ #define NVM_SUM 0xBABA /* PBA (printed board assembly) number words */ #define NVM_PBA_OFFSET_0 8 #define NVM_PBA_OFFSET_1 9 #define NVM_PBA_PTR_GUARD 0xFAFA #define NVM_RESERVED_WORD 0xFFFF #define NVM_PHY_CLASS_A 0x8000 #define NVM_SERDES_AMPLITUDE_MASK 0x000F #define NVM_SIZE_MASK 0x1C00 #define NVM_SIZE_SHIFT 10 #define NVM_WORD_SIZE_BASE_SHIFT 6 #define NVM_SWDPIO_EXT_SHIFT 4 /* NVM Commands - Microwire */ #define NVM_READ_OPCODE_MICROWIRE 0x6 /* NVM read opcode */ #define NVM_WRITE_OPCODE_MICROWIRE 0x5 /* NVM write opcode */ #define NVM_ERASE_OPCODE_MICROWIRE 0x7 /* NVM erase opcode */ #define NVM_EWEN_OPCODE_MICROWIRE 0x13 /* NVM erase/write enable */ #define NVM_EWDS_OPCODE_MICROWIRE 0x10 /* NVM erase/write disable */ /* NVM Commands - SPI */ #define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ #define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ #define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ #define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ #define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ #define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ /* SPI NVM Status Register */ #define NVM_STATUS_RDY_SPI 0x01 /* Word definitions for ID LED Settings */ #define ID_LED_RESERVED_0000 0x0000 #define ID_LED_RESERVED_FFFF 0xFFFF #define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ (ID_LED_OFF1_OFF2 << 8) | \ (ID_LED_DEF1_DEF2 << 4) | \ (ID_LED_DEF1_DEF2)) #define ID_LED_DEF1_DEF2 0x1 #define ID_LED_DEF1_ON2 0x2 #define ID_LED_DEF1_OFF2 0x3 #define ID_LED_ON1_DEF2 0x4 #define ID_LED_ON1_ON2 0x5 #define ID_LED_ON1_OFF2 0x6 #define ID_LED_OFF1_DEF2 0x7 #define ID_LED_OFF1_ON2 0x8 #define ID_LED_OFF1_OFF2 0x9 #define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF #define IGP_ACTIVITY_LED_ENABLE 0x0300 #define IGP_LED3_MODE 0x07000000 /* PCI/PCI-X/PCI-EX Config space */ #define PCIX_COMMAND_REGISTER 0xE6 #define PCIX_STATUS_REGISTER_LO 0xE8 #define PCIX_STATUS_REGISTER_HI 0xEA #define PCI_HEADER_TYPE_REGISTER 0x0E #define PCIE_LINK_STATUS 0x12 #define PCIE_DEVICE_CONTROL2 0x28 #define PCIX_COMMAND_MMRBC_MASK 0x000C #define PCIX_COMMAND_MMRBC_SHIFT 0x2 #define PCIX_STATUS_HI_MMRBC_MASK 0x0060 #define PCIX_STATUS_HI_MMRBC_SHIFT 0x5 #define PCIX_STATUS_HI_MMRBC_4K 0x3 #define PCIX_STATUS_HI_MMRBC_2K 0x2 #define PCIX_STATUS_LO_FUNC_MASK 0x7 #define PCI_HEADER_TYPE_MULTIFUNC 0x80 #define PCIE_LINK_WIDTH_MASK 0x3F0 #define PCIE_LINK_WIDTH_SHIFT 4 #define PCIE_LINK_SPEED_MASK 0x0F #define PCIE_LINK_SPEED_2500 0x01 #define PCIE_LINK_SPEED_5000 0x02 #define PCIE_DEVICE_CONTROL2_16ms 0x0005 #ifndef ETH_ADDR_LEN #define ETH_ADDR_LEN 6 #endif #define PHY_REVISION_MASK 0xFFFFFFF0 #define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ #define MAX_PHY_MULTI_PAGE_REG 0xF /* Bit definitions for valid PHY IDs. 
* I = Integrated * E = External */ #define M88E1000_E_PHY_ID 0x01410C50 #define M88E1000_I_PHY_ID 0x01410C30 #define M88E1011_I_PHY_ID 0x01410C20 #define IGP01E1000_I_PHY_ID 0x02A80380 #define M88E1111_I_PHY_ID 0x01410CC0 #define M88E1543_E_PHY_ID 0x01410EA0 #define M88E1512_E_PHY_ID 0x01410DD0 #define M88E1112_E_PHY_ID 0x01410C90 #define I347AT4_E_PHY_ID 0x01410DC0 #define M88E1340M_E_PHY_ID 0x01410DF0 #define GG82563_E_PHY_ID 0x01410CA0 #define IGP03E1000_E_PHY_ID 0x02A80390 #define IFE_E_PHY_ID 0x02A80330 #define IFE_PLUS_E_PHY_ID 0x02A80320 #define IFE_C_E_PHY_ID 0x02A80310 #define BME1000_E_PHY_ID 0x01410CB0 #define BME1000_E_PHY_ID_R2 0x01410CB1 #define I82577_E_PHY_ID 0x01540050 #define I82578_E_PHY_ID 0x004DD040 #define I82579_E_PHY_ID 0x01540090 #define I217_E_PHY_ID 0x015400A0 #define I82580_I_PHY_ID 0x015403A0 #define I350_I_PHY_ID 0x015403B0 #define I210_I_PHY_ID 0x01410C00 #define IGP04E1000_E_PHY_ID 0x02A80391 #define M88_VENDOR 0x0141 /* M88E1000 Specific Registers */ #define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Reg */ #define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Reg */ #define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Cntrl */ #define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */ #define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */ #define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for pg number setting */ #define M88E1000_PHY_GEN_CONTROL 0x1E /* meaning depends on reg 29 */ #define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */ #define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */ /* M88E1000 PHY Specific Control Register */ #define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reverse enabled */ /* MDI Crossover Mode bits 6:5 Manual MDI configuration */ #define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 #define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ /* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */ #define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* Auto crossover enabled all speeds */ #define M88E1000_PSCR_AUTO_X_MODE 0x0060 #define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Tx */ /* M88E1000 PHY Specific Status Register */ #define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ #define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ #define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ /* 0 = <50M * 1 = 50-80M * 2 = 80-110M * 3 = 110-140M * 4 = >140M */ #define M88E1000_PSSR_CABLE_LENGTH 0x0380 #define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */ #define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ #define M88E1000_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ #define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ #define M88E1000_PSSR_100MBS 0x4000 /* 01=100Mbs */ #define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ #define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 /* Number of times we will attempt to autonegotiate before downshifting if we * are the master */ #define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 #define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 /* Number of times we will attempt to autonegotiate before downshifting if we * are the slave */ #define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 #define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 #define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ /* Intel I347AT4 Registers */ #define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */ #define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */ 
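/* Editorial example, not part of the Intel sources: the M88E1000 PHY
 * Specific Status word above packs link state, resolved speed/duplex,
 * and a coarse cable-length index into one 16-bit register, all of
 * which is recovered with plain mask-and-shift arithmetic.  A minimal
 * sketch follows; the function name and locals are hypothetical, and
 * "pssr" is assumed to have been read from M88E1000_PHY_SPEC_STATUS.
 */
static void
example_decode_m88_pssr(u16 pssr)
{
	bool link_up = (pssr & M88E1000_PSSR_LINK) != 0;
	bool full_duplex = (pssr & M88E1000_PSSR_DPLX) != 0;
	u16 speed = 10;		/* 00 in bits 14:15 means 10 Mb/s */
	u16 cable_idx;

	if ((pssr & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
		speed = 1000;
	else if ((pssr & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
		speed = 100;

	/* Speed/duplex fields are valid only once resolution completes. */
	if (!(pssr & M88E1000_PSSR_SPD_DPLX_RESOLVED))
		return;

	/* Index 0..4 selects the ranges documented above (<50M .. >140M). */
	cable_idx = (pssr & M88E1000_PSSR_CABLE_LENGTH) >>
	    M88E1000_PSSR_CABLE_LENGTH_SHIFT;
	(void)link_up; (void)full_duplex; (void)speed; (void)cable_idx;
}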
#define I347AT4_PAGE_SELECT 0x16 /* I347AT4 Extended PHY Specific Control Register */ /* Number of times we will attempt to autonegotiate before downshifting if we * are the master */ #define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800 #define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000 #define I347AT4_PSCR_DOWNSHIFT_1X 0x0000 #define I347AT4_PSCR_DOWNSHIFT_2X 0x1000 #define I347AT4_PSCR_DOWNSHIFT_3X 0x2000 #define I347AT4_PSCR_DOWNSHIFT_4X 0x3000 #define I347AT4_PSCR_DOWNSHIFT_5X 0x4000 #define I347AT4_PSCR_DOWNSHIFT_6X 0x5000 #define I347AT4_PSCR_DOWNSHIFT_7X 0x6000 #define I347AT4_PSCR_DOWNSHIFT_8X 0x7000 /* I347AT4 PHY Cable Diagnostics Control */ #define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */ /* M88E1112 only registers */ #define M88E1112_VCT_DSP_DISTANCE 0x001A /* M88EC018 Rev 2 specific DownShift settings */ #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 #define I82578_EPSCR_DOWNSHIFT_ENABLE 0x0020 #define I82578_EPSCR_DOWNSHIFT_COUNTER_MASK 0x001C /* BME1000 PHY Specific Control Register */ #define BME1000_PSCR_ENABLE_DOWNSHIFT 0x0800 /* 1 = enable downshift */ /* Bits... * 15-5: page * 4-0: register offset */ #define GG82563_PAGE_SHIFT 5 #define GG82563_REG(page, reg) \ (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) #define GG82563_MIN_ALT_REG 30 /* GG82563 Specific Registers */ #define GG82563_PHY_SPEC_CTRL GG82563_REG(0, 16) /* PHY Spec Cntrl */ #define GG82563_PHY_PAGE_SELECT GG82563_REG(0, 22) /* Page Select */ #define GG82563_PHY_SPEC_CTRL_2 GG82563_REG(0, 26) /* PHY Spec Cntrl2 */ #define GG82563_PHY_PAGE_SELECT_ALT GG82563_REG(0, 29) /* Alt Page Select */ /* MAC Specific Control Register */ #define GG82563_PHY_MAC_SPEC_CTRL GG82563_REG(2, 21) #define GG82563_PHY_DSP_DISTANCE GG82563_REG(5, 26) /* DSP Distance */ /* Page 193 - Port Control Registers */ /* Kumeran Mode Control */ #define GG82563_PHY_KMRN_MODE_CTRL GG82563_REG(193, 16) #define GG82563_PHY_PWR_MGMT_CTRL GG82563_REG(193, 20) /* Pwr Mgt Ctrl */ /* Page 194 - KMRN Registers */ #define GG82563_PHY_INBAND_CTRL GG82563_REG(194, 18) /* Inband Ctrl */ /* MDI Control */ #define E1000_MDIC_REG_MASK 0x001F0000 #define E1000_MDIC_REG_SHIFT 16 #define E1000_MDIC_PHY_MASK 0x03E00000 #define E1000_MDIC_PHY_SHIFT 21 #define E1000_MDIC_OP_WRITE 0x04000000 #define E1000_MDIC_OP_READ 0x08000000 #define E1000_MDIC_READY 0x10000000 #define E1000_MDIC_ERROR 0x40000000 #define E1000_MDIC_DEST 0x80000000 /* SerDes Control */ #define E1000_GEN_CTL_READY 0x80000000 #define E1000_GEN_CTL_ADDRESS_SHIFT 8 #define E1000_GEN_POLL_TIMEOUT 640 /* LinkSec register fields */ #define E1000_LSECTXCAP_SUM_MASK 0x00FF0000 #define E1000_LSECTXCAP_SUM_SHIFT 16 #define E1000_LSECRXCAP_SUM_MASK 0x00FF0000 #define E1000_LSECRXCAP_SUM_SHIFT 16 #define E1000_LSECTXCTRL_EN_MASK 0x00000003 #define E1000_LSECTXCTRL_DISABLE 0x0 #define E1000_LSECTXCTRL_AUTH 0x1 #define E1000_LSECTXCTRL_AUTH_ENCRYPT 0x2 #define E1000_LSECTXCTRL_AISCI 0x00000020 #define E1000_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00 #define E1000_LSECTXCTRL_RSV_MASK 0x000000D8 #define E1000_LSECRXCTRL_EN_MASK 0x0000000C #define E1000_LSECRXCTRL_EN_SHIFT 2 #define E1000_LSECRXCTRL_DISABLE 0x0 #define E1000_LSECRXCTRL_CHECK 0x1 #define E1000_LSECRXCTRL_STRICT 0x2 #define E1000_LSECRXCTRL_DROP 0x3 #define E1000_LSECRXCTRL_PLSH 0x00000040 #define E1000_LSECRXCTRL_RP 0x00000080 #define E1000_LSECRXCTRL_RSV_MASK 0xFFFFFF33 /* Tx Rate-Scheduler Config fields */ #define E1000_RTTBCNRC_RS_ENA 0x80000000 #define 
E1000_RTTBCNRC_RF_DEC_MASK 0x00003FFF #define E1000_RTTBCNRC_RF_INT_SHIFT 14 #define E1000_RTTBCNRC_RF_INT_MASK \ (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT) /* DMA Coalescing register fields */ /* DMA Coalescing Watchdog Timer */ #define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coalescing Rx Threshold */ #define E1000_DMACR_DMACTHR_MASK 0x00FF0000 #define E1000_DMACR_DMACTHR_SHIFT 16 /* Lx when no PCIe transactions */ #define E1000_DMACR_DMAC_LX_MASK 0x30000000 #define E1000_DMACR_DMAC_LX_SHIFT 28 #define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */ /* DMA Coalescing BMC-to-OS Watchdog Enable */ #define E1000_DMACR_DC_BMC2OSW_EN 0x00008000 /* DMA Coalescing Transmit Threshold */ #define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF #define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */ /* Rx Traffic Rate Threshold */ #define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Rx packet rate in current window */ #define E1000_DMCRTRH_LRPRCW 0x80000000 /* DMA Coal Rx Traffic Current Count */ #define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* Flow ctrl Rx Threshold High val */ #define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 #define E1000_FCRTC_RTH_COAL_SHIFT 4 /* Lx power decision based on DMA coal */ #define E1000_PCIEMISC_LX_DECISION 0x00000080 #define E1000_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */ #define E1000_RXPBS_SIZE_I210_MASK 0x0000003F /* Rx packet buffer size */ #define E1000_TXPB0S_SIZE_I210_MASK 0x0000003F /* Tx packet buffer 0 size */ #define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ #define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ #define E1000_DOBFFCTL_OBFFTHR_MASK 0x000000FF /* OBFF threshold */ #define E1000_DOBFFCTL_EXIT_ACT_MASK 0x01000000 /* Exit active CB */ /* Proxy Filter Control */ #define E1000_PROXYFC_D0 0x00000001 /* Enable offload in D0 */ #define E1000_PROXYFC_EX 0x00000004 /* Directed exact proxy */ #define E1000_PROXYFC_MC 0x00000008 /* Directed MC Proxy */ #define E1000_PROXYFC_BC 0x00000010 /* Broadcast Proxy Enable */ #define E1000_PROXYFC_ARP_DIRECTED 0x00000020 /* Directed ARP Proxy Ena */ #define E1000_PROXYFC_IPV4 0x00000040 /* Directed IPv4 Enable */ #define E1000_PROXYFC_IPV6 0x00000080 /* Directed IPv6 Enable */ #define E1000_PROXYFC_NS 0x00000200 /* IPv6 Neighbor Solicitation */ #define E1000_PROXYFC_ARP 0x00000800 /* ARP Request Proxy Ena */ /* Proxy Status */ #define E1000_PROXYS_CLEAR 0xFFFFFFFF /* Clear */ /* Firmware Status */ #define E1000_FWSTS_FWRI 0x80000000 /* FW Reset Indication */ /* VF Control */ #define E1000_VTCTRL_RST 0x04000000 /* Reset VF */ #define E1000_STATUS_LAN_ID_MASK 0x0000000C /* Mask for Lan ID field */ /* Lan ID bit field offset in status register */ #define E1000_STATUS_LAN_ID_OFFSET 2 #define E1000_VFTA_ENTRIES 128 #define E1000_UNUSEDARG #ifndef ERROR_REPORT #define ERROR_REPORT(fmt) do { } while (0) #endif /* ERROR_REPORT */ #endif /* _E1000_DEFINES_H_ */ Index: stable/10/sys/dev/e1000/e1000_hw.h =================================================================== --- stable/10/sys/dev/e1000/e1000_hw.h (revision 296054) +++ stable/10/sys/dev/e1000/e1000_hw.h (revision 296055) @@ -1,1026 +1,1032 @@ /****************************************************************************** Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1.
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _E1000_HW_H_ #define _E1000_HW_H_ #include "e1000_osdep.h" #include "e1000_regs.h" #include "e1000_defines.h" struct e1000_hw; #define E1000_DEV_ID_82542 0x1000 #define E1000_DEV_ID_82543GC_FIBER 0x1001 #define E1000_DEV_ID_82543GC_COPPER 0x1004 #define E1000_DEV_ID_82544EI_COPPER 0x1008 #define E1000_DEV_ID_82544EI_FIBER 0x1009 #define E1000_DEV_ID_82544GC_COPPER 0x100C #define E1000_DEV_ID_82544GC_LOM 0x100D #define E1000_DEV_ID_82540EM 0x100E #define E1000_DEV_ID_82540EM_LOM 0x1015 #define E1000_DEV_ID_82540EP_LOM 0x1016 #define E1000_DEV_ID_82540EP 0x1017 #define E1000_DEV_ID_82540EP_LP 0x101E #define E1000_DEV_ID_82545EM_COPPER 0x100F #define E1000_DEV_ID_82545EM_FIBER 0x1011 #define E1000_DEV_ID_82545GM_COPPER 0x1026 #define E1000_DEV_ID_82545GM_FIBER 0x1027 #define E1000_DEV_ID_82545GM_SERDES 0x1028 #define E1000_DEV_ID_82546EB_COPPER 0x1010 #define E1000_DEV_ID_82546EB_FIBER 0x1012 #define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D #define E1000_DEV_ID_82546GB_COPPER 0x1079 #define E1000_DEV_ID_82546GB_FIBER 0x107A #define E1000_DEV_ID_82546GB_SERDES 0x107B #define E1000_DEV_ID_82546GB_PCIE 0x108A #define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099 #define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 #define E1000_DEV_ID_82541EI 0x1013 #define E1000_DEV_ID_82541EI_MOBILE 0x1018 #define E1000_DEV_ID_82541ER_LOM 0x1014 #define E1000_DEV_ID_82541ER 0x1078 #define E1000_DEV_ID_82541GI 0x1076 #define E1000_DEV_ID_82541GI_LF 0x107C #define E1000_DEV_ID_82541GI_MOBILE 0x1077 #define E1000_DEV_ID_82547EI 0x1019 #define E1000_DEV_ID_82547EI_MOBILE 0x101A #define E1000_DEV_ID_82547GI 0x1075 #define E1000_DEV_ID_82571EB_COPPER 0x105E #define E1000_DEV_ID_82571EB_FIBER 0x105F #define E1000_DEV_ID_82571EB_SERDES 0x1060 #define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9 #define E1000_DEV_ID_82571EB_SERDES_QUAD 0x10DA #define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4 #define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5 #define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5 #define E1000_DEV_ID_82571EB_QUAD_COPPER_LP 0x10BC #define E1000_DEV_ID_82572EI_COPPER 0x107D #define E1000_DEV_ID_82572EI_FIBER 0x107E #define E1000_DEV_ID_82572EI_SERDES 0x107F #define 
E1000_DEV_ID_82572EI 0x10B9 #define E1000_DEV_ID_82573E 0x108B #define E1000_DEV_ID_82573E_IAMT 0x108C #define E1000_DEV_ID_82573L 0x109A #define E1000_DEV_ID_82574L 0x10D3 #define E1000_DEV_ID_82574LA 0x10F6 #define E1000_DEV_ID_82583V 0x150C #define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096 #define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098 #define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA #define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB #define E1000_DEV_ID_ICH8_82567V_3 0x1501 #define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049 #define E1000_DEV_ID_ICH8_IGP_AMT 0x104A #define E1000_DEV_ID_ICH8_IGP_C 0x104B #define E1000_DEV_ID_ICH8_IFE 0x104C #define E1000_DEV_ID_ICH8_IFE_GT 0x10C4 #define E1000_DEV_ID_ICH8_IFE_G 0x10C5 #define E1000_DEV_ID_ICH8_IGP_M 0x104D #define E1000_DEV_ID_ICH9_IGP_M 0x10BF #define E1000_DEV_ID_ICH9_IGP_M_AMT 0x10F5 #define E1000_DEV_ID_ICH9_IGP_M_V 0x10CB #define E1000_DEV_ID_ICH9_IGP_AMT 0x10BD #define E1000_DEV_ID_ICH9_BM 0x10E5 #define E1000_DEV_ID_ICH9_IGP_C 0x294C #define E1000_DEV_ID_ICH9_IFE 0x10C0 #define E1000_DEV_ID_ICH9_IFE_GT 0x10C3 #define E1000_DEV_ID_ICH9_IFE_G 0x10C2 #define E1000_DEV_ID_ICH10_R_BM_LM 0x10CC #define E1000_DEV_ID_ICH10_R_BM_LF 0x10CD #define E1000_DEV_ID_ICH10_R_BM_V 0x10CE #define E1000_DEV_ID_ICH10_D_BM_LM 0x10DE #define E1000_DEV_ID_ICH10_D_BM_LF 0x10DF #define E1000_DEV_ID_ICH10_D_BM_V 0x1525 #define E1000_DEV_ID_PCH_M_HV_LM 0x10EA #define E1000_DEV_ID_PCH_M_HV_LC 0x10EB #define E1000_DEV_ID_PCH_D_HV_DM 0x10EF #define E1000_DEV_ID_PCH_D_HV_DC 0x10F0 #define E1000_DEV_ID_PCH2_LV_LM 0x1502 #define E1000_DEV_ID_PCH2_LV_V 0x1503 #define E1000_DEV_ID_PCH_LPT_I217_LM 0x153A #define E1000_DEV_ID_PCH_LPT_I217_V 0x153B #define E1000_DEV_ID_PCH_LPTLP_I218_LM 0x155A #define E1000_DEV_ID_PCH_LPTLP_I218_V 0x1559 #define E1000_DEV_ID_PCH_I218_LM2 0x15A0 #define E1000_DEV_ID_PCH_I218_V2 0x15A1 #define E1000_DEV_ID_PCH_I218_LM3 0x15A2 /* Wildcat Point PCH */ #define E1000_DEV_ID_PCH_I218_V3 0x15A3 /* Wildcat Point PCH */ +#define E1000_DEV_ID_PCH_SPT_I219_LM 0x156F /* Sunrise Point PCH */ +#define E1000_DEV_ID_PCH_SPT_I219_V 0x1570 /* Sunrise Point PCH */ +#define E1000_DEV_ID_PCH_SPT_I219_LM2 0x15B7 /* Sunrise Point-H PCH */ +#define E1000_DEV_ID_PCH_SPT_I219_V2 0x15B8 /* Sunrise Point-H PCH */ +#define E1000_DEV_ID_PCH_LBG_I219_LM3 0x15B9 /* LEWISBURG PCH */ #define E1000_DEV_ID_82576 0x10C9 #define E1000_DEV_ID_82576_FIBER 0x10E6 #define E1000_DEV_ID_82576_SERDES 0x10E7 #define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8 #define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526 #define E1000_DEV_ID_82576_NS 0x150A #define E1000_DEV_ID_82576_NS_SERDES 0x1518 #define E1000_DEV_ID_82576_SERDES_QUAD 0x150D #define E1000_DEV_ID_82576_VF 0x10CA #define E1000_DEV_ID_82576_VF_HV 0x152D #define E1000_DEV_ID_I350_VF 0x1520 #define E1000_DEV_ID_I350_VF_HV 0x152F #define E1000_DEV_ID_82575EB_COPPER 0x10A7 #define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9 #define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6 #define E1000_DEV_ID_82580_COPPER 0x150E #define E1000_DEV_ID_82580_FIBER 0x150F #define E1000_DEV_ID_82580_SERDES 0x1510 #define E1000_DEV_ID_82580_SGMII 0x1511 #define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 #define E1000_DEV_ID_82580_QUAD_FIBER 0x1527 #define E1000_DEV_ID_I350_COPPER 0x1521 #define E1000_DEV_ID_I350_FIBER 0x1522 #define E1000_DEV_ID_I350_SERDES 0x1523 #define E1000_DEV_ID_I350_SGMII 0x1524 #define E1000_DEV_ID_I350_DA4 0x1546 #define E1000_DEV_ID_I210_COPPER 0x1533 #define E1000_DEV_ID_I210_COPPER_OEM1 0x1534 #define E1000_DEV_ID_I210_COPPER_IT 0x1535 
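/* Editorial example, not part of the Intel sources: the five Sunrise
 * Point / Lewisburg I219 device IDs added above are matched the same
 * way as every other entry in this table, by switching on the PCI
 * device ID; this change also introduces an e1000_pch_spt MAC type
 * (see the enum below) for these parts.  A minimal hypothetical
 * sketch of the classification step:
 */
static bool
example_is_pch_spt_device(u16 device_id)
{
	switch (device_id) {
	case E1000_DEV_ID_PCH_SPT_I219_LM:
	case E1000_DEV_ID_PCH_SPT_I219_V:
	case E1000_DEV_ID_PCH_SPT_I219_LM2:
	case E1000_DEV_ID_PCH_SPT_I219_V2:
	case E1000_DEV_ID_PCH_LBG_I219_LM3:
		return TRUE;	/* would select the new e1000_pch_spt type */
	default:
		return FALSE;
	}
}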
#define E1000_DEV_ID_I210_FIBER 0x1536 #define E1000_DEV_ID_I210_SERDES 0x1537 #define E1000_DEV_ID_I210_SGMII 0x1538 #define E1000_DEV_ID_I210_COPPER_FLASHLESS 0x157B #define E1000_DEV_ID_I210_SERDES_FLASHLESS 0x157C #define E1000_DEV_ID_I211_COPPER 0x1539 #define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40 #define E1000_DEV_ID_I354_SGMII 0x1F41 #define E1000_DEV_ID_I354_BACKPLANE_2_5GBPS 0x1F45 #define E1000_DEV_ID_DH89XXCC_SGMII 0x0438 #define E1000_DEV_ID_DH89XXCC_SERDES 0x043A #define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C #define E1000_DEV_ID_DH89XXCC_SFP 0x0440 #define E1000_REVISION_0 0 #define E1000_REVISION_1 1 #define E1000_REVISION_2 2 #define E1000_REVISION_3 3 #define E1000_REVISION_4 4 #define E1000_FUNC_0 0 #define E1000_FUNC_1 1 #define E1000_FUNC_2 2 #define E1000_FUNC_3 3 #define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 #define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 #define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6 #define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9 enum e1000_mac_type { e1000_undefined = 0, e1000_82542, e1000_82543, e1000_82544, e1000_82540, e1000_82545, e1000_82545_rev_3, e1000_82546, e1000_82546_rev_3, e1000_82541, e1000_82541_rev_2, e1000_82547, e1000_82547_rev_2, e1000_82571, e1000_82572, e1000_82573, e1000_82574, e1000_82583, e1000_80003es2lan, e1000_ich8lan, e1000_ich9lan, e1000_ich10lan, e1000_pchlan, e1000_pch2lan, e1000_pch_lpt, + e1000_pch_spt, e1000_82575, e1000_82576, e1000_82580, e1000_i350, e1000_i354, e1000_i210, e1000_i211, e1000_vfadapt, e1000_vfadapt_i350, e1000_num_macs /* List is 1-based, so subtract 1 for TRUE count. */ }; enum e1000_media_type { e1000_media_type_unknown = 0, e1000_media_type_copper = 1, e1000_media_type_fiber = 2, e1000_media_type_internal_serdes = 3, e1000_num_media_types }; enum e1000_nvm_type { e1000_nvm_unknown = 0, e1000_nvm_none, e1000_nvm_eeprom_spi, e1000_nvm_eeprom_microwire, e1000_nvm_flash_hw, e1000_nvm_invm, e1000_nvm_flash_sw }; enum e1000_nvm_override { e1000_nvm_override_none = 0, e1000_nvm_override_spi_small, e1000_nvm_override_spi_large, e1000_nvm_override_microwire_small, e1000_nvm_override_microwire_large }; enum e1000_phy_type { e1000_phy_unknown = 0, e1000_phy_none, e1000_phy_m88, e1000_phy_igp, e1000_phy_igp_2, e1000_phy_gg82563, e1000_phy_igp_3, e1000_phy_ife, e1000_phy_bm, e1000_phy_82578, e1000_phy_82577, e1000_phy_82579, e1000_phy_i217, e1000_phy_82580, e1000_phy_vf, e1000_phy_i210, }; enum e1000_bus_type { e1000_bus_type_unknown = 0, e1000_bus_type_pci, e1000_bus_type_pcix, e1000_bus_type_pci_express, e1000_bus_type_reserved }; enum e1000_bus_speed { e1000_bus_speed_unknown = 0, e1000_bus_speed_33, e1000_bus_speed_66, e1000_bus_speed_100, e1000_bus_speed_120, e1000_bus_speed_133, e1000_bus_speed_2500, e1000_bus_speed_5000, e1000_bus_speed_reserved }; enum e1000_bus_width { e1000_bus_width_unknown = 0, e1000_bus_width_pcie_x1, e1000_bus_width_pcie_x2, e1000_bus_width_pcie_x4 = 4, e1000_bus_width_pcie_x8 = 8, e1000_bus_width_32, e1000_bus_width_64, e1000_bus_width_reserved }; enum e1000_1000t_rx_status { e1000_1000t_rx_status_not_ok = 0, e1000_1000t_rx_status_ok, e1000_1000t_rx_status_undefined = 0xFF }; enum e1000_rev_polarity { e1000_rev_polarity_normal = 0, e1000_rev_polarity_reversed, e1000_rev_polarity_undefined = 0xFF }; enum e1000_fc_mode { e1000_fc_none = 0, e1000_fc_rx_pause, e1000_fc_tx_pause, e1000_fc_full, e1000_fc_default = 0xFF }; enum e1000_ffe_config { e1000_ffe_config_enabled = 0, e1000_ffe_config_active, e1000_ffe_config_blocked }; enum e1000_dsp_config { e1000_dsp_config_disabled = 0, 
e1000_dsp_config_enabled, e1000_dsp_config_activated, e1000_dsp_config_undefined = 0xFF }; enum e1000_ms_type { e1000_ms_hw_default = 0, e1000_ms_force_master, e1000_ms_force_slave, e1000_ms_auto }; enum e1000_smart_speed { e1000_smart_speed_default = 0, e1000_smart_speed_on, e1000_smart_speed_off }; enum e1000_serdes_link_state { e1000_serdes_link_down = 0, e1000_serdes_link_autoneg_progress, e1000_serdes_link_autoneg_complete, e1000_serdes_link_forced_up }; #define __le16 u16 #define __le32 u32 #define __le64 u64 /* Receive Descriptor */ struct e1000_rx_desc { __le64 buffer_addr; /* Address of the descriptor's data buffer */ __le16 length; /* Length of data DMAed into data buffer */ __le16 csum; /* Packet checksum */ u8 status; /* Descriptor status */ u8 errors; /* Descriptor Errors */ __le16 special; }; /* Receive Descriptor - Extended */ union e1000_rx_desc_extended { struct { __le64 buffer_addr; __le64 reserved; } read; struct { struct { __le32 mrq; /* Multiple Rx Queues */ union { __le32 rss; /* RSS Hash */ struct { __le16 ip_id; /* IP id */ __le16 csum; /* Packet Checksum */ } csum_ip; } hi_dword; } lower; struct { __le32 status_error; /* ext status/error */ __le16 length; __le16 vlan; /* VLAN tag */ } upper; } wb; /* writeback */ }; #define MAX_PS_BUFFERS 4 /* Number of packet split data buffers (not including the header buffer) */ #define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) /* Receive Descriptor - Packet Split */ union e1000_rx_desc_packet_split { struct { /* one buffer for protocol header(s), three data buffers */ __le64 buffer_addr[MAX_PS_BUFFERS]; } read; struct { struct { __le32 mrq; /* Multiple Rx Queues */ union { __le32 rss; /* RSS Hash */ struct { __le16 ip_id; /* IP id */ __le16 csum; /* Packet Checksum */ } csum_ip; } hi_dword; } lower; struct { __le32 status_error; /* ext status/error */ __le16 length0; /* length of buffer 0 */ __le16 vlan; /* VLAN tag */ } middle; struct { __le16 header_status; /* length of buffers 1-3 */ __le16 length[PS_PAGE_BUFFERS]; } upper; __le64 reserved; } wb; /* writeback */ }; /* Transmit Descriptor */ struct e1000_tx_desc { __le64 buffer_addr; /* Address of the descriptor's data buffer */ union { __le32 data; struct { __le16 length; /* Data buffer length */ u8 cso; /* Checksum offset */ u8 cmd; /* Descriptor control */ } flags; } lower; union { __le32 data; struct { u8 status; /* Descriptor status */ u8 css; /* Checksum start */ __le16 special; } fields; } upper; }; /* Offload Context Descriptor */ struct e1000_context_desc { union { __le32 ip_config; struct { u8 ipcss; /* IP checksum start */ u8 ipcso; /* IP checksum offset */ __le16 ipcse; /* IP checksum end */ } ip_fields; } lower_setup; union { __le32 tcp_config; struct { u8 tucss; /* TCP checksum start */ u8 tucso; /* TCP checksum offset */ __le16 tucse; /* TCP checksum end */ } tcp_fields; } upper_setup; __le32 cmd_and_length; union { __le32 data; struct { u8 status; /* Descriptor status */ u8 hdr_len; /* Header length */ __le16 mss; /* Maximum segment size */ } fields; } tcp_seg_setup; }; /* Offload data descriptor */ struct e1000_data_desc { __le64 buffer_addr; /* Address of the descriptor's buffer address */ union { __le32 data; struct { __le16 length; /* Data buffer length */ u8 typ_len_ext; u8 cmd; } flags; } lower; union { __le32 data; struct { u8 status; /* Descriptor status */ u8 popts; /* Packet Options */ __le16 special; } fields; } upper; }; /* Statistics counters collected by the MAC */ struct e1000_hw_stats { u64 crcerrs; u64 algnerrc; u64 symerrs; u64 rxerrc; u64 mpc; 
u64 scc; u64 ecol; u64 mcc; u64 latecol; u64 colc; u64 dc; u64 tncrs; u64 sec; u64 cexterr; u64 rlec; u64 xonrxc; u64 xontxc; u64 xoffrxc; u64 xofftxc; u64 fcruc; u64 prc64; u64 prc127; u64 prc255; u64 prc511; u64 prc1023; u64 prc1522; u64 gprc; u64 bprc; u64 mprc; u64 gptc; u64 gorc; u64 gotc; u64 rnbc; u64 ruc; u64 rfc; u64 roc; u64 rjc; u64 mgprc; u64 mgpdc; u64 mgptc; u64 tor; u64 tot; u64 tpr; u64 tpt; u64 ptc64; u64 ptc127; u64 ptc255; u64 ptc511; u64 ptc1023; u64 ptc1522; u64 mptc; u64 bptc; u64 tsctc; u64 tsctfc; u64 iac; u64 icrxptc; u64 icrxatc; u64 ictxptc; u64 ictxatc; u64 ictxqec; u64 ictxqmtc; u64 icrxdmtc; u64 icrxoc; u64 cbtmpc; u64 htdpmc; u64 cbrdpc; u64 cbrmpc; u64 rpthc; u64 hgptc; u64 htcbdpc; u64 hgorc; u64 hgotc; u64 lenerrs; u64 scvpc; u64 hrmpc; u64 doosync; u64 o2bgptc; u64 o2bspc; u64 b2ospc; u64 b2ogprc; }; struct e1000_vf_stats { u64 base_gprc; u64 base_gptc; u64 base_gorc; u64 base_gotc; u64 base_mprc; u64 base_gotlbc; u64 base_gptlbc; u64 base_gorlbc; u64 base_gprlbc; u32 last_gprc; u32 last_gptc; u32 last_gorc; u32 last_gotc; u32 last_mprc; u32 last_gotlbc; u32 last_gptlbc; u32 last_gorlbc; u32 last_gprlbc; u64 gprc; u64 gptc; u64 gorc; u64 gotc; u64 mprc; u64 gotlbc; u64 gptlbc; u64 gorlbc; u64 gprlbc; }; struct e1000_phy_stats { u32 idle_errors; u32 receive_errors; }; struct e1000_host_mng_dhcp_cookie { u32 signature; u8 status; u8 reserved0; u16 vlan_id; u32 reserved1; u16 reserved2; u8 reserved3; u8 checksum; }; /* Host Interface "Rev 1" */ struct e1000_host_command_header { u8 command_id; u8 command_length; u8 command_options; u8 checksum; }; #define E1000_HI_MAX_DATA_LENGTH 252 struct e1000_host_command_info { struct e1000_host_command_header command_header; u8 command_data[E1000_HI_MAX_DATA_LENGTH]; }; /* Host Interface "Rev 2" */ struct e1000_host_mng_command_header { u8 command_id; u8 checksum; u16 reserved1; u16 reserved2; u16 command_length; }; #define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 struct e1000_host_mng_command_info { struct e1000_host_mng_command_header command_header; u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; }; #include "e1000_mac.h" #include "e1000_phy.h" #include "e1000_nvm.h" #include "e1000_manage.h" #include "e1000_mbx.h" /* Function pointers for the MAC. 
*/ struct e1000_mac_operations { s32 (*init_params)(struct e1000_hw *); s32 (*id_led_init)(struct e1000_hw *); s32 (*blink_led)(struct e1000_hw *); bool (*check_mng_mode)(struct e1000_hw *); s32 (*check_for_link)(struct e1000_hw *); s32 (*cleanup_led)(struct e1000_hw *); void (*clear_hw_cntrs)(struct e1000_hw *); void (*clear_vfta)(struct e1000_hw *); s32 (*get_bus_info)(struct e1000_hw *); void (*set_lan_id)(struct e1000_hw *); s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); s32 (*led_on)(struct e1000_hw *); s32 (*led_off)(struct e1000_hw *); void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32); s32 (*reset_hw)(struct e1000_hw *); s32 (*init_hw)(struct e1000_hw *); void (*shutdown_serdes)(struct e1000_hw *); void (*power_up_serdes)(struct e1000_hw *); s32 (*setup_link)(struct e1000_hw *); s32 (*setup_physical_interface)(struct e1000_hw *); s32 (*setup_led)(struct e1000_hw *); void (*write_vfta)(struct e1000_hw *, u32, u32); void (*config_collision_dist)(struct e1000_hw *); int (*rar_set)(struct e1000_hw *, u8*, u32); s32 (*read_mac_addr)(struct e1000_hw *); s32 (*validate_mdi_setting)(struct e1000_hw *); s32 (*set_obff_timer)(struct e1000_hw *, u32); s32 (*acquire_swfw_sync)(struct e1000_hw *, u16); void (*release_swfw_sync)(struct e1000_hw *, u16); }; /* When to use various PHY register access functions: * * Func Caller * Function Does Does When to use * ~~~~~~~~~~~~ ~~~~~ ~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * X_reg L,P,A n/a for simple PHY reg accesses * X_reg_locked P,A L for multiple accesses of different regs * on different pages * X_reg_page A L,P for multiple accesses of different regs * on the same page * * Where X=[read|write], L=locking, P=sets page, A=register access * */ struct e1000_phy_operations { s32 (*init_params)(struct e1000_hw *); s32 (*acquire)(struct e1000_hw *); s32 (*cfg_on_link_up)(struct e1000_hw *); s32 (*check_polarity)(struct e1000_hw *); s32 (*check_reset_block)(struct e1000_hw *); s32 (*commit)(struct e1000_hw *); s32 (*force_speed_duplex)(struct e1000_hw *); s32 (*get_cfg_done)(struct e1000_hw *hw); s32 (*get_cable_length)(struct e1000_hw *); s32 (*get_info)(struct e1000_hw *); s32 (*set_page)(struct e1000_hw *, u16); s32 (*read_reg)(struct e1000_hw *, u32, u16 *); s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *); s32 (*read_reg_page)(struct e1000_hw *, u32, u16 *); void (*release)(struct e1000_hw *); s32 (*reset)(struct e1000_hw *); s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); s32 (*write_reg)(struct e1000_hw *, u32, u16); s32 (*write_reg_locked)(struct e1000_hw *, u32, u16); s32 (*write_reg_page)(struct e1000_hw *, u32, u16); void (*power_up)(struct e1000_hw *); void (*power_down)(struct e1000_hw *); s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *); s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8); }; /* Function pointers for the NVM. 
*/ struct e1000_nvm_operations { s32 (*init_params)(struct e1000_hw *); s32 (*acquire)(struct e1000_hw *); s32 (*read)(struct e1000_hw *, u16, u16, u16 *); void (*release)(struct e1000_hw *); void (*reload)(struct e1000_hw *); s32 (*update)(struct e1000_hw *); s32 (*valid_led_default)(struct e1000_hw *, u16 *); s32 (*validate)(struct e1000_hw *); s32 (*write)(struct e1000_hw *, u16, u16, u16 *); }; struct e1000_mac_info { struct e1000_mac_operations ops; u8 addr[ETH_ADDR_LEN]; u8 perm_addr[ETH_ADDR_LEN]; enum e1000_mac_type type; u32 collision_delta; u32 ledctl_default; u32 ledctl_mode1; u32 ledctl_mode2; u32 mc_filter_type; u32 tx_packet_delta; u32 txcw; u16 current_ifs_val; u16 ifs_max_val; u16 ifs_min_val; u16 ifs_ratio; u16 ifs_step_size; u16 mta_reg_count; u16 uta_reg_count; /* Maximum size of the MTA register table in all supported adapters */ #define MAX_MTA_REG 128 u32 mta_shadow[MAX_MTA_REG]; u16 rar_entry_count; u8 forced_speed_duplex; bool adaptive_ifs; bool has_fwsm; bool arc_subsystem_valid; bool asf_firmware_present; bool autoneg; bool autoneg_failed; bool get_link_status; bool in_ifs_mode; bool report_tx_early; enum e1000_serdes_link_state serdes_link_state; bool serdes_has_link; bool tx_pkt_filtering; - u32 max_frame_size; + u32 max_frame_size; }; struct e1000_phy_info { struct e1000_phy_operations ops; enum e1000_phy_type type; enum e1000_1000t_rx_status local_rx; enum e1000_1000t_rx_status remote_rx; enum e1000_ms_type ms_type; enum e1000_ms_type original_ms_type; enum e1000_rev_polarity cable_polarity; enum e1000_smart_speed smart_speed; u32 addr; u32 id; u32 reset_delay_us; /* in usec */ u32 revision; enum e1000_media_type media_type; u16 autoneg_advertised; u16 autoneg_mask; u16 cable_length; u16 max_cable_length; u16 min_cable_length; u8 mdix; bool disable_polarity_correction; bool is_mdix; bool polarity_correction; bool speed_downgraded; bool autoneg_wait_to_complete; }; struct e1000_nvm_info { struct e1000_nvm_operations ops; enum e1000_nvm_type type; enum e1000_nvm_override override; u32 flash_bank_size; u32 flash_base_addr; u16 word_size; u16 delay_usec; u16 address_bits; u16 opcode_bits; u16 page_size; }; struct e1000_bus_info { enum e1000_bus_type type; enum e1000_bus_speed speed; enum e1000_bus_width width; u16 func; u16 pci_cmd_word; }; struct e1000_fc_info { u32 high_water; /* Flow control high-water mark */ u32 low_water; /* Flow control low-water mark */ u16 pause_time; /* Flow control pause timer */ u16 refresh_time; /* Flow control refresh timer */ bool send_xon; /* Flow control send XON */ bool strict_ieee; /* Strict IEEE mode */ enum e1000_fc_mode current_mode; /* FC mode in effect */ enum e1000_fc_mode requested_mode; /* FC mode requested by caller */ }; struct e1000_mbx_operations { s32 (*init_params)(struct e1000_hw *hw); s32 (*read)(struct e1000_hw *, u32 *, u16, u16); s32 (*write)(struct e1000_hw *, u32 *, u16, u16); s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16); s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16); s32 (*check_for_msg)(struct e1000_hw *, u16); s32 (*check_for_ack)(struct e1000_hw *, u16); s32 (*check_for_rst)(struct e1000_hw *, u16); }; struct e1000_mbx_stats { u32 msgs_tx; u32 msgs_rx; u32 acks; u32 reqs; u32 rsts; }; struct e1000_mbx_info { struct e1000_mbx_operations ops; struct e1000_mbx_stats stats; u32 timeout; u32 usec_delay; u16 size; }; struct e1000_dev_spec_82541 { enum e1000_dsp_config dsp_config; enum e1000_ffe_config ffe_config; u16 spd_default; bool phy_init_script; }; struct e1000_dev_spec_82542 { bool 
dma_fairness; }; struct e1000_dev_spec_82543 { u32 tbi_compatibility; bool dma_fairness; bool init_phy_disabled; }; struct e1000_dev_spec_82571 { bool laa_is_present; u32 smb_counter; E1000_MUTEX swflag_mutex; }; struct e1000_dev_spec_80003es2lan { bool mdic_wa_enable; }; struct e1000_shadow_ram { u16 value; bool modified; }; #define E1000_SHADOW_RAM_WORDS 2048 /* I218 PHY Ultra Low Power (ULP) states */ enum e1000_ulp_state { e1000_ulp_state_unknown, e1000_ulp_state_off, e1000_ulp_state_on, }; struct e1000_dev_spec_ich8lan { bool kmrn_lock_loss_workaround_enabled; struct e1000_shadow_ram shadow_ram[E1000_SHADOW_RAM_WORDS]; E1000_MUTEX nvm_mutex; E1000_MUTEX swflag_mutex; bool nvm_k1_enabled; bool eee_disable; u16 eee_lp_ability; enum e1000_ulp_state ulp_state; }; struct e1000_dev_spec_82575 { bool sgmii_active; bool global_device_reset; bool eee_disable; bool module_plugged; bool clear_semaphore_once; u32 mtu; struct sfp_e1000_flags eth_flags; u8 media_port; bool media_changed; }; struct e1000_dev_spec_vf { u32 vf_number; u32 v2p_mailbox; }; struct e1000_hw { void *back; u8 *hw_addr; u8 *flash_address; unsigned long io_base; struct e1000_mac_info mac; struct e1000_fc_info fc; struct e1000_phy_info phy; struct e1000_nvm_info nvm; struct e1000_bus_info bus; struct e1000_mbx_info mbx; struct e1000_host_mng_dhcp_cookie mng_cookie; union { struct e1000_dev_spec_82541 _82541; struct e1000_dev_spec_82542 _82542; struct e1000_dev_spec_82543 _82543; struct e1000_dev_spec_82571 _82571; struct e1000_dev_spec_80003es2lan _80003es2lan; struct e1000_dev_spec_ich8lan ich8lan; struct e1000_dev_spec_82575 _82575; struct e1000_dev_spec_vf vf; } dev_spec; u16 device_id; u16 subsystem_vendor_id; u16 subsystem_device_id; u16 vendor_id; u8 revision_id; }; #include "e1000_82541.h" #include "e1000_82543.h" #include "e1000_82571.h" #include "e1000_80003es2lan.h" #include "e1000_ich8lan.h" #include "e1000_82575.h" #include "e1000_i210.h" /* These functions must be implemented by drivers */ void e1000_pci_clear_mwi(struct e1000_hw *hw); void e1000_pci_set_mwi(struct e1000_hw *hw); s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); s32 e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); void e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); void e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); #endif Index: stable/10/sys/dev/e1000/e1000_i210.c =================================================================== --- stable/10/sys/dev/e1000/e1000_i210.c (revision 296054) +++ stable/10/sys/dev/e1000/e1000_i210.c (revision 296055) @@ -1,904 +1,934 @@ /****************************************************************************** Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #include "e1000_api.h" static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw); static void e1000_release_nvm_i210(struct e1000_hw *hw); static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw); static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); static s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw); static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data); /** * e1000_acquire_nvm_i210 - Request for access to EEPROM * @hw: pointer to the HW structure * * Acquire the necessary semaphores for exclusive access to the EEPROM. * Set the EEPROM access request bit and wait for EEPROM access grant bit. * Return successful if access grant bit set, else clear the request for * EEPROM access and return -E1000_ERR_NVM (-1). **/ static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw) { s32 ret_val; DEBUGFUNC("e1000_acquire_nvm_i210"); ret_val = e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); return ret_val; } /** * e1000_release_nvm_i210 - Release exclusive access to EEPROM * @hw: pointer to the HW structure * * Stop any current commands to the EEPROM and clear the EEPROM request bit, * then release the semaphores acquired. **/ static void e1000_release_nvm_i210(struct e1000_hw *hw) { DEBUGFUNC("e1000_release_nvm_i210"); e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); } /** * e1000_acquire_swfw_sync_i210 - Acquire SW/FW semaphore * @hw: pointer to the HW structure * @mask: specifies which semaphore to acquire * * Acquire the SW/FW semaphore to access the PHY or NVM. The mask * will also specify which port we're acquiring the lock for. 
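* * The software flags live in the low 16 bits of SW_FW_SYNC and the * corresponding firmware flags in the high 16 bits, so one mask value * covers both sides (sw = mask, fw = mask << 16). An illustrative call * sequence, with error handling elided: * * if (e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM) == E1000_SUCCESS) { * ... access the NVM ... * e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); * }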
**/ s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask) { u32 swfw_sync; u32 swmask = mask; u32 fwmask = mask << 16; s32 ret_val = E1000_SUCCESS; s32 i = 0, timeout = 200; /* FIXME: find real value to use here */ DEBUGFUNC("e1000_acquire_swfw_sync_i210"); while (i < timeout) { if (e1000_get_hw_semaphore_i210(hw)) { ret_val = -E1000_ERR_SWFW_SYNC; goto out; } swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); if (!(swfw_sync & (fwmask | swmask))) break; /* * Firmware currently using resource (fwmask) * or other software thread using resource (swmask) */ e1000_put_hw_semaphore_generic(hw); msec_delay_irq(5); i++; } if (i == timeout) { DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n"); ret_val = -E1000_ERR_SWFW_SYNC; goto out; } swfw_sync |= swmask; E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); e1000_put_hw_semaphore_generic(hw); out: return ret_val; } /** * e1000_release_swfw_sync_i210 - Release SW/FW semaphore * @hw: pointer to the HW structure * @mask: specifies which semaphore to acquire * * Release the SW/FW semaphore used to access the PHY or NVM. The mask * will also specify which port we're releasing the lock for. **/ void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask) { u32 swfw_sync; DEBUGFUNC("e1000_release_swfw_sync_i210"); while (e1000_get_hw_semaphore_i210(hw) != E1000_SUCCESS) ; /* Empty */ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); swfw_sync &= ~mask; E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); e1000_put_hw_semaphore_generic(hw); } /** * e1000_get_hw_semaphore_i210 - Acquire hardware semaphore * @hw: pointer to the HW structure * * Acquire the HW semaphore to access the PHY or NVM **/ static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw) { u32 swsm; s32 timeout = hw->nvm.word_size + 1; s32 i = 0; DEBUGFUNC("e1000_get_hw_semaphore_i210"); /* Get the SW semaphore */ while (i < timeout) { swsm = E1000_READ_REG(hw, E1000_SWSM); if (!(swsm & E1000_SWSM_SMBI)) break; usec_delay(50); i++; } if (i == timeout) { /* In rare circumstances, the SW semaphore may already be held * unintentionally. Clear the semaphore once before giving up. */ if (hw->dev_spec._82575.clear_semaphore_once) { hw->dev_spec._82575.clear_semaphore_once = FALSE; e1000_put_hw_semaphore_generic(hw); for (i = 0; i < timeout; i++) { swsm = E1000_READ_REG(hw, E1000_SWSM); if (!(swsm & E1000_SWSM_SMBI)) break; usec_delay(50); } } /* If we do not have the semaphore here, we have to give up. */ if (i == timeout) { DEBUGOUT("Driver can't access device - SMBI bit is set.\n"); return -E1000_ERR_NVM; } } /* Get the FW semaphore. */ for (i = 0; i < timeout; i++) { swsm = E1000_READ_REG(hw, E1000_SWSM); E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI); /* Semaphore acquired if bit latched */ if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI) break; usec_delay(50); } if (i == timeout) { /* Release semaphores */ e1000_put_hw_semaphore_generic(hw); DEBUGOUT("Driver can't access the NVM\n"); return -E1000_ERR_NVM; } return E1000_SUCCESS; } /** * e1000_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register * @hw: pointer to the HW structure * @offset: offset of word in the Shadow Ram to read * @words: number of words to read * @data: word read from the Shadow Ram * * Reads a 16 bit word from the Shadow Ram using the EERD register. * Uses necessary synchronization semaphores. 
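* * Reads are issued in bursts of at most E1000_EERD_EEWR_MAX_COUNT words, * and the semaphore is taken and released around each burst so that * firmware can reclaim the NVM in between. The chunk size computed in * the loop is simply MIN(words - i, E1000_EERD_EEWR_MAX_COUNT).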
**/ s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) { s32 status = E1000_SUCCESS; u16 i, count; DEBUGFUNC("e1000_read_nvm_srrd_i210"); /* We cannot hold synchronization semaphores for too long, * because of forceful takeover procedure. However, it is more efficient * to read in bursts than synchronizing access for each word. */ for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? E1000_EERD_EEWR_MAX_COUNT : (words - i); if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { status = e1000_read_nvm_eerd(hw, offset, count, data + i); hw->nvm.ops.release(hw); } else { status = E1000_ERR_SWFW_SYNC; } if (status != E1000_SUCCESS) break; } return status; } /** * e1000_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR * @hw: pointer to the HW structure * @offset: offset within the Shadow RAM to be written to * @words: number of words to write * @data: 16 bit word(s) to be written to the Shadow RAM * * Writes data to Shadow RAM at offset using EEWR register. * * If e1000_update_nvm_checksum is not called after this function, the * data will not be committed to FLASH and also Shadow RAM will most likely * contain an invalid checksum. * * If an error code is returned, data and Shadow RAM may be inconsistent - buffer * partially written. **/ s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) { s32 status = E1000_SUCCESS; u16 i, count; DEBUGFUNC("e1000_write_nvm_srwr_i210"); /* We cannot hold synchronization semaphores for too long, * because of forceful takeover procedure. However, it is more efficient * to write in bursts than synchronizing access for each word. */ for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? E1000_EERD_EEWR_MAX_COUNT : (words - i); if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { status = e1000_write_nvm_srwr(hw, offset, count, data + i); hw->nvm.ops.release(hw); } else { status = E1000_ERR_SWFW_SYNC; } if (status != E1000_SUCCESS) break; } return status; } /** * e1000_write_nvm_srwr - Write to Shadow Ram using EEWR * @hw: pointer to the HW structure * @offset: offset within the Shadow Ram to be written to * @words: number of words to write * @data: 16 bit word(s) to be written to the Shadow Ram * * Writes data to Shadow Ram at offset using EEWR register. * * If e1000_update_nvm_checksum is not called after this function, the * Shadow Ram will most likely contain an invalid checksum. **/ static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) { struct e1000_nvm_info *nvm = &hw->nvm; u32 i, k, eewr = 0; u32 attempts = 100000; s32 ret_val = E1000_SUCCESS; DEBUGFUNC("e1000_write_nvm_srwr"); /* * A check for invalid values: offset too large, too many words, * too many words for the offset, and not enough words.
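* * Each word that passes the check is then pushed through the SRWR * register as ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) | * (data[i] << E1000_NVM_RW_REG_DATA) | E1000_NVM_RW_REG_START, and the * DONE bit is polled (up to 'attempts' times) before the next word.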
*/ if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || (words == 0)) { DEBUGOUT("nvm parameter(s) out of bounds\n"); ret_val = -E1000_ERR_NVM; goto out; } for (i = 0; i < words; i++) { eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) | (data[i] << E1000_NVM_RW_REG_DATA) | E1000_NVM_RW_REG_START; E1000_WRITE_REG(hw, E1000_SRWR, eewr); for (k = 0; k < attempts; k++) { if (E1000_NVM_RW_REG_DONE & E1000_READ_REG(hw, E1000_SRWR)) { ret_val = E1000_SUCCESS; break; } usec_delay(5); } if (ret_val != E1000_SUCCESS) { DEBUGOUT("Shadow RAM write EEWR timed out\n"); break; } } out: return ret_val; } /** e1000_read_invm_word_i210 - Reads OTP * @hw: pointer to the HW structure * @address: the word address (aka eeprom offset) to read * @data: pointer to the data read * * Reads 16-bit words from the OTP. Return error when the word is not * stored in OTP. **/ static s32 e1000_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data) { s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; u32 invm_dword; u16 i; u8 record_type, word_address; DEBUGFUNC("e1000_read_invm_word_i210"); for (i = 0; i < E1000_INVM_SIZE; i++) { invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i)); /* Get record type */ record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword); if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE) break; if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE) i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS; if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE) i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS; if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) { word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword); if (word_address == address) { *data = INVM_DWORD_TO_WORD_DATA(invm_dword); DEBUGOUT2("Read INVM Word 0x%02x = %x", address, *data); status = E1000_SUCCESS; break; } } } if (status != E1000_SUCCESS) DEBUGOUT1("Requested word 0x%02x not found in OTP\n", address); return status; } /** e1000_read_invm_i210 - Read invm wrapper function for I210/I211 * @hw: pointer to the HW structure * @offset: the word offset (aka eeprom address) to read * @words: number of words to read (unused; kept for the nvm ops interface) * @data: pointer to the data read * * Wrapper function to return data formerly found in the NVM.
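* * Only the MAC address words must actually exist in iNVM; the LED and * INIT_CTRL words fall back to their I211 defaults, and the device/vendor * ID words are answered from values already cached in struct e1000_hw. * An illustrative read of one such word: * * u16 word; * hw->nvm.ops.read(hw, NVM_DEV_ID, 1, &word); (yields hw->device_id)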
**/ static s32 e1000_read_invm_i210(struct e1000_hw *hw, u16 offset, u16 E1000_UNUSEDARG words, u16 *data) { s32 ret_val = E1000_SUCCESS; DEBUGFUNC("e1000_read_invm_i210"); /* Only the MAC addr is required to be present in the iNVM */ switch (offset) { case NVM_MAC_ADDR: ret_val = e1000_read_invm_word_i210(hw, (u8)offset, &data[0]); ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+1, &data[1]); ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+2, &data[2]); if (ret_val != E1000_SUCCESS) DEBUGOUT("MAC Addr not found in iNVM\n"); break; case NVM_INIT_CTRL_2: ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); if (ret_val != E1000_SUCCESS) { *data = NVM_INIT_CTRL_2_DEFAULT_I211; ret_val = E1000_SUCCESS; } break; case NVM_INIT_CTRL_4: ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); if (ret_val != E1000_SUCCESS) { *data = NVM_INIT_CTRL_4_DEFAULT_I211; ret_val = E1000_SUCCESS; } break; case NVM_LED_1_CFG: ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); if (ret_val != E1000_SUCCESS) { *data = NVM_LED_1_CFG_DEFAULT_I211; ret_val = E1000_SUCCESS; } break; case NVM_LED_0_2_CFG: ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); if (ret_val != E1000_SUCCESS) { *data = NVM_LED_0_2_CFG_DEFAULT_I211; ret_val = E1000_SUCCESS; } break; case NVM_ID_LED_SETTINGS: ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); if (ret_val != E1000_SUCCESS) { *data = ID_LED_RESERVED_FFFF; ret_val = E1000_SUCCESS; } break; case NVM_SUB_DEV_ID: *data = hw->subsystem_device_id; break; case NVM_SUB_VEN_ID: *data = hw->subsystem_vendor_id; break; case NVM_DEV_ID: *data = hw->device_id; break; case NVM_VEN_ID: *data = hw->vendor_id; break; default: DEBUGOUT1("NVM word 0x%02x is not mapped.\n", offset); *data = NVM_RESERVED_WORD; break; } return ret_val; } /** * e1000_validate_nvm_checksum_i210 - Validate EEPROM checksum * @hw: pointer to the HW structure * * Calculates the EEPROM checksum by reading/adding each word of the EEPROM * and then verifies that the sum of the EEPROM is equal to 0xBABA. **/ s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw) { s32 status = E1000_SUCCESS; s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *); DEBUGFUNC("e1000_validate_nvm_checksum_i210"); if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { /* * Replace the read function with semaphore grabbing with * the one that skips this for a while. * We have semaphore taken already here. */ read_op_ptr = hw->nvm.ops.read; hw->nvm.ops.read = e1000_read_nvm_eerd; status = e1000_validate_nvm_checksum_generic(hw); /* Revert original read operation. */ hw->nvm.ops.read = read_op_ptr; hw->nvm.ops.release(hw); } else { status = E1000_ERR_SWFW_SYNC; } return status; } /** * e1000_update_nvm_checksum_i210 - Update EEPROM checksum * @hw: pointer to the HW structure * * Updates the EEPROM checksum by reading/adding each word of the EEPROM * up to the checksum. Then calculates the EEPROM checksum and writes the * value to the EEPROM. Next commit EEPROM data onto the Flash. **/ s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw) { s32 ret_val; u16 checksum = 0; u16 i, nvm_data; DEBUGFUNC("e1000_update_nvm_checksum_i210"); /* * Read the first word from the EEPROM. 
If this times out or fails, do * not continue or we could be in for a very long wait while every * EEPROM read fails */ ret_val = e1000_read_nvm_eerd(hw, 0, 1, &nvm_data); if (ret_val != E1000_SUCCESS) { DEBUGOUT("EEPROM read failed\n"); goto out; } if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { /* * Do not use hw->nvm.ops.write, hw->nvm.ops.read * because we do not want to take the synchronization * semaphores twice here. */ for (i = 0; i < NVM_CHECKSUM_REG; i++) { ret_val = e1000_read_nvm_eerd(hw, i, 1, &nvm_data); if (ret_val) { hw->nvm.ops.release(hw); DEBUGOUT("NVM Read Error while updating checksum.\n"); goto out; } checksum += nvm_data; } checksum = (u16) NVM_SUM - checksum; ret_val = e1000_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, &checksum); if (ret_val != E1000_SUCCESS) { hw->nvm.ops.release(hw); DEBUGOUT("NVM Write Error while updating checksum.\n"); goto out; } hw->nvm.ops.release(hw); ret_val = e1000_update_flash_i210(hw); } else { ret_val = E1000_ERR_SWFW_SYNC; } out: return ret_val; } /** * e1000_get_flash_presence_i210 - Check if flash device is detected. * @hw: pointer to the HW structure * **/ bool e1000_get_flash_presence_i210(struct e1000_hw *hw) { u32 eec = 0; bool ret_val = FALSE; DEBUGFUNC("e1000_get_flash_presence_i210"); eec = E1000_READ_REG(hw, E1000_EECD); if (eec & E1000_EECD_FLASH_DETECTED_I210) ret_val = TRUE; return ret_val; } /** * e1000_update_flash_i210 - Commit EEPROM to the flash * @hw: pointer to the HW structure * **/ s32 e1000_update_flash_i210(struct e1000_hw *hw) { s32 ret_val; u32 flup; DEBUGFUNC("e1000_update_flash_i210"); ret_val = e1000_pool_flash_update_done_i210(hw); if (ret_val == -E1000_ERR_NVM) { DEBUGOUT("Flash update time out\n"); goto out; } flup = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD_I210; E1000_WRITE_REG(hw, E1000_EECD, flup); ret_val = e1000_pool_flash_update_done_i210(hw); if (ret_val == E1000_SUCCESS) DEBUGOUT("Flash update complete\n"); else DEBUGOUT("Flash update time out\n"); out: return ret_val; } /** * e1000_pool_flash_update_done_i210 - Pool FLUDONE status. * @hw: pointer to the HW structure * **/ s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw) { s32 ret_val = -E1000_ERR_NVM; u32 i, reg; DEBUGFUNC("e1000_pool_flash_update_done_i210"); for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) { reg = E1000_READ_REG(hw, E1000_EECD); if (reg & E1000_EECD_FLUDONE_I210) { ret_val = E1000_SUCCESS; break; } usec_delay(5); } return ret_val; } /** * e1000_init_nvm_params_i210 - Initialize i210 NVM function pointers * @hw: pointer to the HW structure * * Initialize the i210/i211 NVM parameters and function pointers. **/ static s32 e1000_init_nvm_params_i210(struct e1000_hw *hw) { s32 ret_val; struct e1000_nvm_info *nvm = &hw->nvm; DEBUGFUNC("e1000_init_nvm_params_i210"); ret_val = e1000_init_nvm_params_82575(hw); nvm->ops.acquire = e1000_acquire_nvm_i210; nvm->ops.release = e1000_release_nvm_i210; nvm->ops.valid_led_default = e1000_valid_led_default_i210; if (e1000_get_flash_presence_i210(hw)) { hw->nvm.type = e1000_nvm_flash_hw; nvm->ops.read = e1000_read_nvm_srrd_i210; nvm->ops.write = e1000_write_nvm_srwr_i210; nvm->ops.validate = e1000_validate_nvm_checksum_i210; nvm->ops.update = e1000_update_nvm_checksum_i210; } else { hw->nvm.type = e1000_nvm_invm; nvm->ops.read = e1000_read_invm_i210; nvm->ops.write = e1000_null_write_nvm; nvm->ops.validate = e1000_null_ops_generic; nvm->ops.update = e1000_null_ops_generic; } return ret_val; } /** * e1000_init_function_pointers_i210 - Init func ptrs. 
* @hw: pointer to the HW structure * * Called to initialize all function pointers and parameters. **/ void e1000_init_function_pointers_i210(struct e1000_hw *hw) { e1000_init_function_pointers_82575(hw); hw->nvm.ops.init_params = e1000_init_nvm_params_i210; return; } /** * e1000_valid_led_default_i210 - Verify a valid default LED config * @hw: pointer to the HW structure * @data: pointer to the NVM (EEPROM) * * Read the EEPROM for the current default LED configuration. If the * LED configuration is not valid, set to a valid LED configuration. **/ static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data) { s32 ret_val; DEBUGFUNC("e1000_valid_led_default_i210"); ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); if (ret_val) { DEBUGOUT("NVM Read Error\n"); goto out; } if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { switch (hw->phy.media_type) { case e1000_media_type_internal_serdes: *data = ID_LED_DEFAULT_I210_SERDES; break; case e1000_media_type_copper: default: *data = ID_LED_DEFAULT_I210; break; } } out: return ret_val; } /** * __e1000_access_xmdio_reg - Read/write XMDIO register * @hw: pointer to the HW structure * @address: XMDIO address to program * @dev_addr: device address to program * @data: pointer to value to read/write from/to the XMDIO address * @read: boolean flag to indicate read or write **/ static s32 __e1000_access_xmdio_reg(struct e1000_hw *hw, u16 address, u8 dev_addr, u16 *data, bool read) { s32 ret_val; DEBUGFUNC("__e1000_access_xmdio_reg"); ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr); if (ret_val) return ret_val; ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address); if (ret_val) return ret_val; ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA | dev_addr); if (ret_val) return ret_val; if (read) ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data); else ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data); if (ret_val) return ret_val; /* Recalibrate the device back to 0 */ ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0); if (ret_val) return ret_val; return ret_val; } /** * e1000_read_xmdio_reg - Read XMDIO register * @hw: pointer to the HW structure * @addr: XMDIO address to program * @dev_addr: device address to program * @data: value to be read from the XMDIO address **/ s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data) { DEBUGFUNC("e1000_read_xmdio_reg"); return __e1000_access_xmdio_reg(hw, addr, dev_addr, data, TRUE); } /** * e1000_write_xmdio_reg - Write XMDIO register * @hw: pointer to the HW structure * @addr: XMDIO address to program * @dev_addr: device address to program * @data: value to be written to the XMDIO address **/ s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data) { DEBUGFUNC("e1000_write_xmdio_reg"); return __e1000_access_xmdio_reg(hw, addr, dev_addr, &data, FALSE); } /** * e1000_pll_workaround_i210 * @hw: pointer to the HW structure * * Works around an errata in the PLL circuit where it occasionally * provides the wrong clock frequency after power up.
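* * The recovery loop re-arms the iNVM autoload word with the PLL * workaround value through EEARBC, bounces the device through D3hot and * back via the PMCSR power-state bits, and then re-reads the PHY PLL * status, giving up after E1000_MAX_PLL_TRIES attempts.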
**/ static s32 e1000_pll_workaround_i210(struct e1000_hw *hw) { s32 ret_val; u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val; u16 nvm_word, phy_word, pci_word, tmp_nvm; int i; /* Get and set needed register values */ wuc = E1000_READ_REG(hw, E1000_WUC); mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG); reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO; E1000_WRITE_REG(hw, E1000_MDICNFG, reg_val); /* Get data from NVM, or set default */ ret_val = e1000_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD, &nvm_word); if (ret_val != E1000_SUCCESS) nvm_word = E1000_INVM_DEFAULT_AL; tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL; for (i = 0; i < E1000_MAX_PLL_TRIES; i++) { /* check current state directly from internal PHY */ e1000_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE | E1000_PHY_PLL_FREQ_REG), &phy_word); if ((phy_word & E1000_PHY_PLL_UNCONF) != E1000_PHY_PLL_UNCONF) { ret_val = E1000_SUCCESS; break; } else { ret_val = -E1000_ERR_PHY; } /* directly reset the internal PHY */ ctrl = E1000_READ_REG(hw, E1000_CTRL); E1000_WRITE_REG(hw, E1000_CTRL, ctrl|E1000_CTRL_PHY_RST); ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE); E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); E1000_WRITE_REG(hw, E1000_WUC, 0); reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16); E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val); e1000_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); pci_word |= E1000_PCI_PMCSR_D3; e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); msec_delay(1); pci_word &= ~E1000_PCI_PMCSR_D3; e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16); E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val); /* restore WUC register */ E1000_WRITE_REG(hw, E1000_WUC, wuc); } /* restore MDICNFG setting */ E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg); return ret_val; } /** + * e1000_get_cfg_done_i210 - Read config done bit + * @hw: pointer to the HW structure + * + * Read the management control register for the config done bit for + * completion status. NOTE: silicon which is EEPROM-less will fail trying + * to read the config done bit, so the error is *ONLY* logged and + * E1000_SUCCESS is returned. If we were to return an error, EEPROM-less + * silicon would not be able to be reset or change link. + **/ +static s32 e1000_get_cfg_done_i210(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + u32 mask = E1000_NVM_CFG_DONE_PORT_0; + + DEBUGFUNC("e1000_get_cfg_done_i210"); + + while (timeout) { + if (E1000_READ_REG(hw, E1000_EEMNGCTL_I210) & mask) + break; + msec_delay(1); + timeout--; + } + if (!timeout) + DEBUGOUT("MNG configuration cycle has not completed.\n"); + + return E1000_SUCCESS; +} + +/** * e1000_init_hw_i210 - Init hw for I210/I211 * @hw: pointer to the HW structure * * Called to initialize hw for i210 hw family.
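* * Note that hw->phy.ops.get_cfg_done is pointed at * e1000_get_cfg_done_i210 (above) before the 82575 init runs, so that * EEPROM-less parts do not fail reset or link changes.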
**/ s32 e1000_init_hw_i210(struct e1000_hw *hw) { s32 ret_val; DEBUGFUNC("e1000_init_hw_i210"); if ((hw->mac.type >= e1000_i210) && !(e1000_get_flash_presence_i210(hw))) { ret_val = e1000_pll_workaround_i210(hw); if (ret_val != E1000_SUCCESS) return ret_val; } + hw->phy.ops.get_cfg_done = e1000_get_cfg_done_i210; ret_val = e1000_init_hw_82575(hw); return ret_val; } Index: stable/10/sys/dev/e1000/e1000_ich8lan.c =================================================================== --- stable/10/sys/dev/e1000/e1000_ich8lan.c (revision 296054) +++ stable/10/sys/dev/e1000/e1000_ich8lan.c (revision 296055) @@ -1,5341 +1,6101 @@ /****************************************************************************** Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/ /*$FreeBSD$*/ /* 82562G 10/100 Network Connection * 82562G-2 10/100 Network Connection * 82562GT 10/100 Network Connection * 82562GT-2 10/100 Network Connection * 82562V 10/100 Network Connection * 82562V-2 10/100 Network Connection * 82566DC-2 Gigabit Network Connection * 82566DC Gigabit Network Connection * 82566DM-2 Gigabit Network Connection * 82566DM Gigabit Network Connection * 82566MC Gigabit Network Connection * 82566MM Gigabit Network Connection * 82567LM Gigabit Network Connection * 82567LF Gigabit Network Connection * 82567V Gigabit Network Connection * 82567LM-2 Gigabit Network Connection * 82567LF-2 Gigabit Network Connection * 82567V-2 Gigabit Network Connection * 82567LF-3 Gigabit Network Connection * 82567LM-3 Gigabit Network Connection * 82567LM-4 Gigabit Network Connection * 82577LM Gigabit Network Connection * 82577LC Gigabit Network Connection * 82578DM Gigabit Network Connection * 82578DC Gigabit Network Connection * 82579LM Gigabit Network Connection * 82579V Gigabit Network Connection * Ethernet Connection I217-LM * Ethernet Connection I217-V * Ethernet Connection I218-V * Ethernet Connection I218-LM * Ethernet Connection (2) I218-LM * Ethernet Connection (2) I218-V * Ethernet Connection (3) I218-LM * Ethernet Connection (3) I218-V */ #include "e1000_api.h" static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw); static void e1000_release_swflag_ich8lan(struct e1000_hw *hw); static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw); static void e1000_release_nvm_ich8lan(struct e1000_hw *hw); static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw); static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index); static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index); static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw); static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw, u8 *mc_addr_list, u32 mc_addr_count); static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw); static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw); static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active); static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active); static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active); static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw); static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw); +static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw); static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data); static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw); static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw); static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw); static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw); static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw); static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw); static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw); static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed, u16 *duplex); static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw); static s32 
e1000_led_on_ich8lan(struct e1000_hw *hw); static s32 e1000_led_off_ich8lan(struct e1000_hw *hw); static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link); static s32 e1000_setup_led_pchlan(struct e1000_hw *hw); static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw); static s32 e1000_led_on_pchlan(struct e1000_hw *hw); static s32 e1000_led_off_pchlan(struct e1000_hw *hw); static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw); static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank); static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw); static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw); static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, u8 *data); static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, u8 size, u16 *data); +static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, + u32 *data); +static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, + u32 offset, u32 *data); +static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, + u32 offset, u32 data); +static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw, + u32 offset, u32 dword); static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, u16 *data); static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, u8 byte); static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw); static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw); static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw); static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); static s32 e1000_k1_workaround_lv(struct e1000_hw *hw); static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate); static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr); /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ /* Offset 04h HSFSTS */ union ich8_hws_flash_status { struct ich8_hsfsts { u16 flcdone:1; /* bit 0 Flash Cycle Done */ u16 flcerr:1; /* bit 1 Flash Cycle Error */ u16 dael:1; /* bit 2 Direct Access error Log */ u16 berasesz:2; /* bit 4:3 Sector Erase Size */ u16 flcinprog:1; /* bit 5 flash cycle in Progress */ u16 reserved1:2; /* bit 7:6 Reserved */ u16 reserved2:6; /* bit 13:8 Reserved */ u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */ u16 flockdn:1; /* bit 15 Flash Config Lock-Down */ } hsf_status; u16 regval; }; /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */ /* Offset 06h FLCTL */ union ich8_hws_flash_ctrl { struct ich8_hsflctl { u16 flcgo:1; /* 0 Flash Cycle Go */ u16 flcycle:2; /* 2:1 Flash Cycle */ u16 reserved:5; /* 7:3 Reserved */ u16 fldbcount:2; /* 9:8 Flash Data Byte Count */ u16 flockdn:6; /* 15:10 Reserved */ } hsf_ctrl; u16 regval; }; /* ICH Flash Region Access Permissions */ union ich8_hws_flash_regacc { struct ich8_flracc { u32 grra:8; /* 0:7 GbE region Read Access */ u32 grwa:8; /* 8:15 GbE region Write Access */ u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */ u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */ } hsf_flregacc; u16 regval; }; /** * e1000_phy_is_accessible_pchlan - Check if able to access PHY registers * @hw: pointer to the HW structure * * Test access to the PHY registers by reading the PHY ID registers. If * the PHY ID is already known (e.g. resume path) compare it with known ID, * otherwise assume the read PHY ID is correct if it is valid. * * Assumes the sw/fw/hw semaphore is already acquired.
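* * The 32-bit PHY id is assembled from the two 16-bit id registers * (illustrative variable names): * * phy_id = (u32)(id1 << 16); * phy_id |= (u32)(id2 & PHY_REVISION_MASK); * * and a read of 0xFFFF from either register is treated as no PHY.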
**/ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw) { u16 phy_reg = 0; u32 phy_id = 0; s32 ret_val = 0; u16 retry_count; u32 mac_reg = 0; for (retry_count = 0; retry_count < 2; retry_count++) { ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg); if (ret_val || (phy_reg == 0xFFFF)) continue; phy_id = (u32)(phy_reg << 16); ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg); if (ret_val || (phy_reg == 0xFFFF)) { phy_id = 0; continue; } phy_id |= (u32)(phy_reg & PHY_REVISION_MASK); break; } if (hw->phy.id) { if (hw->phy.id == phy_id) goto out; } else if (phy_id) { hw->phy.id = phy_id; hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK); goto out; } /* In case the PHY needs to be in mdio slow mode, * set slow mode and try to get the PHY id again. */ if (hw->mac.type < e1000_pch_lpt) { hw->phy.ops.release(hw); ret_val = e1000_set_mdio_slow_mode_hv(hw); if (!ret_val) ret_val = e1000_get_phy_id(hw); hw->phy.ops.acquire(hw); } if (ret_val) return FALSE; out: - if (hw->mac.type == e1000_pch_lpt) { - /* Unforce SMBus mode in PHY */ - hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg); - phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; - hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg); + if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { + /* Only unforce SMBus if ME is not active */ + if (!(E1000_READ_REG(hw, E1000_FWSM) & + E1000_ICH_FWSM_FW_VALID)) { + /* Unforce SMBus mode in PHY */ + hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg); + phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; + hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg); - /* Unforce SMBus mode in MAC */ - mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT); - mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; - E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg); + /* Unforce SMBus mode in MAC */ + mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg); + } } return TRUE; } /** * e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value * @hw: pointer to the HW structure * * Toggling the LANPHYPC pin value fully power-cycles the PHY and is * used to reset the PHY to a quiescent state when necessary. **/ static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw) { u32 mac_reg; DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt"); /* Set Phy Config Counter to 50msec */ mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3); mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK; mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC; E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg); /* Toggle LANPHYPC Value bit */ mac_reg = E1000_READ_REG(hw, E1000_CTRL); mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE; mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE; E1000_WRITE_REG(hw, E1000_CTRL, mac_reg); E1000_WRITE_FLUSH(hw); usec_delay(10); mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE; E1000_WRITE_REG(hw, E1000_CTRL, mac_reg); E1000_WRITE_FLUSH(hw); if (hw->mac.type < e1000_pch_lpt) { msec_delay(50); } else { u16 count = 20; do { msec_delay(5); } while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) & E1000_CTRL_EXT_LPCD) && count--); msec_delay(30); } } /** * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds * @hw: pointer to the HW structure * * Workarounds/flow necessary for PHY initialization during driver load * and resume paths. 
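* * For LPT/SPT parts the recovery order is: try the PHY directly; failing * that, force the MAC into SMBus mode and retry; failing that, toggle * LANPHYPC to power-cycle the PHY, take the MAC back out of SMBus mode, * and test accessibility one last time.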
**/ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) { u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM); s32 ret_val; DEBUGFUNC("e1000_init_phy_workarounds_pchlan"); /* Gate automatic PHY configuration by hardware on managed and * non-managed 82579 and newer adapters. */ e1000_gate_hw_phy_config_ich8lan(hw, TRUE); /* It is not possible to be certain of the current state of ULP * so forcibly disable it. */ hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown; e1000_disable_ulp_lpt_lp(hw, TRUE); ret_val = hw->phy.ops.acquire(hw); if (ret_val) { DEBUGOUT("Failed to initialize PHY flow\n"); goto out; } /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is * inaccessible and resetting the PHY is not blocked, toggle the * LANPHYPC Value bit to force the interconnect to PCIe mode. */ switch (hw->mac.type) { case e1000_pch_lpt: + case e1000_pch_spt: if (e1000_phy_is_accessible_pchlan(hw)) break; /* Before toggling LANPHYPC, see if PHY is accessible by * forcing MAC to SMBus mode first. */ mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT); mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg); /* Wait 50 milliseconds for MAC to finish any retries * that it might be trying to perform from previous * attempts to acknowledge any phy read requests. */ msec_delay(50); /* fall-through */ case e1000_pch2lan: if (e1000_phy_is_accessible_pchlan(hw)) break; /* fall-through */ case e1000_pchlan: if ((hw->mac.type == e1000_pchlan) && (fwsm & E1000_ICH_FWSM_FW_VALID)) break; if (hw->phy.ops.check_reset_block(hw)) { DEBUGOUT("Required LANPHYPC toggle blocked by ME\n"); ret_val = -E1000_ERR_PHY; break; } /* Toggle LANPHYPC Value bit */ e1000_toggle_lanphypc_pch_lpt(hw); if (hw->mac.type >= e1000_pch_lpt) { if (e1000_phy_is_accessible_pchlan(hw)) break; /* Toggling LANPHYPC brings the PHY out of SMBus mode * so ensure that the MAC is also out of SMBus mode */ mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT); mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg); if (e1000_phy_is_accessible_pchlan(hw)) break; ret_val = -E1000_ERR_PHY; } break; default: break; } hw->phy.ops.release(hw); if (!ret_val) { /* Check to see if able to reset PHY. Print error if not */ if (hw->phy.ops.check_reset_block(hw)) { ERROR_REPORT("Reset blocked by ME\n"); goto out; } /* Reset the PHY before any access to it. Doing so, ensures * that the PHY is in a known good state before we read/write * PHY registers. The generic reset is sufficient here, * because we haven't determined the PHY type yet. */ ret_val = e1000_phy_hw_reset_generic(hw); if (ret_val) goto out; /* On a successful reset, possibly need to wait for the PHY * to quiesce to an accessible state before returning control * to the calling function. If the PHY does not quiesce, then * return E1000E_BLK_PHY_RESET, as this is the condition that * the PHY is in. */ ret_val = hw->phy.ops.check_reset_block(hw); if (ret_val) ERROR_REPORT("ME blocked access to PHY after reset\n"); } out: /* Ungate automatic PHY configuration on non-managed 82579 */ if ((hw->mac.type == e1000_pch2lan) && !(fwsm & E1000_ICH_FWSM_FW_VALID)) { msec_delay(10); e1000_gate_hw_phy_config_ich8lan(hw, FALSE); } return ret_val; } /** * e1000_init_phy_params_pchlan - Initialize PHY function pointers * @hw: pointer to the HW structure * * Initialize family-specific PHY parameters and function pointers. 
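* * If the PHY id is still unknown after the workaround flow, pch2lan and * newer parts drop the MDIO interface into slow mode and retry the id * read once more before giving up.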
**/ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; DEBUGFUNC("e1000_init_phy_params_pchlan"); phy->addr = 1; phy->reset_delay_us = 100; phy->ops.acquire = e1000_acquire_swflag_ich8lan; phy->ops.check_reset_block = e1000_check_reset_block_ich8lan; phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan; phy->ops.set_page = e1000_set_page_igp; phy->ops.read_reg = e1000_read_phy_reg_hv; phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked; phy->ops.read_reg_page = e1000_read_phy_reg_page_hv; phy->ops.release = e1000_release_swflag_ich8lan; phy->ops.reset = e1000_phy_hw_reset_ich8lan; phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan; phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan; phy->ops.write_reg = e1000_write_phy_reg_hv; phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked; phy->ops.write_reg_page = e1000_write_phy_reg_page_hv; phy->ops.power_up = e1000_power_up_phy_copper; phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; phy->id = e1000_phy_unknown; ret_val = e1000_init_phy_workarounds_pchlan(hw); if (ret_val) return ret_val; if (phy->id == e1000_phy_unknown) switch (hw->mac.type) { default: ret_val = e1000_get_phy_id(hw); if (ret_val) return ret_val; if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK)) break; /* fall-through */ case e1000_pch2lan: case e1000_pch_lpt: + case e1000_pch_spt: /* In case the PHY needs to be in mdio slow mode, * set slow mode and try to get the PHY id again. */ ret_val = e1000_set_mdio_slow_mode_hv(hw); if (ret_val) return ret_val; ret_val = e1000_get_phy_id(hw); if (ret_val) return ret_val; break; } phy->type = e1000_get_phy_type_from_id(phy->id); switch (phy->type) { case e1000_phy_82577: case e1000_phy_82579: case e1000_phy_i217: phy->ops.check_polarity = e1000_check_polarity_82577; phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_82577; phy->ops.get_cable_length = e1000_get_cable_length_82577; phy->ops.get_info = e1000_get_phy_info_82577; phy->ops.commit = e1000_phy_sw_reset_generic; break; case e1000_phy_82578: phy->ops.check_polarity = e1000_check_polarity_m88; phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; phy->ops.get_cable_length = e1000_get_cable_length_m88; phy->ops.get_info = e1000_get_phy_info_m88; break; default: ret_val = -E1000_ERR_PHY; break; } return ret_val; } /** * e1000_init_phy_params_ich8lan - Initialize PHY function pointers * @hw: pointer to the HW structure * * Initialize family-specific PHY parameters and function pointers. 
**/ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 i = 0; DEBUGFUNC("e1000_init_phy_params_ich8lan"); phy->addr = 1; phy->reset_delay_us = 100; phy->ops.acquire = e1000_acquire_swflag_ich8lan; phy->ops.check_reset_block = e1000_check_reset_block_ich8lan; phy->ops.get_cable_length = e1000_get_cable_length_igp_2; phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan; phy->ops.read_reg = e1000_read_phy_reg_igp; phy->ops.release = e1000_release_swflag_ich8lan; phy->ops.reset = e1000_phy_hw_reset_ich8lan; phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan; phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan; phy->ops.write_reg = e1000_write_phy_reg_igp; phy->ops.power_up = e1000_power_up_phy_copper; phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; /* We may need to do this twice - once for IGP and if that fails, * we'll set BM func pointers and try again */ ret_val = e1000_determine_phy_address(hw); if (ret_val) { phy->ops.write_reg = e1000_write_phy_reg_bm; phy->ops.read_reg = e1000_read_phy_reg_bm; ret_val = e1000_determine_phy_address(hw); if (ret_val) { DEBUGOUT("Cannot determine PHY addr. Erroring out\n"); return ret_val; } } phy->id = 0; while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) && (i++ < 100)) { msec_delay(1); ret_val = e1000_get_phy_id(hw); if (ret_val) return ret_val; } /* Verify phy id */ switch (phy->id) { case IGP03E1000_E_PHY_ID: phy->type = e1000_phy_igp_3; phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked; phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked; phy->ops.get_info = e1000_get_phy_info_igp; phy->ops.check_polarity = e1000_check_polarity_igp; phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp; break; case IFE_E_PHY_ID: case IFE_PLUS_E_PHY_ID: case IFE_C_E_PHY_ID: phy->type = e1000_phy_ife; phy->autoneg_mask = E1000_ALL_NOT_GIG; phy->ops.get_info = e1000_get_phy_info_ife; phy->ops.check_polarity = e1000_check_polarity_ife; phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife; break; case BME1000_E_PHY_ID: phy->type = e1000_phy_bm; phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; phy->ops.read_reg = e1000_read_phy_reg_bm; phy->ops.write_reg = e1000_write_phy_reg_bm; phy->ops.commit = e1000_phy_sw_reset_generic; phy->ops.get_info = e1000_get_phy_info_m88; phy->ops.check_polarity = e1000_check_polarity_m88; phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; break; default: return -E1000_ERR_PHY; break; } return E1000_SUCCESS; } /** * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers * @hw: pointer to the HW structure * * Initialize family-specific NVM parameters and function * pointers. **/ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) { struct e1000_nvm_info *nvm = &hw->nvm; struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; u32 gfpreg, sector_base_addr, sector_end_addr; u16 i; + u32 nvm_size; DEBUGFUNC("e1000_init_nvm_params_ich8lan"); - /* Can't read flash registers if the register set isn't mapped. */ nvm->type = e1000_nvm_flash_sw; - if (!hw->flash_address) { - DEBUGOUT("ERROR: Flash registers not mapped\n"); - return -E1000_ERR_CONFIG; - } - gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG); + if (hw->mac.type == e1000_pch_spt) { + /* in SPT, gfpreg doesn't exist. NVM size is taken from the + * STRAP register. 
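+	 * (For scale: if the 5-bit strap field reads n, the total flash + * size is (n + 1) * NVM_SIZE_MULTIPLIER bytes; one bank is half of + * that, and the later division by sizeof(u16) converts the bank + * size from bytes to 16-bit words.)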
This is because in SPT the GbE Flash region + * is no longer accessed through the flash registers. Instead, + * the mechanism has changed, and the Flash region access + * registers are now implemented in GbE memory space. + */ + nvm->flash_base_addr = 0; + nvm_size = + (((E1000_READ_REG(hw, E1000_STRAP) >> 1) & 0x1F) + 1) + * NVM_SIZE_MULTIPLIER; + nvm->flash_bank_size = nvm_size / 2; + /* Adjust to word count */ + nvm->flash_bank_size /= sizeof(u16); + /* Set the base address for flash register access */ + hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR; + } else { + /* Can't read flash registers if register set isn't mapped. */ + if (!hw->flash_address) { + DEBUGOUT("ERROR: Flash registers not mapped\n"); + return -E1000_ERR_CONFIG; + } - /* sector_X_addr is a "sector"-aligned address (4096 bytes) - * Add 1 to sector_end_addr since this sector is included in - * the overall size. - */ - sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK; - sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1; + gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG); - /* flash_base_addr is byte-aligned */ - nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT; + /* sector_X_addr is a "sector"-aligned address (4096 bytes) + * Add 1 to sector_end_addr since this sector is included in + * the overall size. + */ + sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK; + sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1; - /* find total size of the NVM, then cut in half since the total - * size represents two separate NVM banks. - */ - nvm->flash_bank_size = ((sector_end_addr - sector_base_addr) - << FLASH_SECTOR_ADDR_SHIFT); - nvm->flash_bank_size /= 2; - /* Adjust to word count */ - nvm->flash_bank_size /= sizeof(u16); + /* flash_base_addr is byte-aligned */ + nvm->flash_base_addr = sector_base_addr + << FLASH_SECTOR_ADDR_SHIFT; + /* find total size of the NVM, then cut in half since the total + * size represents two separate NVM banks. + */ + nvm->flash_bank_size = ((sector_end_addr - sector_base_addr) + << FLASH_SECTOR_ADDR_SHIFT); + nvm->flash_bank_size /= 2; + /* Adjust to word count */ + nvm->flash_bank_size /= sizeof(u16); + } + nvm->word_size = E1000_SHADOW_RAM_WORDS; /* Clear shadow ram */ for (i = 0; i < nvm->word_size; i++) { dev_spec->shadow_ram[i].modified = FALSE; dev_spec->shadow_ram[i].value = 0xFFFF; } E1000_MUTEX_INIT(&dev_spec->nvm_mutex); E1000_MUTEX_INIT(&dev_spec->swflag_mutex); /* Function Pointers */ nvm->ops.acquire = e1000_acquire_nvm_ich8lan; nvm->ops.release = e1000_release_nvm_ich8lan; - nvm->ops.read = e1000_read_nvm_ich8lan; - nvm->ops.update = e1000_update_nvm_checksum_ich8lan; + if (hw->mac.type == e1000_pch_spt) { + nvm->ops.read = e1000_read_nvm_spt; + nvm->ops.update = e1000_update_nvm_checksum_spt; + } else { + nvm->ops.read = e1000_read_nvm_ich8lan; + nvm->ops.update = e1000_update_nvm_checksum_ich8lan; + } nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan; nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan; nvm->ops.write = e1000_write_nvm_ich8lan; return E1000_SUCCESS; } /** * e1000_init_mac_params_ich8lan - Initialize MAC function pointers * @hw: pointer to the HW structure * * Initialize family-specific MAC parameters and function * pointers. 
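* * pch_spt parts share the pch_lpt configuration below: the same RAR * entry count, the rar_set/setup_physical_interface callbacks, and the * OBFF timer hook.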
**/ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; -#if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT) - u16 pci_cfg; -#endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */ DEBUGFUNC("e1000_init_mac_params_ich8lan"); /* Set media type function pointer */ hw->phy.media_type = e1000_media_type_copper; /* Set mta register count */ mac->mta_reg_count = 32; /* Set rar entry count */ mac->rar_entry_count = E1000_ICH_RAR_ENTRIES; if (mac->type == e1000_ich8lan) mac->rar_entry_count--; /* Set if part includes ASF firmware */ mac->asf_firmware_present = TRUE; /* FWSM register */ mac->has_fwsm = TRUE; /* ARC subsystem not supported */ mac->arc_subsystem_valid = FALSE; /* Adaptive IFS supported */ mac->adaptive_ifs = TRUE; /* Function pointers */ /* bus type/speed/width */ mac->ops.get_bus_info = e1000_get_bus_info_ich8lan; /* function id */ mac->ops.set_lan_id = e1000_set_lan_id_single_port; /* reset */ mac->ops.reset_hw = e1000_reset_hw_ich8lan; /* hw initialization */ mac->ops.init_hw = e1000_init_hw_ich8lan; /* link setup */ mac->ops.setup_link = e1000_setup_link_ich8lan; /* physical interface setup */ mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan; /* check for link */ mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan; /* link info */ mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan; /* multicast address update */ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic; /* clear hardware counters */ mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan; /* LED and other operations */ switch (mac->type) { case e1000_ich8lan: case e1000_ich9lan: case e1000_ich10lan: /* check management mode */ mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan; /* ID LED init */ mac->ops.id_led_init = e1000_id_led_init_generic; /* blink LED */ mac->ops.blink_led = e1000_blink_led_generic; /* setup LED */ mac->ops.setup_led = e1000_setup_led_generic; /* cleanup LED */ mac->ops.cleanup_led = e1000_cleanup_led_ich8lan; /* turn on/off LED */ mac->ops.led_on = e1000_led_on_ich8lan; mac->ops.led_off = e1000_led_off_ich8lan; break; case e1000_pch2lan: mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES; mac->ops.rar_set = e1000_rar_set_pch2lan; /* fall-through */ case e1000_pch_lpt: + case e1000_pch_spt: /* multicast address update for pch2 */ mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_pch2lan; + /* fall-through */ case e1000_pchlan: -#if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT) - /* save PCH revision_id */ - e1000_read_pci_cfg(hw, E1000_PCI_REVISION_ID_REG, &pci_cfg); - hw->revision_id = (u8)(pci_cfg &= 0x000F); -#endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */ /* check management mode */ mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; /* ID LED init */ mac->ops.id_led_init = e1000_id_led_init_pchlan; /* setup LED */ mac->ops.setup_led = e1000_setup_led_pchlan; /* cleanup LED */ mac->ops.cleanup_led = e1000_cleanup_led_pchlan; /* turn on/off LED */ mac->ops.led_on = e1000_led_on_pchlan; mac->ops.led_off = e1000_led_off_pchlan; break; default: break; } - if (mac->type == e1000_pch_lpt) { + if ((mac->type == e1000_pch_lpt) || + (mac->type == e1000_pch_spt)) { mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES; mac->ops.rar_set = e1000_rar_set_pch_lpt; mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt; mac->ops.set_obff_timer = e1000_set_obff_timer_pch_lpt; } /* Enable PCS Lock-loss workaround for ICH8 */ if (mac->type == e1000_ich8lan) 
e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE); return E1000_SUCCESS; } /** * __e1000_access_emi_reg_locked - Read/write EMI register * @hw: pointer to the HW structure * @addr: EMI address to program * @data: pointer to value to read/write from/to the EMI address * @read: boolean flag to indicate read or write * * This helper function assumes the SW/FW/HW Semaphore is already acquired. **/ static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address, u16 *data, bool read) { s32 ret_val; DEBUGFUNC("__e1000_access_emi_reg_locked"); ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address); if (ret_val) return ret_val; if (read) ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA, data); else ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, *data); return ret_val; } /** * e1000_read_emi_reg_locked - Read Extended Management Interface register * @hw: pointer to the HW structure * @addr: EMI address to program * @data: value to be read from the EMI address * * Assumes the SW/FW/HW Semaphore is already acquired. **/ s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data) { DEBUGFUNC("e1000_read_emi_reg_locked"); return __e1000_access_emi_reg_locked(hw, addr, data, TRUE); } /** * e1000_write_emi_reg_locked - Write Extended Management Interface register * @hw: pointer to the HW structure * @addr: EMI address to program * @data: value to be written to the EMI address * * Assumes the SW/FW/HW Semaphore is already acquired. **/ s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data) { DEBUGFUNC("e1000_write_emi_reg_locked"); return __e1000_access_emi_reg_locked(hw, addr, &data, FALSE); } /** * e1000_set_eee_pchlan - Enable/disable EEE support * @hw: pointer to the HW structure * * Enable/disable EEE based on setting in dev_spec structure, the duplex of * the link and the EEE capabilities of the link partner. The LPI Control * register bits will remain set only if/when link is up. * * EEE LPI must not be asserted earlier than one second after link is up. * On 82579, EEE LPI should not be enabled until such time otherwise there * can be link issues with some switches. Other devices can have EEE LPI * enabled immediately upon link up since they have a timer in hardware which * prevents LPI from being asserted too early. **/ s32 e1000_set_eee_pchlan(struct e1000_hw *hw) { struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; s32 ret_val; u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data; DEBUGFUNC("e1000_set_eee_pchlan"); switch (hw->phy.type) { case e1000_phy_82579: lpa = I82579_EEE_LP_ABILITY; pcs_status = I82579_EEE_PCS_STATUS; adv_addr = I82579_EEE_ADVERTISEMENT; break; case e1000_phy_i217: lpa = I217_EEE_LP_ABILITY; pcs_status = I217_EEE_PCS_STATUS; adv_addr = I217_EEE_ADVERTISEMENT; break; default: return E1000_SUCCESS; } ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl); if (ret_val) goto release; /* Clear bits that enable EEE in various speeds */ lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK; /* Enable EEE if not disabled by user */ if (!dev_spec->eee_disable) { /* Save off link partner's EEE ability */ ret_val = e1000_read_emi_reg_locked(hw, lpa, &dev_spec->eee_lp_ability); if (ret_val) goto release; /* Read EEE advertisement */ ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv); if (ret_val) goto release; /* Enable EEE only for speeds in which the link partner is * EEE capable and for which we advertise EEE.
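* In practice an enable bit is set only when the same speed appears in * both masks, i.e. adv & dev_spec->eee_lp_ability & * I82579_EEE_1000_SUPPORTED (and the 100 variant, which is additionally * gated on the partner advertising 100TX full-duplex below).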
*/ if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED) lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE; if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) { hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data); if (data & NWAY_LPAR_100TX_FD_CAPS) lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE; else /* EEE is not supported in 100Half, so ignore * partner's EEE in 100 ability if full-duplex * is not advertised. */ dev_spec->eee_lp_ability &= ~I82579_EEE_100_SUPPORTED; } } if (hw->phy.type == e1000_phy_82579) { ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT, &data); if (ret_val) goto release; data &= ~I82579_LPI_100_PLL_SHUT; ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT, data); } /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */ ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data); if (ret_val) goto release; ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl); release: hw->phy.ops.release(hw); return ret_val; } /** * e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP * @hw: pointer to the HW structure * @link: link up bool flag * * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications * preventing further DMA write requests. Workaround the issue by disabling * the de-assertion of the clock request when in 1Gbps mode. * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link * speeds in order to avoid Tx hangs. **/ static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link) { u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6); u32 status = E1000_READ_REG(hw, E1000_STATUS); s32 ret_val = E1000_SUCCESS; u16 reg; if (link && (status & E1000_STATUS_SPEED_1000)) { ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, &reg); if (ret_val) goto release; ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, reg & ~E1000_KMRNCTRLSTA_K1_ENABLE); if (ret_val) goto release; usec_delay(10); E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK); ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, reg); release: hw->phy.ops.release(hw); } else { /* clear FEXTNVM6 bit 8 on link down or 10/100 */ fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK; - if (!link || ((status & E1000_STATUS_SPEED_100) && - (status & E1000_STATUS_FD))) + if ((hw->phy.revision > 5) || !link || + ((status & E1000_STATUS_SPEED_100) && + (status & E1000_STATUS_FD))) goto update_fextnvm6; ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg); if (ret_val) return ret_val; /* Clear link status transmit timeout */ reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK; if (status & E1000_STATUS_SPEED_100) { /* Set inband Tx timeout to 5x10us for 100Half */ reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT; /* Do not extend the K1 entry latency for 100Half */ fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION; } else { /* Set inband Tx timeout to 50x10us for 10Full/Half */ reg |= 50 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT; /* Extend the K1 entry latency for 10 Mbps */ fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION; } ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg); if (ret_val) return ret_val; update_fextnvm6: E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6); } return ret_val; } static u64 e1000_ltr2ns(u16 ltr) { u32 value, scale; /* Determine the latency in nsec based on the LTR value & scale */ value = ltr & E1000_LTRV_VALUE_MASK; scale = (ltr & E1000_LTRV_SCALE_MASK)
>> E1000_LTRV_SCALE_SHIFT; return value * (1 << (scale * E1000_LTRV_SCALE_FACTOR)); } /** * e1000_platform_pm_pch_lpt - Set platform power management values * @hw: pointer to the HW structure * @link: bool indicating link status * * Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like" * GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed * when link is up (which must not exceed the maximum latency supported * by the platform), otherwise specify there is no LTR requirement. * Unlike TRUE-PCIe devices which set the LTR maximum snoop/no-snoop * latencies in the LTR Extended Capability Structure in the PCIe Extended * Capability register set, on this device LTR is set by writing the * equivalent snoop/no-snoop latencies in the LTRV register in the MAC and * set the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB) * message to the PMC. * * Use the LTR value to calculate the Optimized Buffer Flush/Fill (OBFF) * high-water mark. **/ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link) { u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) | link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND; u16 lat_enc = 0; /* latency encoded */ s32 obff_hwm = 0; DEBUGFUNC("e1000_platform_pm_pch_lpt"); if (link) { u16 speed, duplex, scale = 0; u16 max_snoop, max_nosnoop; u16 max_ltr_enc; /* max LTR latency encoded */ s64 lat_ns; s64 value; u32 rxa; if (!hw->mac.max_frame_size) { DEBUGOUT("max_frame_size not set.\n"); return -E1000_ERR_CONFIG; } hw->mac.ops.get_link_up_info(hw, &speed, &duplex); if (!speed) { DEBUGOUT("Speed not set.\n"); return -E1000_ERR_CONFIG; } /* Rx Packet Buffer Allocation size (KB) */ rxa = E1000_READ_REG(hw, E1000_PBA) & E1000_PBA_RXA_MASK; /* Determine the maximum latency tolerated by the device. * * Per the PCIe spec, the tolerated latencies are encoded as * a 3-bit encoded scale (only 0-5 are valid) multiplied by * a 10-bit value (0-1023) to provide a range from 1 ns to * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns, * 1=2^5ns, 2=2^10ns,...5=2^25ns. */ lat_ns = ((s64)rxa * 1024 - (2 * (s64)hw->mac.max_frame_size)) * 8 * 1000; if (lat_ns < 0) lat_ns = 0; else lat_ns /= speed; value = lat_ns; while (value > E1000_LTRV_VALUE_MASK) { scale++; value = E1000_DIVIDE_ROUND_UP(value, (1 << 5)); } if (scale > E1000_LTRV_SCALE_MAX) { DEBUGOUT1("Invalid LTR latency scale %d\n", scale); return -E1000_ERR_CONFIG; } lat_enc = (u16)((scale << E1000_LTRV_SCALE_SHIFT) | value); /* Determine the maximum latency tolerated by the platform */ e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT, &max_snoop); e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop); max_ltr_enc = E1000_MAX(max_snoop, max_nosnoop); if (lat_enc > max_ltr_enc) { lat_enc = max_ltr_enc; lat_ns = e1000_ltr2ns(max_ltr_enc); } if (lat_ns) { lat_ns *= speed * 1000; lat_ns /= 8; lat_ns /= 1000000000; obff_hwm = (s32)(rxa - lat_ns); } if ((obff_hwm < 0) || (obff_hwm > E1000_SVT_OFF_HWM_MASK)) { DEBUGOUT1("Invalid high water mark %d\n", obff_hwm); return -E1000_ERR_CONFIG; } } /* Set Snoop and No-Snoop latencies the same */ reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT); E1000_WRITE_REG(hw, E1000_LTRV, reg); /* Set OBFF high water mark */ reg = E1000_READ_REG(hw, E1000_SVT) & ~E1000_SVT_OFF_HWM_MASK; reg |= obff_hwm; E1000_WRITE_REG(hw, E1000_SVT, reg); /* Enable OBFF */ reg = E1000_READ_REG(hw, E1000_SVCR); reg |= E1000_SVCR_OFF_EN; /* Always unblock interrupts to the CPU even when the system is * in OBFF mode. 
This ensures that small round-robin traffic * (like ping) does not get dropped or experience long latency. */ reg |= E1000_SVCR_OFF_MASKINT; E1000_WRITE_REG(hw, E1000_SVCR, reg); return E1000_SUCCESS; } /** * e1000_set_obff_timer_pch_lpt - Update Optimized Buffer Flush/Fill timer * @hw: pointer to the HW structure * @itr: interrupt throttling rate * * Configure OBFF with the updated interrupt rate. **/ static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr) { u32 svcr; s32 timer; DEBUGFUNC("e1000_set_obff_timer_pch_lpt"); /* Convert ITR value into microseconds for OBFF timer */ timer = itr & E1000_ITR_MASK; timer = (timer * E1000_ITR_MULT) / 1000; if ((timer < 0) || (timer > E1000_ITR_MASK)) { DEBUGOUT1("Invalid OBFF timer %d\n", timer); return -E1000_ERR_CONFIG; } svcr = E1000_READ_REG(hw, E1000_SVCR); svcr &= ~E1000_SVCR_OFF_TIMER_MASK; svcr |= timer << E1000_SVCR_OFF_TIMER_SHIFT; E1000_WRITE_REG(hw, E1000_SVCR, svcr); return E1000_SUCCESS; } /** * e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP * @hw: pointer to the HW structure * @to_sx: boolean indicating a system power state transition to Sx * * When link is down, configure ULP mode to significantly reduce the power * to the PHY. If on a Manageability Engine (ME) enabled system, tell the * ME firmware to start the ULP configuration. If not on an ME enabled * system, configure the ULP mode by software. */ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx) { u32 mac_reg; s32 ret_val = E1000_SUCCESS; u16 phy_reg; + u16 oem_reg = 0; if ((hw->mac.type < e1000_pch_lpt) || (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) || (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) || (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) || (hw->device_id == E1000_DEV_ID_PCH_I218_V2) || (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on)) return 0; if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) { /* Request ME configure ULP mode in the PHY */ mac_reg = E1000_READ_REG(hw, E1000_H2ME); mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS; E1000_WRITE_REG(hw, E1000_H2ME, mac_reg); goto out; } if (!to_sx) { int i = 0; /* Poll up to 5 seconds for Cable Disconnected indication */ while (!(E1000_READ_REG(hw, E1000_FEXT) & E1000_FEXT_PHY_CABLE_DISCONNECTED)) { /* Bail if link is re-acquired */ if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU) return -E1000_ERR_PHY; if (i++ == 100) break; msec_delay(50); } DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n", (E1000_READ_REG(hw, E1000_FEXT) & E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not", i * 50); } ret_val = hw->phy.ops.acquire(hw); if (ret_val) goto out; /* Force SMBus mode in PHY */ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); if (ret_val) goto release; phy_reg |= CV_SMB_CTRL_FORCE_SMBUS; e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg); /* Force SMBus mode in MAC */ mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT); mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg); + /* Si workaround for ULP entry flow on i217/rev6 h/w.
Enable + * LPLU and disable Gig speed when entering ULP + */ + if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) { + ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS, + &oem_reg); + if (ret_val) + goto release; + + phy_reg = oem_reg; + phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS; + + ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS, + phy_reg); + + if (ret_val) + goto release; + } + /* Set Inband ULP Exit, Reset to SMBus mode and * Disable SMBus Release on PERST# in PHY */ ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg); if (ret_val) goto release; phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS | I218_ULP_CONFIG1_DISABLE_SMB_PERST); if (to_sx) { if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC) phy_reg |= I218_ULP_CONFIG1_WOL_HOST; + else + phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST; phy_reg |= I218_ULP_CONFIG1_STICKY_ULP; + phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT; } else { phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT; + phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP; + phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST; } e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); /* Set Disable SMBus Release on PERST# in MAC */ mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7); mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST; E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg); /* Commit ULP changes in PHY by starting auto ULP configuration */ phy_reg |= I218_ULP_CONFIG1_START; e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); + + if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) && + to_sx && (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) { + ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS, + oem_reg); + if (ret_val) + goto release; + } + release: hw->phy.ops.release(hw); out: if (ret_val) DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val); else hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on; return ret_val; } /** * e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP * @hw: pointer to the HW structure * @force: boolean indicating whether or not to force disabling ULP * * Un-configure ULP mode when link is up, the system is transitioned from * Sx or the driver is unloaded. If on a Manageability Engine (ME) enabled * system, poll for an indication from ME that ULP has been un-configured. * If not on an ME enabled system, un-configure the ULP mode by software. * * During nominal operation, this function is called when link is acquired * to disable ULP mode (force=FALSE); otherwise, for example when unloading * the driver or during Sx->S0 transitions, this is called with force=TRUE * to forcibly disable ULP. */ s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force) { s32 ret_val = E1000_SUCCESS; u32 mac_reg; u16 phy_reg; int i = 0; if ((hw->mac.type < e1000_pch_lpt) || (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) || (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) || (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) || (hw->device_id == E1000_DEV_ID_PCH_I218_V2) || (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off)) return 0; if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) { if (force) { /* Request ME un-configure ULP mode in the PHY */ mac_reg = E1000_READ_REG(hw, E1000_H2ME); mac_reg &= ~E1000_H2ME_ULP; mac_reg |= E1000_H2ME_ENFORCE_SETTINGS; E1000_WRITE_REG(hw, E1000_H2ME, mac_reg); } - /* Poll up to 100msec for ME to clear ULP_CFG_DONE */ + /* Poll up to 300msec for ME to clear ULP_CFG_DONE. 
*/ while (E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_ULP_CFG_DONE) { - if (i++ == 10) { + if (i++ == 30) { ret_val = -E1000_ERR_PHY; goto out; } msec_delay(10); } DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10); if (force) { mac_reg = E1000_READ_REG(hw, E1000_H2ME); mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS; E1000_WRITE_REG(hw, E1000_H2ME, mac_reg); } else { /* Clear H2ME.ULP after ME ULP configuration */ mac_reg = E1000_READ_REG(hw, E1000_H2ME); mac_reg &= ~E1000_H2ME_ULP; E1000_WRITE_REG(hw, E1000_H2ME, mac_reg); } goto out; } ret_val = hw->phy.ops.acquire(hw); if (ret_val) goto out; if (force) /* Toggle LANPHYPC Value bit */ e1000_toggle_lanphypc_pch_lpt(hw); /* Unforce SMBus mode in PHY */ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); if (ret_val) { /* The MAC might be in PCIe mode, so temporarily force to * SMBus mode in order to access the PHY. */ mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT); mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg); msec_delay(50); ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); if (ret_val) goto release; } phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg); /* Unforce SMBus mode in MAC */ mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT); mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg); /* When ULP mode was previously entered, K1 was disabled by the * hardware. Re-enable K1 in the PHY when exiting ULP. */ ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg); if (ret_val) goto release; phy_reg |= HV_PM_CTRL_K1_ENABLE; e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg); /* Clear ULP enabled configuration */ ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg); if (ret_val) goto release; phy_reg &= ~(I218_ULP_CONFIG1_IND | I218_ULP_CONFIG1_STICKY_ULP | I218_ULP_CONFIG1_RESET_TO_SMBUS | I218_ULP_CONFIG1_WOL_HOST | I218_ULP_CONFIG1_INBAND_EXIT | + I218_ULP_CONFIG1_EN_ULP_LANPHYPC | + I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST | I218_ULP_CONFIG1_DISABLE_SMB_PERST); e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); /* Commit ULP changes by starting auto ULP configuration */ phy_reg |= I218_ULP_CONFIG1_START; e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); /* Clear Disable SMBus Release on PERST# in MAC */ mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7); mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST; E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg); release: hw->phy.ops.release(hw); if (force) { hw->phy.ops.reset(hw); msec_delay(50); } out: if (ret_val) DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val); else hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off; return ret_val; }
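/* Both halves of the ULP flow are driven from the driver's power paths; a
 * minimal usage sketch (hypothetical caller, argument meanings as in the
 * function comments above):
 */
static void e1000_example_ulp_transition(struct e1000_hw *hw, bool suspending)
{
	if (suspending)
		/* to_sx = TRUE: honor WoL/link settings on the way into Sx */
		(void)e1000_enable_ulp_lpt_lp(hw, TRUE);
	else
		/* force = TRUE: forcibly exit ULP, e.g. on Sx->S0 resume */
		(void)e1000_disable_ulp_lpt_lp(hw, TRUE);
}

/** * e1000_check_for_copper_link_ich8lan - Check for link (Copper) * @hw: pointer to the HW structure * * Checks to see if the link status of the hardware has changed. If a * change in link status has been detected, then we read the PHY registers * to get the current speed/duplex if link exists. **/ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; - s32 ret_val; + s32 ret_val, tipg_reg = 0; + u16 emi_addr, emi_val = 0; bool link; u16 phy_reg; DEBUGFUNC("e1000_check_for_copper_link_ich8lan"); /* We only want to go out to the PHY registers to see if Auto-Neg * has completed and/or if our link status has changed. The * get_link_status flag is set upon receiving a Link Status * Change or Rx Sequence Error interrupt.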
*/ if (!mac->get_link_status) return E1000_SUCCESS; /* First we want to see if the MII Status Register reports * link. If so, then we want to get the current speed/duplex * of the PHY. */ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); if (ret_val) return ret_val; if (hw->mac.type == e1000_pchlan) { ret_val = e1000_k1_gig_workaround_hv(hw, link); if (ret_val) return ret_val; } /* When connected at 10Mbps half-duplex, some parts are excessively * aggressive resulting in many collisions. To avoid this, increase * the IPG and reduce Rx latency in the PHY. */ if (((hw->mac.type == e1000_pch2lan) || - (hw->mac.type == e1000_pch_lpt)) && link) { - u32 reg; - reg = E1000_READ_REG(hw, E1000_STATUS); - if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) { - u16 emi_addr; + (hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) && link) { + u16 speed, duplex; - reg = E1000_READ_REG(hw, E1000_TIPG); - reg &= ~E1000_TIPG_IPGT_MASK; - reg |= 0xFF; - E1000_WRITE_REG(hw, E1000_TIPG, reg); + e1000_get_speed_and_duplex_copper_generic(hw, &speed, &duplex); + tipg_reg = E1000_READ_REG(hw, E1000_TIPG); + tipg_reg &= ~E1000_TIPG_IPGT_MASK; + if (duplex == HALF_DUPLEX && speed == SPEED_10) { + tipg_reg |= 0xFF; /* Reduce Rx latency in analog PHY */ - ret_val = hw->phy.ops.acquire(hw); - if (ret_val) - return ret_val; + emi_val = 0; + } else if (hw->mac.type == e1000_pch_spt && + duplex == FULL_DUPLEX && speed != SPEED_1000) { + tipg_reg |= 0xC; + emi_val = 1; + } else { + /* Roll back the default values */ + tipg_reg |= 0x08; + emi_val = 1; + } - if (hw->mac.type == e1000_pch2lan) - emi_addr = I82579_RX_CONFIG; + E1000_WRITE_REG(hw, E1000_TIPG, tipg_reg); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + if (hw->mac.type == e1000_pch2lan) + emi_addr = I82579_RX_CONFIG; + else + emi_addr = I217_RX_CONFIG; + ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val); + + if (hw->mac.type == e1000_pch_lpt || + hw->mac.type == e1000_pch_spt) { + u16 phy_reg; + + hw->phy.ops.read_reg_locked(hw, I217_PLL_CLOCK_GATE_REG, + &phy_reg); + phy_reg &= ~I217_PLL_CLOCK_GATE_MASK; + if (speed == SPEED_100 || speed == SPEED_10) + phy_reg |= 0x3E8; else - emi_addr = I217_RX_CONFIG; - ret_val = e1000_write_emi_reg_locked(hw, emi_addr, 0); + phy_reg |= 0xFA; + hw->phy.ops.write_reg_locked(hw, + I217_PLL_CLOCK_GATE_REG, + phy_reg); + } + hw->phy.ops.release(hw); - hw->phy.ops.release(hw); + if (ret_val) + return ret_val; - if (ret_val) - return ret_val; + if (hw->mac.type == e1000_pch_spt) { + u16 data; + u16 ptr_gap; + + if (speed == SPEED_1000) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.read_reg_locked(hw, + PHY_REG(776, 20), + &data); + if (ret_val) { + hw->phy.ops.release(hw); + return ret_val; + } + + ptr_gap = (data & (0x3FF << 2)) >> 2; + if (ptr_gap < 0x18) { + data &= ~(0x3FF << 2); + data |= (0x18 << 2); + ret_val = + hw->phy.ops.write_reg_locked(hw, + PHY_REG(776, 20), data); + } + hw->phy.ops.release(hw); + if (ret_val) + return ret_val; + } else { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg_locked(hw, + PHY_REG(776, 20), + 0xC023); + hw->phy.ops.release(hw); + if (ret_val) + return ret_val; + + } } } + /* I217 Packet Loss issue: + * ensure that FEXTNVM4 Beacon Duration is set correctly + * on power up. 
+ * Set the Beacon Duration for I217 to 8 usec + */ + if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { + u32 mac_reg; + + mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4); + mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; + mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; + E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg); + } + /* Work-around I218 hang issue */ if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) || (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) || (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) || (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) { ret_val = e1000_k1_workaround_lpt_lp(hw, link); if (ret_val) return ret_val; } - if (hw->mac.type == e1000_pch_lpt) { + if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { /* Set platform power management values for * Latency Tolerance Reporting (LTR) * Optimized Buffer Flush/Fill (OBFF) */ ret_val = e1000_platform_pm_pch_lpt(hw, link); if (ret_val) return ret_val; } /* Clear link partner's EEE ability */ hw->dev_spec.ich8lan.eee_lp_ability = 0; + /* FEXTNVM6 K1-off workaround */ + if (hw->mac.type == e1000_pch_spt) { + u32 pcieanacfg = E1000_READ_REG(hw, E1000_PCIEANACFG); + u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6); + + if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE) + fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE; + else + fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE; + + E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6); + } + if (!link) return E1000_SUCCESS; /* No link detected */ mac->get_link_status = FALSE; switch (hw->mac.type) { case e1000_pch2lan: ret_val = e1000_k1_workaround_lv(hw); if (ret_val) return ret_val; /* fall-thru */ case e1000_pchlan: if (hw->phy.type == e1000_phy_82578) { ret_val = e1000_link_stall_workaround_hv(hw); if (ret_val) return ret_val; } /* Workaround for PCHx parts in half-duplex: * Set the number of preambles removed from the packet * when it is passed from the PHY to the MAC to prevent * the MAC from misinterpreting the packet type. */ hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg); phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK; if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD) phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT); hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg); break; default: break; } /* Check if there was DownShift, must be checked * immediately after link-up */ e1000_check_downshift_generic(hw); /* Enable/Disable EEE after link up */ if (hw->phy.type > e1000_phy_82579) { ret_val = e1000_set_eee_pchlan(hw); if (ret_val) return ret_val; } /* If we are forcing speed/duplex, then we simply return since * we have already determined whether we have link or not. */ if (!mac->autoneg) return -E1000_ERR_CONFIG; /* Auto-Neg is enabled. Auto Speed Detection takes care * of MAC speed/duplex configuration. So we only need to * configure Collision Distance in the MAC. */ mac->ops.config_collision_dist(hw); /* Configure Flow Control now that Auto-Neg has completed. * First, we need to restore the desired flow control * settings because we may have had to re-autoneg with a * different link partner. */ ret_val = e1000_config_fc_after_link_up_generic(hw); if (ret_val) DEBUGOUT("Error configuring flow control\n"); return ret_val; } /** * e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers * @hw: pointer to the HW structure * * Initialize family-specific function pointers for PHY, MAC, and NVM. 
**/ void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw) { DEBUGFUNC("e1000_init_function_pointers_ich8lan"); hw->mac.ops.init_params = e1000_init_mac_params_ich8lan; hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan; switch (hw->mac.type) { case e1000_ich8lan: case e1000_ich9lan: case e1000_ich10lan: hw->phy.ops.init_params = e1000_init_phy_params_ich8lan; break; case e1000_pchlan: case e1000_pch2lan: case e1000_pch_lpt: + case e1000_pch_spt: hw->phy.ops.init_params = e1000_init_phy_params_pchlan; break; default: break; } } /** * e1000_acquire_nvm_ich8lan - Acquire NVM mutex * @hw: pointer to the HW structure * * Acquires the mutex for performing NVM operations. **/ static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw) { DEBUGFUNC("e1000_acquire_nvm_ich8lan"); E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex); return E1000_SUCCESS; } /** * e1000_release_nvm_ich8lan - Release NVM mutex * @hw: pointer to the HW structure * * Releases the mutex used while performing NVM operations. **/ static void e1000_release_nvm_ich8lan(struct e1000_hw *hw) { DEBUGFUNC("e1000_release_nvm_ich8lan"); E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex); return; } /** * e1000_acquire_swflag_ich8lan - Acquire software control flag * @hw: pointer to the HW structure * * Acquires the software control flag for performing PHY and select * MAC CSR accesses. **/ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) { u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT; s32 ret_val = E1000_SUCCESS; DEBUGFUNC("e1000_acquire_swflag_ich8lan"); E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex); while (timeout) { extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)) break; msec_delay_irq(1); timeout--; } if (!timeout) { DEBUGOUT("SW has already locked the resource.\n"); ret_val = -E1000_ERR_CONFIG; goto out; } timeout = SW_FLAG_TIMEOUT; extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl); while (timeout) { extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) break; msec_delay_irq(1); timeout--; } if (!timeout) { DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x\n", E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl); extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl); ret_val = -E1000_ERR_CONFIG; goto out; } out: if (ret_val) E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex); return ret_val; } /** * e1000_release_swflag_ich8lan - Release software control flag * @hw: pointer to the HW structure * * Releases the software control flag for performing PHY and select * MAC CSR accesses. **/ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw) { u32 extcnf_ctrl; DEBUGFUNC("e1000_release_swflag_ich8lan"); extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) { extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl); } else { DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n"); } E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex); return; }
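/* PHY and select MAC CSR accesses in this file bracket themselves between
 * the acquire and release calls above. A minimal usage sketch (hypothetical
 * caller, not part of the shared code):
 */
static s32 e1000_example_swflag_user(struct e1000_hw *hw)
{
	s32 ret_val;

	ret_val = e1000_acquire_swflag_ich8lan(hw);
	if (ret_val)
		return ret_val;	/* FW or HW holds the flag; do not touch the PHY */

	/* ... PHY or select MAC CSR accesses go here ... */

	e1000_release_swflag_ich8lan(hw);
	return E1000_SUCCESS;
}

/** * e1000_check_mng_mode_ich8lan - Checks management mode * @hw: pointer to the HW structure * * This checks if the adapter has any manageability enabled. * This is a function pointer entry point only called by read/write * routines for the PHY and NVM parts.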
**/ static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw) { u32 fwsm; DEBUGFUNC("e1000_check_mng_mode_ich8lan"); fwsm = E1000_READ_REG(hw, E1000_FWSM); return (fwsm & E1000_ICH_FWSM_FW_VALID) && ((fwsm & E1000_FWSM_MODE_MASK) == (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); } /** * e1000_check_mng_mode_pchlan - Checks management mode * @hw: pointer to the HW structure * * This checks if the adapter has iAMT enabled. * This is a function pointer entry point only called by read/write * routines for the PHY and NVM parts. **/ static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw) { u32 fwsm; DEBUGFUNC("e1000_check_mng_mode_pchlan"); fwsm = E1000_READ_REG(hw, E1000_FWSM); return (fwsm & E1000_ICH_FWSM_FW_VALID) && (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); } /** * e1000_rar_set_pch2lan - Set receive address register * @hw: pointer to the HW structure * @addr: pointer to the receive address * @index: receive address array register * * Sets the receive address array register at index to the address passed * in by addr. For 82579, RAR[0] is the base address register that is to * contain the MAC address but RAR[1-6] are reserved for manageability (ME). * Use SHRA[0-3] in place of those reserved for ME. **/ static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index) { u32 rar_low, rar_high; DEBUGFUNC("e1000_rar_set_pch2lan"); /* HW expects these in little endian so we reverse the byte order * from network order (big endian) to little endian */ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); /* If MAC address zero, no need to set the AV bit */ if (rar_low || rar_high) rar_high |= E1000_RAH_AV; if (index == 0) { E1000_WRITE_REG(hw, E1000_RAL(index), rar_low); E1000_WRITE_FLUSH(hw); E1000_WRITE_REG(hw, E1000_RAH(index), rar_high); E1000_WRITE_FLUSH(hw); return E1000_SUCCESS; } /* RAR[1-6] are owned by manageability. Skip those and program the * next address into the SHRA register array. */ if (index < (u32) (hw->mac.rar_entry_count)) { s32 ret_val; ret_val = e1000_acquire_swflag_ich8lan(hw); if (ret_val) goto out; E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low); E1000_WRITE_FLUSH(hw); E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high); E1000_WRITE_FLUSH(hw); e1000_release_swflag_ich8lan(hw); /* verify the register updates */ if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) && (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high)) return E1000_SUCCESS; DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n", (index - 1), E1000_READ_REG(hw, E1000_FWSM)); } out: DEBUGOUT1("Failed to write receive address at index %d\n", index); return -E1000_ERR_CONFIG; } /** * e1000_rar_set_pch_lpt - Set receive address registers * @hw: pointer to the HW structure * @addr: pointer to the receive address * @index: receive address array register * * Sets the receive address register array at index to the address passed * in by addr. For LPT, RAR[0] is the base address register that is to * contain the MAC address. SHRA[0-10] are the shared receive address * registers that are shared between the Host and manageability engine (ME). 
**/ static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index) { u32 rar_low, rar_high; u32 wlock_mac; DEBUGFUNC("e1000_rar_set_pch_lpt"); /* HW expects these in little endian so we reverse the byte order * from network order (big endian) to little endian */ rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); /* If MAC address zero, no need to set the AV bit */ if (rar_low || rar_high) rar_high |= E1000_RAH_AV; if (index == 0) { E1000_WRITE_REG(hw, E1000_RAL(index), rar_low); E1000_WRITE_FLUSH(hw); E1000_WRITE_REG(hw, E1000_RAH(index), rar_high); E1000_WRITE_FLUSH(hw); return E1000_SUCCESS; } /* The manageability engine (ME) can lock certain SHRAR registers that * it is using - those registers are unavailable for use. */ if (index < hw->mac.rar_entry_count) { wlock_mac = E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_WLOCK_MAC_MASK; wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT; /* Check if all SHRAR registers are locked */ if (wlock_mac == 1) goto out; if ((wlock_mac == 0) || (index <= wlock_mac)) { s32 ret_val; ret_val = e1000_acquire_swflag_ich8lan(hw); if (ret_val) goto out; E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1), rar_low); E1000_WRITE_FLUSH(hw); E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1), rar_high); E1000_WRITE_FLUSH(hw); e1000_release_swflag_ich8lan(hw); /* verify the register updates */ if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) && (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high)) return E1000_SUCCESS; } } out: DEBUGOUT1("Failed to write receive address at index %d\n", index); return -E1000_ERR_CONFIG; } /** * e1000_update_mc_addr_list_pch2lan - Update Multicast addresses * @hw: pointer to the HW structure * @mc_addr_list: array of multicast addresses to program * @mc_addr_count: number of multicast addresses to program * * Updates entire Multicast Table Array of the PCH2 MAC and PHY. * The caller must have a packed mc_addr_list of multicast addresses. **/ static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw, u8 *mc_addr_list, u32 mc_addr_count) { u16 phy_reg = 0; int i; s32 ret_val; DEBUGFUNC("e1000_update_mc_addr_list_pch2lan"); e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count); ret_val = hw->phy.ops.acquire(hw); if (ret_val) return; ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg); if (ret_val) goto release; for (i = 0; i < hw->mac.mta_reg_count; i++) { hw->phy.ops.write_reg_page(hw, BM_MTA(i), (u16)(hw->mac.mta_shadow[i] & 0xFFFF)); hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1), (u16)((hw->mac.mta_shadow[i] >> 16) & 0xFFFF)); } e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg); release: hw->phy.ops.release(hw); } /** * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked * @hw: pointer to the HW structure * * Checks if firmware is blocking the reset of the PHY. * This is a function pointer entry point only called by * reset routines. **/ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) { u32 fwsm; bool blocked = FALSE; int i = 0; DEBUGFUNC("e1000_check_reset_block_ich8lan"); do { fwsm = E1000_READ_REG(hw, E1000_FWSM); if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) { blocked = TRUE; msec_delay(10); continue; } blocked = FALSE; - } while (blocked && (i++ < 10)); + } while (blocked && (i++ < 30)); return blocked ? 
E1000_BLK_PHY_RESET : E1000_SUCCESS; } /** * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states * @hw: pointer to the HW structure * * Assumes semaphore already acquired. * **/ static s32 e1000_write_smbus_addr(struct e1000_hw *hw) { u16 phy_data; u32 strap = E1000_READ_REG(hw, E1000_STRAP); u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >> E1000_STRAP_SMT_FREQ_SHIFT; s32 ret_val; strap &= E1000_STRAP_SMBUS_ADDRESS_MASK; ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data); if (ret_val) return ret_val; phy_data &= ~HV_SMB_ADDR_MASK; phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT); phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID; if (hw->phy.type == e1000_phy_i217) { /* Restore SMBus frequency */ if (freq--) { phy_data &= ~HV_SMB_ADDR_FREQ_MASK; phy_data |= (freq & (1 << 0)) << HV_SMB_ADDR_FREQ_LOW_SHIFT; phy_data |= (freq & (1 << 1)) << (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1); } else { DEBUGOUT("Unsupported SMB frequency in PHY\n"); } } return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data); } /** * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration * @hw: pointer to the HW structure * * SW should configure the LCD from the NVM extended configuration region * as a workaround for certain parts. **/ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask; s32 ret_val = E1000_SUCCESS; u16 word_addr, reg_data, reg_addr, phy_page = 0; DEBUGFUNC("e1000_sw_lcd_config_ich8lan"); /* Initialize the PHY from the NVM on ICH platforms. This * is needed due to an issue where the NVM configuration is * not properly autoloaded after power transitions. * Therefore, after each PHY reset, we will load the * configuration data out of the NVM manually. */ switch (hw->mac.type) { case e1000_ich8lan: if (phy->type != e1000_phy_igp_3) return ret_val; if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) || (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) { sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; break; } /* Fall-thru */ case e1000_pchlan: case e1000_pch2lan: case e1000_pch_lpt: + case e1000_pch_spt: sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; break; default: return ret_val; } ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; data = E1000_READ_REG(hw, E1000_FEXTNVM); if (!(data & sw_cfg_mask)) goto release; /* Make sure HW does not configure LCD from PHY * extended configuration before SW configuration */ data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); if ((hw->mac.type < e1000_pch2lan) && (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)) goto release; cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE); cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK; cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT; if (!cnf_size) goto release; cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; if (((hw->mac.type == e1000_pchlan) && !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) || (hw->mac.type > e1000_pchlan)) { /* HW configures the SMBus address and LEDs when the * OEM and LCD Write Enable bits are set in the NVM. * When both NVM bits are cleared, SW will configure * them instead. */ ret_val = e1000_write_smbus_addr(hw); if (ret_val) goto release; data = E1000_READ_REG(hw, E1000_LEDCTL); ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG, (u16)data); if (ret_val) goto release; } /* Configure LCD from extended configuration region. 
*/ /* cnf_base_addr is in DWORD */ word_addr = (u16)(cnf_base_addr << 1); for (i = 0; i < cnf_size; i++) { ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1, &reg_data); if (ret_val) goto release; ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1), 1, &reg_addr); if (ret_val) goto release; /* Save off the PHY page for future writes. */ if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) { phy_page = reg_data; continue; } reg_addr &= PHY_REG_MASK; reg_addr |= phy_page; ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr, reg_data); if (ret_val) goto release; } release: hw->phy.ops.release(hw); return ret_val; } /** * e1000_k1_gig_workaround_hv - K1 Si workaround * @hw: pointer to the HW structure * @link: link up bool flag * * If K1 is enabled for 1Gbps, the MAC might stall when transitioning * from a lower speed. This workaround disables K1 whenever link is at 1Gig. * If link is down, the function will restore the default K1 setting located * in the NVM. **/ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) { s32 ret_val = E1000_SUCCESS; u16 status_reg = 0; bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled; DEBUGFUNC("e1000_k1_gig_workaround_hv"); if (hw->mac.type != e1000_pchlan) return E1000_SUCCESS; /* Wrap the whole flow with the sw flag */ ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */ if (link) { if (hw->phy.type == e1000_phy_82578) { ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS, &status_reg); if (ret_val) goto release; status_reg &= (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | BM_CS_STATUS_SPEED_MASK); if (status_reg == (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | BM_CS_STATUS_SPEED_1000)) k1_enable = FALSE; } if (hw->phy.type == e1000_phy_82577) { ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS, &status_reg); if (ret_val) goto release; status_reg &= (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE | HV_M_STATUS_SPEED_MASK); if (status_reg == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE | HV_M_STATUS_SPEED_1000)) k1_enable = FALSE; } /* Link stall fix for link up */ ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), 0x0100); if (ret_val) goto release; } else { /* Link stall fix for link down */ ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), 0x4100); if (ret_val) goto release; } ret_val = e1000_configure_k1_ich8lan(hw, k1_enable); release: hw->phy.ops.release(hw); return ret_val; } /** * e1000_configure_k1_ich8lan - Configure K1 power state * @hw: pointer to the HW structure * @k1_enable: K1 state to configure * * Configure the K1 power state based on the provided parameter. * Assumes semaphore already acquired.
* * Success returns 0, Failure returns -E1000_ERR_PHY (-2) **/ s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable) { s32 ret_val; u32 ctrl_reg = 0; u32 ctrl_ext = 0; u32 reg = 0; u16 kmrn_reg = 0; DEBUGFUNC("e1000_configure_k1_ich8lan"); ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, &kmrn_reg); if (ret_val) return ret_val; if (k1_enable) kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE; else kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE; ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, kmrn_reg); if (ret_val) return ret_val; usec_delay(20); ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); ctrl_reg = E1000_READ_REG(hw, E1000_CTRL); reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); reg |= E1000_CTRL_FRCSPD; E1000_WRITE_REG(hw, E1000_CTRL, reg); E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS); E1000_WRITE_FLUSH(hw); usec_delay(20); E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg); E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); E1000_WRITE_FLUSH(hw); usec_delay(20); return E1000_SUCCESS; } /** * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration * @hw: pointer to the HW structure * @d0_state: boolean if entering d0 or d3 device state * * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are * collectively called OEM bits. The OEM Write Enable bit and SW Config bit * in NVM determine whether HW should configure LPLU and Gbe Disable. **/ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) { s32 ret_val = 0; u32 mac_reg; u16 oem_reg; DEBUGFUNC("e1000_oem_bits_config_ich8lan"); if (hw->mac.type < e1000_pchlan) return ret_val; ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; if (hw->mac.type == e1000_pchlan) { mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) goto release; } mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM); if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M)) goto release; mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL); ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg); if (ret_val) goto release; oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU); if (d0_state) { if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE) oem_reg |= HV_OEM_BITS_GBE_DIS; if (mac_reg & E1000_PHY_CTRL_D0A_LPLU) oem_reg |= HV_OEM_BITS_LPLU; } else { if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE | E1000_PHY_CTRL_NOND0A_GBE_DISABLE)) oem_reg |= HV_OEM_BITS_GBE_DIS; if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_NOND0A_LPLU)) oem_reg |= HV_OEM_BITS_LPLU; } /* Set Restart auto-neg to activate the bits */ if ((d0_state || (hw->mac.type != e1000_pchlan)) && !hw->phy.ops.check_reset_block(hw)) oem_reg |= HV_OEM_BITS_RESTART_AN; ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg); release: hw->phy.ops.release(hw); return ret_val; } /** * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode * @hw: pointer to the HW structure **/ static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw) { s32 ret_val; u16 data; DEBUGFUNC("e1000_set_mdio_slow_mode_hv"); ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data); if (ret_val) return ret_val; data |= HV_KMRN_MDIO_SLOW; ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data); return ret_val; }
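/* The PHY workarounds below are largely read-modify-write sequences on PHY
 * registers, following the same shape as e1000_set_mdio_slow_mode_hv()
 * above. A condensed sketch of that pattern (hypothetical helper, not part
 * of the shared code):
 */
static s32 e1000_example_phy_rmw(struct e1000_hw *hw, u32 offset,
				 u16 set_bits, u16 clear_bits)
{
	u16 data;
	s32 ret_val;

	ret_val = hw->phy.ops.read_reg(hw, offset, &data);
	if (ret_val)
		return ret_val;

	data = (data & ~clear_bits) | set_bits;

	return hw->phy.ops.write_reg(hw, offset, data);
}

/** * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be * done after every PHY reset.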
**/ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; u16 phy_data; DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan"); if (hw->mac.type != e1000_pchlan) return E1000_SUCCESS; /* Set MDIO slow mode before any other MDIO access */ if (hw->phy.type == e1000_phy_82577) { ret_val = e1000_set_mdio_slow_mode_hv(hw); if (ret_val) return ret_val; } if (((hw->phy.type == e1000_phy_82577) && ((hw->phy.revision == 1) || (hw->phy.revision == 2))) || ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) { /* Disable generation of early preamble */ ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431); if (ret_val) return ret_val; /* Preamble tuning for SSC */ ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204); if (ret_val) return ret_val; } if (hw->phy.type == e1000_phy_82578) { /* Return registers to default by doing a soft reset then * writing 0x3140 to the control register. */ if (hw->phy.revision < 2) { e1000_phy_sw_reset_generic(hw); ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, 0x3140); } } /* Select page 0 */ ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; hw->phy.addr = 1; ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0); hw->phy.ops.release(hw); if (ret_val) return ret_val; /* Configure the K1 Si workaround during phy reset assuming there is * link so that it disables K1 if link is in 1Gbps. */ ret_val = e1000_k1_gig_workaround_hv(hw, TRUE); if (ret_val) return ret_val; /* Workaround for link disconnects on a busy hub in half duplex */ ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data); if (ret_val) goto release; ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG, phy_data & 0x00FF); if (ret_val) goto release; /* set MSE higher to enable link to stay up when noise is high */ ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034); release: hw->phy.ops.release(hw); return ret_val; } /** * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY * @hw: pointer to the HW structure **/ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw) { u32 mac_reg; u16 i, phy_reg = 0; s32 ret_val; DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan"); ret_val = hw->phy.ops.acquire(hw); if (ret_val) return; ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg); if (ret_val) goto release; /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */ for (i = 0; i < (hw->mac.rar_entry_count); i++) { mac_reg = E1000_READ_REG(hw, E1000_RAL(i)); hw->phy.ops.write_reg_page(hw, BM_RAR_L(i), (u16)(mac_reg & 0xFFFF)); hw->phy.ops.write_reg_page(hw, BM_RAR_M(i), (u16)((mac_reg >> 16) & 0xFFFF)); mac_reg = E1000_READ_REG(hw, E1000_RAH(i)); hw->phy.ops.write_reg_page(hw, BM_RAR_H(i), (u16)(mac_reg & 0xFFFF)); hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i), (u16)((mac_reg & E1000_RAH_AV) >> 16)); } e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg); release: hw->phy.ops.release(hw); } static u32 e1000_calc_rx_da_crc(u8 mac[]) { u32 poly = 0xEDB88320; /* Polynomial for 802.3 CRC calculation */ u32 i, j, mask, crc; DEBUGFUNC("e1000_calc_rx_da_crc"); crc = 0xffffffff; for (i = 0; i < 6; i++) { crc = crc ^ mac[i]; for (j = 8; j > 0; j--) { mask = (crc & 1) * (-1); crc = (crc >> 1) ^ (poly & mask); } } return ~crc; } /** * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation * with 82579 PHY * @hw: pointer to the HW structure * @enable: flag to enable/disable workaround 
when enabling/disabling jumbos **/ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) { s32 ret_val = E1000_SUCCESS; u16 phy_reg, data; u32 mac_reg; u16 i; DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan"); if (hw->mac.type < e1000_pch2lan) return E1000_SUCCESS; /* disable Rx path while enabling/disabling workaround */ hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg); ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg | (1 << 14)); if (ret_val) return ret_val; if (enable) { /* Write Rx addresses (rar_entry_count for RAL/H, and * SHRAL/H) and initial CRC values to the MAC */ for (i = 0; i < hw->mac.rar_entry_count; i++) { u8 mac_addr[ETH_ADDR_LEN] = {0}; u32 addr_high, addr_low; addr_high = E1000_READ_REG(hw, E1000_RAH(i)); if (!(addr_high & E1000_RAH_AV)) continue; addr_low = E1000_READ_REG(hw, E1000_RAL(i)); mac_addr[0] = (addr_low & 0xFF); mac_addr[1] = ((addr_low >> 8) & 0xFF); mac_addr[2] = ((addr_low >> 16) & 0xFF); mac_addr[3] = ((addr_low >> 24) & 0xFF); mac_addr[4] = (addr_high & 0xFF); mac_addr[5] = ((addr_high >> 8) & 0xFF); E1000_WRITE_REG(hw, E1000_PCH_RAICC(i), e1000_calc_rx_da_crc(mac_addr)); } /* Write Rx addresses to the PHY */ e1000_copy_rx_addrs_to_phy_ich8lan(hw); /* Enable jumbo frame workaround in the MAC */ mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG); mac_reg &= ~(1 << 14); mac_reg |= (7 << 15); E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg); mac_reg = E1000_READ_REG(hw, E1000_RCTL); mac_reg |= E1000_RCTL_SECRC; E1000_WRITE_REG(hw, E1000_RCTL, mac_reg); ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_CTRL_OFFSET, &data); if (ret_val) return ret_val; ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_CTRL_OFFSET, data | (1 << 0)); if (ret_val) return ret_val; ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_HD_CTRL, &data); if (ret_val) return ret_val; data &= ~(0xF << 8); data |= (0xB << 8); ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_HD_CTRL, data); if (ret_val) return ret_val; /* Enable jumbo frame workaround in the PHY */ hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data); data &= ~(0x7F << 5); data |= (0x37 << 5); ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data); if (ret_val) return ret_val; hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data); data &= ~(1 << 13); ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data); if (ret_val) return ret_val; hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data); data &= ~(0x3FF << 2); data |= (E1000_TX_PTR_GAP << 2); ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data); if (ret_val) return ret_val; ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100); if (ret_val) return ret_val; hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data); ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data | (1 << 10)); if (ret_val) return ret_val; } else { /* Write MAC register values back to h/w defaults */ mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG); mac_reg &= ~(0xF << 14); E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg); mac_reg = E1000_READ_REG(hw, E1000_RCTL); mac_reg &= ~E1000_RCTL_SECRC; E1000_WRITE_REG(hw, E1000_RCTL, mac_reg); ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_CTRL_OFFSET, &data); if (ret_val) return ret_val; ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_CTRL_OFFSET, data & ~(1 << 0)); if (ret_val) return ret_val; ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_HD_CTRL, &data); if (ret_val) return ret_val; data &= ~(0xF << 8); data |= (0xB << 8); ret_val = e1000_write_kmrn_reg_generic(hw, 
E1000_KMRNCTRLSTA_HD_CTRL, data); if (ret_val) return ret_val; /* Write PHY register values back to h/w defaults */ hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data); data &= ~(0x7F << 5); ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data); if (ret_val) return ret_val; hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data); data |= (1 << 13); ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data); if (ret_val) return ret_val; hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data); data &= ~(0x3FF << 2); data |= (0x8 << 2); ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data); if (ret_val) return ret_val; ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00); if (ret_val) return ret_val; hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data); ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data & ~(1 << 10)); if (ret_val) return ret_val; } /* re-enable Rx path after enabling/disabling workaround */ return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14)); } /** * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be * done after every PHY reset. **/ static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan"); if (hw->mac.type != e1000_pch2lan) return E1000_SUCCESS; /* Set MDIO slow mode before any other MDIO access */ ret_val = e1000_set_mdio_slow_mode_hv(hw); if (ret_val) return ret_val; ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; /* set MSE higher to enable link to stay up when noise is high */ ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034); if (ret_val) goto release; /* drop link after 5 times MSE threshold was reached */ ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005); release: hw->phy.ops.release(hw); return ret_val; } /** * e1000_k1_workaround_lv - K1 Si workaround * @hw: pointer to the HW structure * * Workaround to set the K1 beacon duration for 82579 parts in 10Mbps. * Disable K1 for 1000 and 100 speeds. **/ static s32 e1000_k1_workaround_lv(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; u16 status_reg = 0; DEBUGFUNC("e1000_k1_workaround_lv"); if (hw->mac.type != e1000_pch2lan) return E1000_SUCCESS; /* Set K1 beacon duration based on 10Mbps speed */ ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg); if (ret_val) return ret_val; if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) { if (status_reg & (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) { u16 pm_phy_reg; /* LV 1G/100 Packet drop issue wa */ ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL, &pm_phy_reg); if (ret_val) return ret_val; pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE; ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, pm_phy_reg); if (ret_val) return ret_val; } else { u32 mac_reg; mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4); mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg); } } return ret_val; }
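/* e1000_post_phy_reset_ich8lan() and e1000_phy_hw_reset_ich8lan() below use
 * gating to keep the hardware from racing a software-driven PHY/LCD
 * configuration. A condensed usage sketch (hypothetical caller):
 */
static void e1000_example_sw_phy_config(struct e1000_hw *hw)
{
	e1000_gate_hw_phy_config_ich8lan(hw, TRUE);	/* hardware stands off */
	/* ... software PHY configuration, e.g. e1000_sw_lcd_config_ich8lan() ... */
	e1000_gate_hw_phy_config_ich8lan(hw, FALSE);	/* hand control back */
}

/** * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware * @hw: pointer to the HW structure * @gate: boolean set to TRUE to gate, FALSE to ungate * * Gate/ungate the automatic PHY configuration via hardware; perform * the configuration via software instead.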
**/ static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate) { u32 extcnf_ctrl; DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan"); if (hw->mac.type < e1000_pch2lan) return; extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL); if (gate) extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG; else extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG; E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl); } /** * e1000_lan_init_done_ich8lan - Check for PHY config completion * @hw: pointer to the HW structure * * Check the appropriate indication that the MAC has finished configuring the * PHY after a software reset. **/ static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw) { u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT; DEBUGFUNC("e1000_lan_init_done_ich8lan"); /* Wait for basic configuration to complete before proceeding */ do { data = E1000_READ_REG(hw, E1000_STATUS); data &= E1000_STATUS_LAN_INIT_DONE; usec_delay(100); } while ((!data) && --loop); /* If basic configuration is incomplete before the above loop * count reaches 0, loading the configuration from NVM will * leave the PHY in a bad state possibly resulting in no link. */ if (loop == 0) DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n"); /* Clear the Init Done bit for the next init event */ data = E1000_READ_REG(hw, E1000_STATUS); data &= ~E1000_STATUS_LAN_INIT_DONE; E1000_WRITE_REG(hw, E1000_STATUS, data); } /** * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset * @hw: pointer to the HW structure **/ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; u16 reg; DEBUGFUNC("e1000_post_phy_reset_ich8lan"); if (hw->phy.ops.check_reset_block(hw)) return E1000_SUCCESS; /* Allow time for h/w to get to quiescent state after reset */ msec_delay(10); /* Perform any necessary post-reset workarounds */ switch (hw->mac.type) { case e1000_pchlan: ret_val = e1000_hv_phy_workarounds_ich8lan(hw); if (ret_val) return ret_val; break; case e1000_pch2lan: ret_val = e1000_lv_phy_workarounds_ich8lan(hw); if (ret_val) return ret_val; break; default: break; } /* Clear the host wakeup bit after lcd reset */ if (hw->mac.type >= e1000_pchlan) { hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg); reg &= ~BM_WUC_HOST_WU_BIT; hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg); } /* Configure the LCD with the extended configuration region in NVM */ ret_val = e1000_sw_lcd_config_ich8lan(hw); if (ret_val) return ret_val; /* Configure the LCD with the OEM bits in NVM */ ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE); if (hw->mac.type == e1000_pch2lan) { /* Ungate automatic PHY configuration on non-managed 82579 */ if (!(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID)) { msec_delay(10); e1000_gate_hw_phy_config_ich8lan(hw, FALSE); } /* Set EEE LPI Update Timer to 200usec */ ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_UPDATE_TIMER, 0x1387); hw->phy.ops.release(hw); } return ret_val; } /** * e1000_phy_hw_reset_ich8lan - Performs a PHY reset * @hw: pointer to the HW structure * * Resets the PHY * This is a function pointer entry point called by drivers * or other shared routines.
**/ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; DEBUGFUNC("e1000_phy_hw_reset_ich8lan"); /* Gate automatic PHY configuration by hardware on non-managed 82579 */ if ((hw->mac.type == e1000_pch2lan) && !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID)) e1000_gate_hw_phy_config_ich8lan(hw, TRUE); ret_val = e1000_phy_hw_reset_generic(hw); if (ret_val) return ret_val; return e1000_post_phy_reset_ich8lan(hw); } /** * e1000_set_lplu_state_pchlan - Set Low Power Link Up state * @hw: pointer to the HW structure * @active: TRUE to enable LPLU, FALSE to disable * * Sets the LPLU state according to the active flag. For PCH, if OEM write * bit are disabled in the NVM, writing the LPLU bits in the MAC will not set * the phy speed. This function will manually set the LPLU bit and restart * auto-neg as hw would do. D3 and D0 LPLU will call the same function * since it configures the same bit. **/ static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active) { s32 ret_val; u16 oem_reg; DEBUGFUNC("e1000_set_lplu_state_pchlan"); ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg); if (ret_val) return ret_val; if (active) oem_reg |= HV_OEM_BITS_LPLU; else oem_reg &= ~HV_OEM_BITS_LPLU; if (!hw->phy.ops.check_reset_block(hw)) oem_reg |= HV_OEM_BITS_RESTART_AN; return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg); } /** * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state * @hw: pointer to the HW structure * @active: TRUE to enable LPLU, FALSE to disable * * Sets the LPLU D0 state according to the active flag. When * activating LPLU this function also disables smart speed * and vice versa. LPLU will not be activated unless the * device autonegotiation advertisement meets standards of * either 10 or 10/100 or 10/100/1000 at all duplexes. * This is a function pointer entry point only called by * PHY setup routines. **/ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active) { struct e1000_phy_info *phy = &hw->phy; u32 phy_ctrl; s32 ret_val = E1000_SUCCESS; u16 data; DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan"); if (phy->type == e1000_phy_ife) return E1000_SUCCESS; phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL); if (active) { phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU; E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); if (phy->type != e1000_phy_igp_3) return E1000_SUCCESS; /* Call gig speed drop workaround on LPLU before accessing * any PHY registers */ if (hw->mac.type == e1000_ich8lan) e1000_gig_downshift_workaround_ich8lan(hw); /* When LPLU is enabled, we should disable SmartSpeed */ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) return ret_val; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) return ret_val; } else { phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU; E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); if (phy->type != e1000_phy_igp_3) return E1000_SUCCESS; /* LPLU and SmartSpeed are mutually exclusive. LPLU is used * during Dx states where the power conservation is most * important. During driver activity we should enable * SmartSpeed, so performance is maintained. 
*/ if (phy->smart_speed == e1000_smart_speed_on) { ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) return ret_val; data |= IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) return ret_val; } else if (phy->smart_speed == e1000_smart_speed_off) { ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) return ret_val; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) return ret_val; } } return E1000_SUCCESS; } /** * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state * @hw: pointer to the HW structure * @active: TRUE to enable LPLU, FALSE to disable * * Sets the LPLU D3 state according to the active flag. When * activating LPLU this function also disables smart speed * and vice versa. LPLU will not be activated unless the * device autonegotiation advertisement meets standards of * either 10 or 10/100 or 10/100/1000 at all duplexes. * This is a function pointer entry point only called by * PHY setup routines. **/ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active) { struct e1000_phy_info *phy = &hw->phy; u32 phy_ctrl; s32 ret_val = E1000_SUCCESS; u16 data; DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan"); phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL); if (!active) { phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU; E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); if (phy->type != e1000_phy_igp_3) return E1000_SUCCESS; /* LPLU and SmartSpeed are mutually exclusive. LPLU is used * during Dx states where the power conservation is most * important. During driver activity we should enable * SmartSpeed, so performance is maintained. */ if (phy->smart_speed == e1000_smart_speed_on) { ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) return ret_val; data |= IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) return ret_val; } else if (phy->smart_speed == e1000_smart_speed_off) { ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) return ret_val; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) return ret_val; } } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU; E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); if (phy->type != e1000_phy_igp_3) return E1000_SUCCESS; /* Call gig speed drop workaround on LPLU before accessing * any PHY registers */ if (hw->mac.type == e1000_ich8lan) e1000_gig_downshift_workaround_ich8lan(hw); /* When LPLU is enabled, we should disable SmartSpeed */ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) return ret_val; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, data); } return ret_val; } /** * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1 * @hw: pointer to the HW structure * @bank: pointer to the variable that returns the active bank * * Reads signature byte from the NVM using the flash access registers. * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank. 
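 *
 * Typical caller pattern (a sketch mirroring e1000_read_nvm_spt() below;
 * falling back to bank 0 when detection fails is the convention used
 * throughout this file):
 *
 *	u32 bank = 0;
 *	if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank) != E1000_SUCCESS)
 *		bank = 0;
 *	act_offset = bank ? nvm->flash_bank_size : 0;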
**/ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) { u32 eecd; struct e1000_nvm_info *nvm = &hw->nvm; u32 bank1_offset = nvm->flash_bank_size * sizeof(u16); u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1; + u32 nvm_dword = 0; u8 sig_byte = 0; s32 ret_val; DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan"); switch (hw->mac.type) { + case e1000_pch_spt: + bank1_offset = nvm->flash_bank_size; + act_offset = E1000_ICH_NVM_SIG_WORD; + + /* set bank to 0 in case flash read fails */ + *bank = 0; + + /* Check bank 0 */ + ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, + &nvm_dword); + if (ret_val) + return ret_val; + sig_byte = (u8)((nvm_dword & 0xFF00) >> 8); + if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == + E1000_ICH_NVM_SIG_VALUE) { + *bank = 0; + return E1000_SUCCESS; + } + + /* Check bank 1 */ + ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset + + bank1_offset, + &nvm_dword); + if (ret_val) + return ret_val; + sig_byte = (u8)((nvm_dword & 0xFF00) >> 8); + if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == + E1000_ICH_NVM_SIG_VALUE) { + *bank = 1; + return E1000_SUCCESS; + } + + DEBUGOUT("ERROR: No valid NVM bank present\n"); + return -E1000_ERR_NVM; case e1000_ich8lan: case e1000_ich9lan: eecd = E1000_READ_REG(hw, E1000_EECD); if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) == E1000_EECD_SEC1VAL_VALID_MASK) { if (eecd & E1000_EECD_SEC1VAL) *bank = 1; else *bank = 0; return E1000_SUCCESS; } DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n"); /* fall-thru */ default: /* set bank to 0 in case flash read fails */ *bank = 0; /* Check bank 0 */ ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset, &sig_byte); if (ret_val) return ret_val; if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == E1000_ICH_NVM_SIG_VALUE) { *bank = 0; return E1000_SUCCESS; } /* Check bank 1 */ ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset + bank1_offset, &sig_byte); if (ret_val) return ret_val; if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == E1000_ICH_NVM_SIG_VALUE) { *bank = 1; return E1000_SUCCESS; } DEBUGOUT("ERROR: No valid NVM bank present\n"); return -E1000_ERR_NVM; } } /** + * e1000_read_nvm_spt - NVM access for SPT + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the word(s) to read. + * @words: Size of data to read in words. + * @data: pointer to the word(s) to read at offset. + * + * Reads a word(s) from the NVM + **/ +static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 act_offset; + s32 ret_val = E1000_SUCCESS; + u32 bank = 0; + u32 dword = 0; + u16 offset_to_read; + u16 i; + + DEBUGFUNC("e1000_read_nvm_spt"); + + if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + nvm->ops.acquire(hw); + + ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); + if (ret_val != E1000_SUCCESS) { + DEBUGOUT("Could not detect valid bank, assuming bank 0\n"); + bank = 0; + } + + act_offset = (bank) ? 
nvm->flash_bank_size : 0; + act_offset += offset; + + ret_val = E1000_SUCCESS; + + for (i = 0; i < words; i += 2) { + if (words - i == 1) { + if (dev_spec->shadow_ram[offset+i].modified) { + data[i] = dev_spec->shadow_ram[offset+i].value; + } else { + offset_to_read = act_offset + i - + ((act_offset + i) % 2); + ret_val = + e1000_read_flash_dword_ich8lan(hw, + offset_to_read, + &dword); + if (ret_val) + break; + if ((act_offset + i) % 2 == 0) + data[i] = (u16)(dword & 0xFFFF); + else + data[i] = (u16)((dword >> 16) & 0xFFFF); + } + } else { + offset_to_read = act_offset + i; + if (!(dev_spec->shadow_ram[offset+i].modified) || + !(dev_spec->shadow_ram[offset+i+1].modified)) { + ret_val = + e1000_read_flash_dword_ich8lan(hw, + offset_to_read, + &dword); + if (ret_val) + break; + } + if (dev_spec->shadow_ram[offset+i].modified) + data[i] = dev_spec->shadow_ram[offset+i].value; + else + data[i] = (u16) (dword & 0xFFFF); + if (dev_spec->shadow_ram[offset+i].modified) + data[i+1] = + dev_spec->shadow_ram[offset+i+1].value; + else + data[i+1] = (u16) (dword >> 16 & 0xFFFF); + } + } + + nvm->ops.release(hw); + +out: + if (ret_val) + DEBUGOUT1("NVM read error: %d\n", ret_val); + + return ret_val; +} + +/** * e1000_read_nvm_ich8lan - Read word(s) from the NVM * @hw: pointer to the HW structure * @offset: The offset (in bytes) of the word(s) to read. * @words: Size of data to read in words * @data: Pointer to the word(s) to read at offset. * * Reads a word(s) from the NVM using the flash access registers. **/ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) { struct e1000_nvm_info *nvm = &hw->nvm; struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; u32 act_offset; s32 ret_val = E1000_SUCCESS; u32 bank = 0; u16 i, word; DEBUGFUNC("e1000_read_nvm_ich8lan"); if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || (words == 0)) { DEBUGOUT("nvm parameter(s) out of bounds\n"); ret_val = -E1000_ERR_NVM; goto out; } nvm->ops.acquire(hw); ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); if (ret_val != E1000_SUCCESS) { DEBUGOUT("Could not detect valid bank, assuming bank 0\n"); bank = 0; } act_offset = (bank) ? nvm->flash_bank_size : 0; act_offset += offset; ret_val = E1000_SUCCESS; for (i = 0; i < words; i++) { if (dev_spec->shadow_ram[offset+i].modified) { data[i] = dev_spec->shadow_ram[offset+i].value; } else { ret_val = e1000_read_flash_word_ich8lan(hw, act_offset + i, &word); if (ret_val) break; data[i] = word; } } nvm->ops.release(hw); out: if (ret_val) DEBUGOUT1("NVM read error: %d\n", ret_val); return ret_val; } /** * e1000_flash_cycle_init_ich8lan - Initialize flash * @hw: pointer to the HW structure * * This function does initial flash setup so that a new read/write/erase cycle * can be started. **/ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) { union ich8_hws_flash_status hsfsts; s32 ret_val = -E1000_ERR_NVM; DEBUGFUNC("e1000_flash_cycle_init_ich8lan"); hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS); /* Check if the flash descriptor is valid */ if (!hsfsts.hsf_status.fldesvalid) { DEBUGOUT("Flash descriptor invalid. 
SW Sequencing must be used.\n"); return -E1000_ERR_NVM; } /* Clear FCERR and DAEL in hw status by writing 1 */ hsfsts.hsf_status.flcerr = 1; hsfsts.hsf_status.dael = 1; - E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval); + if (hw->mac.type == e1000_pch_spt) + E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, + hsfsts.regval & 0xFFFF); + else + E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval); /* Either we should have a hardware SPI cycle in progress * bit to check against, in order to start a new cycle or * FDONE bit should be changed in the hardware so that it * is 1 after hardware reset, which can then be used as an * indication whether a cycle is in progress or has been * completed. */ if (!hsfsts.hsf_status.flcinprog) { /* There is no cycle running at present, * so we can start a cycle. * Begin by setting Flash Cycle Done. */ hsfsts.hsf_status.flcdone = 1; - E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval); + if (hw->mac.type == e1000_pch_spt) + E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, + hsfsts.regval & 0xFFFF); + else + E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, + hsfsts.regval); ret_val = E1000_SUCCESS; } else { s32 i; /* Otherwise poll for sometime so the current * cycle has a chance to end before giving up. */ for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS); if (!hsfsts.hsf_status.flcinprog) { ret_val = E1000_SUCCESS; break; } usec_delay(1); } if (ret_val == E1000_SUCCESS) { /* Successful in waiting for previous cycle to timeout, * now set the Flash Cycle Done. */ hsfsts.hsf_status.flcdone = 1; - E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, - hsfsts.regval); + if (hw->mac.type == e1000_pch_spt) + E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, + hsfsts.regval & 0xFFFF); + else + E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, + hsfsts.regval); } else { DEBUGOUT("Flash controller busy, cannot get access\n"); } } return ret_val; } /** * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase) * @hw: pointer to the HW structure * @timeout: maximum time to wait for completion * * This function starts a flash cycle and waits for its completion. **/ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) { union ich8_hws_flash_ctrl hsflctl; union ich8_hws_flash_status hsfsts; u32 i = 0; DEBUGFUNC("e1000_flash_cycle_ich8lan"); /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ - hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL); + if (hw->mac.type == e1000_pch_spt) + hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16; + else + hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL); hsflctl.hsf_ctrl.flcgo = 1; - E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval); + if (hw->mac.type == e1000_pch_spt) + E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, + hsflctl.regval << 16); + else + E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval); /* wait till FDONE bit is set to 1 */ do { hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS); if (hsfsts.hsf_status.flcdone) break; usec_delay(1); } while (i++ < timeout); if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr) return E1000_SUCCESS; return -E1000_ERR_NVM; } /** + * e1000_read_flash_dword_ich8lan - Read dword from flash + * @hw: pointer to the HW structure + * @offset: offset to data location + * @data: pointer to the location for storing the data + * + * Reads the flash dword at offset into data. 
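+ *
+ * Worked example (illustrative): passing the bank signature word offset,
+ * E1000_ICH_NVM_SIG_WORD (0x13), results in a flash read at byte offset
+ * 0x26:
+ *
+ *	e1000_read_flash_dword_ich8lan(hw, E1000_ICH_NVM_SIG_WORD, &dword);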
Offset is converted + * to bytes before read. + **/ +static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset, + u32 *data) +{ + DEBUGFUNC("e1000_read_flash_dword_ich8lan"); + + if (!data) + return -E1000_ERR_NVM; + + /* Must convert word offset into bytes. */ + offset <<= 1; + + return e1000_read_flash_data32_ich8lan(hw, offset, data); +} + +/** * e1000_read_flash_word_ich8lan - Read word from flash * @hw: pointer to the HW structure * @offset: offset to data location * @data: pointer to the location for storing the data * * Reads the flash word at offset into data. Offset is converted * to bytes before read. **/ static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, u16 *data) { DEBUGFUNC("e1000_read_flash_word_ich8lan"); if (!data) return -E1000_ERR_NVM; /* Must convert offset into bytes. */ offset <<= 1; return e1000_read_flash_data_ich8lan(hw, offset, 2, data); } /** * e1000_read_flash_byte_ich8lan - Read byte from flash * @hw: pointer to the HW structure * @offset: The offset of the byte to read. * @data: Pointer to a byte to store the value read. * * Reads a single byte from the NVM using the flash access registers. **/ static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, u8 *data) { s32 ret_val; u16 word = 0; - ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word); + /* In SPT, only 32 bits access is supported, + * so this function should not be called. + */ + if (hw->mac.type == e1000_pch_spt) + return -E1000_ERR_NVM; + else + ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word); if (ret_val) return ret_val; *data = (u8)word; return E1000_SUCCESS; } /** * e1000_read_flash_data_ich8lan - Read byte or word from NVM * @hw: pointer to the HW structure * @offset: The offset (in bytes) of the byte or word to read. * @size: Size of data to read, 1=byte 2=word * @data: Pointer to the word to store the value read. * * Reads a byte or word from the NVM using the flash access registers. **/ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, u8 size, u16 *data) { union ich8_hws_flash_status hsfsts; union ich8_hws_flash_ctrl hsflctl; u32 flash_linear_addr; u32 flash_data = 0; s32 ret_val = -E1000_ERR_NVM; u8 count = 0; DEBUGFUNC("e1000_read_flash_data_ich8lan"); if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) return -E1000_ERR_NVM; flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + hw->nvm.flash_base_addr); do { usec_delay(1); /* Steps */ ret_val = e1000_flash_cycle_init_ich8lan(hw); if (ret_val != E1000_SUCCESS) break; hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL); /* 0b/1b corresponds to 1 or 2 byte size, respectively. 
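		 *
		 * FLDBCOUNT encodes (transfer size - 1):
		 *	size 1 (byte)  -> 0b
		 *	size 2 (word)  -> 1b
		 *	size 4 (dword) -> 11b; only the SPT paths use this,
		 *	and there HSFCTL is reached as the high 16 bits of
		 *	the 32-bit HSFSTS register, hence the <<16 / >>16
		 *	pairs in the SPT-specific code.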
		 */
		hsflctl.hsf_ctrl.fldbcount = size - 1;
		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);

		ret_val = e1000_flash_cycle_ich8lan(hw,
						ICH_FLASH_READ_COMMAND_TIMEOUT);

		/* Check if FCERR is set to 1, if set to 1, clear it
		 * and try the whole sequence a few more times, else
		 * read in (shift in) the Flash Data0, the order is
		 * least significant byte first msb to lsb
		 */
		if (ret_val == E1000_SUCCESS) {
			flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
			if (size == 1)
				*data = (u8)(flash_data & 0x000000FF);
			else if (size == 2)
				*data = (u16)(flash_data & 0x0000FFFF);
			break;
		} else {
			/* If we've gotten here, then things are probably
			 * completely hosed, but if the error condition is
			 * detected, it won't hurt to give it another try...
			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
			 */
			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
							       ICH_FLASH_HSFSTS);
			if (hsfsts.hsf_status.flcerr) {
				/* Repeat for some time before giving up. */
				continue;
			} else if (!hsfsts.hsf_status.flcdone) {
				DEBUGOUT("Timeout error - flash cycle did not complete.\n");
				break;
			}
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return ret_val;
}

+/**
+ * e1000_read_flash_data32_ich8lan - Read dword from NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the dword to read.
+ * @data: Pointer to the dword to store the value read.
+ *
+ * Reads a dword from the NVM using the flash access registers.
+ **/
+static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
+					   u32 *data)
+{
+	union ich8_hws_flash_status hsfsts;
+	union ich8_hws_flash_ctrl hsflctl;
+	u32 flash_linear_addr;
+	s32 ret_val = -E1000_ERR_NVM;
+	u8 count = 0;
+	DEBUGFUNC("e1000_read_flash_data32_ich8lan");
+
+	if (offset > ICH_FLASH_LINEAR_ADDR_MASK ||
+	    hw->mac.type != e1000_pch_spt)
+		return -E1000_ERR_NVM;
+	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+			     hw->nvm.flash_base_addr);
+
+	do {
+		usec_delay(1);
+		/* Steps */
+		ret_val = e1000_flash_cycle_init_ich8lan(hw);
+		if (ret_val != E1000_SUCCESS)
+			break;
+		/* In SPT, this register is in Lan memory space, not flash.
+		 * Therefore, only 32 bit access is supported
+		 */
+		hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
+
+		/* 11b corresponds to a 4 byte (dword) transfer size */
+		hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
+		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
+		/* In SPT, this register is in Lan memory space, not flash.
+		 * Therefore, only 32 bit access is supported
+		 */
+		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
+				      (u32)hsflctl.regval << 16);
+		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
+
+		ret_val = e1000_flash_cycle_ich8lan(hw,
+						ICH_FLASH_READ_COMMAND_TIMEOUT);
+
+		/* Check if FCERR is set to 1, if set to 1, clear it
+		 * and try the whole sequence a few more times, else
+		 * read in (shift in) the Flash Data0, the order is
+		 * least significant byte first msb to lsb
+		 */
+		if (ret_val == E1000_SUCCESS) {
+			*data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
+			break;
+		} else {
+			/* If we've gotten here, then things are probably
+			 * completely hosed, but if the error condition is
+			 * detected, it won't hurt to give it another try...
+			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
+			 */
+			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
+							       ICH_FLASH_HSFSTS);
+			if (hsfsts.hsf_status.flcerr) {
+				/* Repeat for some time before giving up.
*/ + continue; + } else if (!hsfsts.hsf_status.flcdone) { + DEBUGOUT("Timeout error - flash cycle did not complete.\n"); + break; + } + } + } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); + + return ret_val; +} + /** * e1000_write_nvm_ich8lan - Write word(s) to the NVM * @hw: pointer to the HW structure * @offset: The offset (in bytes) of the word(s) to write. * @words: Size of data to write in words * @data: Pointer to the word(s) to write at offset. * * Writes a byte or word to the NVM using the flash access registers. **/ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) { struct e1000_nvm_info *nvm = &hw->nvm; struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; u16 i; DEBUGFUNC("e1000_write_nvm_ich8lan"); if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || (words == 0)) { DEBUGOUT("nvm parameter(s) out of bounds\n"); return -E1000_ERR_NVM; } nvm->ops.acquire(hw); for (i = 0; i < words; i++) { dev_spec->shadow_ram[offset+i].modified = TRUE; dev_spec->shadow_ram[offset+i].value = data[i]; } nvm->ops.release(hw); return E1000_SUCCESS; } /** + * e1000_update_nvm_checksum_spt - Update the checksum for NVM + * @hw: pointer to the HW structure + * + * The NVM checksum is updated by calling the generic update_nvm_checksum, + * which writes the checksum to the shadow ram. The changes in the shadow + * ram are then committed to the EEPROM by processing each bank at a time + * checking for the modified bit and writing only the pending changes. + * After a successful commit, the shadow ram is cleared and is ready for + * future writes. + **/ +static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 i, act_offset, new_bank_offset, old_bank_offset, bank; + s32 ret_val; + u32 dword = 0; + + DEBUGFUNC("e1000_update_nvm_checksum_spt"); + + ret_val = e1000_update_nvm_checksum_generic(hw); + if (ret_val) + goto out; + + if (nvm->type != e1000_nvm_flash_sw) + goto out; + + nvm->ops.acquire(hw); + + /* We're writing to the opposite bank so if we're on bank 1, + * write to bank 0 etc. We also need to erase the segment that + * is going to be written + */ + ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); + if (ret_val != E1000_SUCCESS) { + DEBUGOUT("Could not detect valid bank, assuming bank 0\n"); + bank = 0; + } + + if (bank == 0) { + new_bank_offset = nvm->flash_bank_size; + old_bank_offset = 0; + ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); + if (ret_val) + goto release; + } else { + old_bank_offset = nvm->flash_bank_size; + new_bank_offset = 0; + ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); + if (ret_val) + goto release; + } + for (i = 0; i < E1000_SHADOW_RAM_WORDS; i += 2) { + /* Determine whether to write the value stored + * in the other NVM bank or a modified value stored + * in the shadow RAM + */ + ret_val = e1000_read_flash_dword_ich8lan(hw, + i + old_bank_offset, + &dword); + + if (dev_spec->shadow_ram[i].modified) { + dword &= 0xffff0000; + dword |= (dev_spec->shadow_ram[i].value & 0xffff); + } + if (dev_spec->shadow_ram[i + 1].modified) { + dword &= 0x0000ffff; + dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff) + << 16); + } + if (ret_val) + break; + + /* If the word is 0x13, then make sure the signature bits + * (15:14) are 11b until the commit has completed. + * This will allow us to write 10b which indicates the + * signature is valid. 
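+		 *
+		 * Worked out (assuming E1000_ICH_NVM_SIG_MASK covers bits
+		 * 15:14 of the signature word):
+		 *	during the copy: word 0x13 bits 15:14 = 11b
+		 *	finalize:        dword &= 0xBFFFFFFF clears bit 30
+		 *	                 of the containing dword, i.e. bit 14
+		 *	                 of word 0x13 -> 10b (valid)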
We want to do this after the write + * has completed so that we don't mark the segment valid + * while the write is still in progress + */ + if (i == E1000_ICH_NVM_SIG_WORD - 1) + dword |= E1000_ICH_NVM_SIG_MASK << 16; + + /* Convert offset to bytes. */ + act_offset = (i + new_bank_offset) << 1; + + usec_delay(100); + + /* Write the data to the new bank. Offset in words*/ + act_offset = i + new_bank_offset; + ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, + dword); + if (ret_val) + break; + } + + /* Don't bother writing the segment valid bits if sector + * programming failed. + */ + if (ret_val) { + DEBUGOUT("Flash commit failed.\n"); + goto release; + } + + /* Finally validate the new segment by setting bit 15:14 + * to 10b in word 0x13 , this can be done without an + * erase as well since these bits are 11 to start with + * and we need to change bit 14 to 0b + */ + act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; + + /*offset in words but we read dword*/ + --act_offset; + ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword); + + if (ret_val) + goto release; + + dword &= 0xBFFFFFFF; + ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword); + + if (ret_val) + goto release; + + /* And invalidate the previously valid segment by setting + * its signature word (0x13) high_byte to 0b. This can be + * done without an erase because flash erase sets all bits + * to 1's. We can write 1's to 0's without an erase + */ + act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; + + /* offset in words but we read dword*/ + act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1; + ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword); + + if (ret_val) + goto release; + + dword &= 0x00FFFFFF; + ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword); + + if (ret_val) + goto release; + + /* Great! Everything worked, we can now clear the cached entries. */ + for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) { + dev_spec->shadow_ram[i].modified = FALSE; + dev_spec->shadow_ram[i].value = 0xFFFF; + } + +release: + nvm->ops.release(hw); + + /* Reload the EEPROM, or else modifications will not appear + * until after the next adapter reset. + */ + if (!ret_val) { + nvm->ops.reload(hw); + msec_delay(10); + } + +out: + if (ret_val) + DEBUGOUT1("NVM update error: %d\n", ret_val); + + return ret_val; +} + +/** * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM * @hw: pointer to the HW structure * * The NVM checksum is updated by calling the generic update_nvm_checksum, * which writes the checksum to the shadow ram. The changes in the shadow * ram are then committed to the EEPROM by processing each bank at a time * checking for the modified bit and writing only the pending changes. * After a successful commit, the shadow ram is cleared and is ready for * future writes. **/ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) { struct e1000_nvm_info *nvm = &hw->nvm; struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; u32 i, act_offset, new_bank_offset, old_bank_offset, bank; s32 ret_val; u16 data = 0; DEBUGFUNC("e1000_update_nvm_checksum_ich8lan"); ret_val = e1000_update_nvm_checksum_generic(hw); if (ret_val) goto out; if (nvm->type != e1000_nvm_flash_sw) goto out; nvm->ops.acquire(hw); /* We're writing to the opposite bank so if we're on bank 1, * write to bank 0 etc. 
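	 *
	 * The commit is double-buffered; a condensed view of the flow
	 * implemented below:
	 *
	 *	detect the valid bank              -> old bank
	 *	erase the other bank               -> new bank
	 *	copy old -> new, substituting any modified shadow-RAM words
	 *	write signature 10b to the new bank   (now valid)
	 *	write signature 00b to the old bank   (now invalid)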
We also need to erase the segment that * is going to be written */ ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); if (ret_val != E1000_SUCCESS) { DEBUGOUT("Could not detect valid bank, assuming bank 0\n"); bank = 0; } if (bank == 0) { new_bank_offset = nvm->flash_bank_size; old_bank_offset = 0; ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); if (ret_val) goto release; } else { old_bank_offset = nvm->flash_bank_size; new_bank_offset = 0; ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); if (ret_val) goto release; } for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) { if (dev_spec->shadow_ram[i].modified) { data = dev_spec->shadow_ram[i].value; } else { ret_val = e1000_read_flash_word_ich8lan(hw, i + old_bank_offset, &data); if (ret_val) break; } /* If the word is 0x13, then make sure the signature bits * (15:14) are 11b until the commit has completed. * This will allow us to write 10b which indicates the * signature is valid. We want to do this after the write * has completed so that we don't mark the segment valid * while the write is still in progress */ if (i == E1000_ICH_NVM_SIG_WORD) data |= E1000_ICH_NVM_SIG_MASK; /* Convert offset to bytes. */ act_offset = (i + new_bank_offset) << 1; usec_delay(100); /* Write the bytes to the new bank. */ ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, (u8)data); if (ret_val) break; usec_delay(100); ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset + 1, (u8)(data >> 8)); if (ret_val) break; } /* Don't bother writing the segment valid bits if sector * programming failed. */ if (ret_val) { DEBUGOUT("Flash commit failed.\n"); goto release; } /* Finally validate the new segment by setting bit 15:14 * to 10b in word 0x13 , this can be done without an * erase as well since these bits are 11 to start with * and we need to change bit 14 to 0b */ act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data); if (ret_val) goto release; data &= 0xBFFF; ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset * 2 + 1, (u8)(data >> 8)); if (ret_val) goto release; /* And invalidate the previously valid segment by setting * its signature word (0x13) high_byte to 0b. This can be * done without an erase because flash erase sets all bits * to 1's. We can write 1's to 0's without an erase */ act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0); if (ret_val) goto release; /* Great! Everything worked, we can now clear the cached entries. */ for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) { dev_spec->shadow_ram[i].modified = FALSE; dev_spec->shadow_ram[i].value = 0xFFFF; } release: nvm->ops.release(hw); /* Reload the EEPROM, or else modifications will not appear * until after the next adapter reset. */ if (!ret_val) { nvm->ops.reload(hw); msec_delay(10); } out: if (ret_val) DEBUGOUT1("NVM update error: %d\n", ret_val); return ret_val; } /** * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum * @hw: pointer to the HW structure * * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19. * If the bit is 0, that the EEPROM had been modified, but the checksum was not * calculated, in which case we need to calculate the checksum and set bit 6. **/ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) { s32 ret_val; u16 data; u16 word; u16 valid_csum_mask; DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan"); /* Read NVM and check Invalid Image CSUM bit. 
If this bit is 0, * the checksum needs to be fixed. This bit is an indication that * the NVM was prepared by OEM software and did not calculate * the checksum...a likely scenario. */ switch (hw->mac.type) { case e1000_pch_lpt: + case e1000_pch_spt: word = NVM_COMPAT; valid_csum_mask = NVM_COMPAT_VALID_CSUM; break; default: word = NVM_FUTURE_INIT_WORD1; valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM; break; } ret_val = hw->nvm.ops.read(hw, word, 1, &data); if (ret_val) return ret_val; if (!(data & valid_csum_mask)) { data |= valid_csum_mask; ret_val = hw->nvm.ops.write(hw, word, 1, &data); if (ret_val) return ret_val; ret_val = hw->nvm.ops.update(hw); if (ret_val) return ret_val; } return e1000_validate_nvm_checksum_generic(hw); } /** * e1000_write_flash_data_ich8lan - Writes bytes to the NVM * @hw: pointer to the HW structure * @offset: The offset (in bytes) of the byte/word to read. * @size: Size of data to read, 1=byte 2=word * @data: The byte(s) to write to the NVM. * * Writes one/two bytes to the NVM using the flash access registers. **/ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, u8 size, u16 data) { union ich8_hws_flash_status hsfsts; union ich8_hws_flash_ctrl hsflctl; u32 flash_linear_addr; u32 flash_data = 0; s32 ret_val; u8 count = 0; DEBUGFUNC("e1000_write_ich8_data"); - if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) - return -E1000_ERR_NVM; + if (hw->mac.type == e1000_pch_spt) { + if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + } else { + if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + } flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + hw->nvm.flash_base_addr); do { usec_delay(1); /* Steps */ ret_val = e1000_flash_cycle_init_ich8lan(hw); if (ret_val != E1000_SUCCESS) break; - hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL); + /* In SPT, This register is in Lan memory space, not + * flash. Therefore, only 32 bit access is supported + */ + if (hw->mac.type == e1000_pch_spt) + hsflctl.regval = + E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16; + else + hsflctl.regval = + E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL); /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ hsflctl.hsf_ctrl.fldbcount = size - 1; hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; - E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval); + /* In SPT, This register is in Lan memory space, + * not flash. Therefore, only 32 bit access is + * supported + */ + if (hw->mac.type == e1000_pch_spt) + E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, + hsflctl.regval << 16); + else + E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, + hsflctl.regval); E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr); if (size == 1) flash_data = (u32)data & 0x00FF; else flash_data = (u32)data; E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data); /* check if FCERR is set to 1 , if set to 1, clear it * and try the whole sequence a few more times else done */ ret_val = e1000_flash_cycle_ich8lan(hw, ICH_FLASH_WRITE_COMMAND_TIMEOUT); if (ret_val == E1000_SUCCESS) break; /* If we're here, then things are most likely * completely hosed, but if the error condition * is detected, it won't hurt to give it another * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. */ hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS); if (hsfsts.hsf_status.flcerr) /* Repeat for some time before giving up. 
			 */
			continue;
		if (!hsfsts.hsf_status.flcdone) {
			DEBUGOUT("Timeout error - flash cycle did not complete.\n");
			break;
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return ret_val;
}

+/**
+ * e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the dword to write.
+ * @data: The 4 bytes to write to the NVM.
+ *
+ * Writes a dword to the NVM using the flash access registers.
+ **/
+static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
+					    u32 data)
+{
+	union ich8_hws_flash_status hsfsts;
+	union ich8_hws_flash_ctrl hsflctl;
+	u32 flash_linear_addr;
+	s32 ret_val;
+	u8 count = 0;
+	DEBUGFUNC("e1000_write_flash_data32_ich8lan");
+
+	if (hw->mac.type == e1000_pch_spt) {
+		if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
+			return -E1000_ERR_NVM;
+	}
+	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+			     hw->nvm.flash_base_addr);
+	do {
+		usec_delay(1);
+		/* Steps */
+		ret_val = e1000_flash_cycle_init_ich8lan(hw);
+		if (ret_val != E1000_SUCCESS)
+			break;
+
+		/* In SPT, this register is in Lan memory space, not
+		 * flash. Therefore, only 32 bit access is supported
+		 */
+		if (hw->mac.type == e1000_pch_spt)
+			hsflctl.regval = E1000_READ_FLASH_REG(hw,
+							      ICH_FLASH_HSFSTS)
+					 >> 16;
+		else
+			hsflctl.regval = E1000_READ_FLASH_REG16(hw,
+							     ICH_FLASH_HSFCTL);
+
+		hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
+		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
+
+		/* In SPT, this register is in Lan memory space,
+		 * not flash. Therefore, only 32 bit access is
+		 * supported
+		 */
+		if (hw->mac.type == e1000_pch_spt)
+			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
+					      hsflctl.regval << 16);
+		else
+			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
+						hsflctl.regval);
+
+		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
+
+		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, data);
+
+		/* check if FCERR is set to 1 , if set to 1, clear it
+		 * and try the whole sequence a few more times else done
+		 */
+		ret_val = e1000_flash_cycle_ich8lan(hw,
+					       ICH_FLASH_WRITE_COMMAND_TIMEOUT);
+
+		if (ret_val == E1000_SUCCESS)
+			break;
+
+		/* If we're here, then things are most likely
+		 * completely hosed, but if the error condition
+		 * is detected, it won't hurt to give it another
+		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
+		 */
+		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+
+		if (hsfsts.hsf_status.flcerr)
+			/* Repeat for some time before giving up. */
+			continue;
+		if (!hsfsts.hsf_status.flcdone) {
+			DEBUGOUT("Timeout error - flash cycle did not complete.\n");
+			break;
+		}
+	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+	return ret_val;
+}
+
/**
 * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
 * @hw: pointer to the HW structure
 * @offset: The index of the byte to write.
 * @data: The byte to write to the NVM.
 *
 * Writes a single byte to the NVM using the flash access registers.
 **/
static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
					  u8 data)
{
	u16 word = (u16)data;

	DEBUGFUNC("e1000_write_flash_byte_ich8lan");

	return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
}

+/**
+ * e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset of the word to write.
+ * @dword: The dword to write to the NVM.
+ *
+ * Writes a single dword to the NVM using the flash access registers.
+ * Goes through a retry algorithm before giving up.
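+ *
+ * With the parameters used below (up to 100 retries, 100 usec apart), a
+ * persistently failing dword costs roughly 100 * 100 usec = 10 msec
+ * before -E1000_ERR_NVM is returned.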
+**/ +static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw, + u32 offset, u32 dword) +{ + s32 ret_val; + u16 program_retries; + DEBUGFUNC("e1000_retry_write_flash_dword_ich8lan"); + /* Must convert word offset into bytes. */ + offset <<= 1; + + ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword); + + if (!ret_val) + return ret_val; + for (program_retries = 0; program_retries < 100; program_retries++) { + DEBUGOUT2("Retrying Byte %8.8X at offset %u\n", dword, offset); + usec_delay(100); + ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword); + if (ret_val == E1000_SUCCESS) + break; + } + if (program_retries == 100) + return -E1000_ERR_NVM; + + return E1000_SUCCESS; +} + /** * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM * @hw: pointer to the HW structure * @offset: The offset of the byte to write. * @byte: The byte to write to the NVM. * * Writes a single byte to the NVM using the flash access registers. * Goes through a retry algorithm before giving up. **/ static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, u8 byte) { s32 ret_val; u16 program_retries; DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan"); ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); if (!ret_val) return ret_val; for (program_retries = 0; program_retries < 100; program_retries++) { DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset); usec_delay(100); ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); if (ret_val == E1000_SUCCESS) break; } if (program_retries == 100) return -E1000_ERR_NVM; return E1000_SUCCESS; } /** * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM * @hw: pointer to the HW structure * @bank: 0 for first bank, 1 for second bank, etc. * * Erases the bank specified. Each bank is a 4k block. Banks are 0 based. * bank N is 4096 * N + flash_reg_addr. **/ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) { struct e1000_nvm_info *nvm = &hw->nvm; union ich8_hws_flash_status hsfsts; union ich8_hws_flash_ctrl hsflctl; u32 flash_linear_addr; /* bank size is in 16bit words - adjust to bytes */ u32 flash_bank_size = nvm->flash_bank_size * 2; s32 ret_val; s32 count = 0; s32 j, iteration, sector_size; DEBUGFUNC("e1000_erase_flash_bank_ich8lan"); hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS); /* Determine HW Sector size: Read BERASE bits of hw flash status * register * 00: The Hw sector is 256 bytes, hence we need to erase 16 * consecutive sectors. The start index for the nth Hw sector * can be calculated as = bank * 4096 + n * 256 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector. * The start index for the nth Hw sector can be calculated * as = bank * 4096 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192 * (ich9 only, otherwise error condition) * 11: The Hw sector is 64K bytes, nth sector = bank * 65536 */ switch (hsfsts.hsf_status.berasesz) { case 0: /* Hw sector size 256 */ sector_size = ICH_FLASH_SEG_SIZE_256; iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256; break; case 1: sector_size = ICH_FLASH_SEG_SIZE_4K; iteration = 1; break; case 2: sector_size = ICH_FLASH_SEG_SIZE_8K; iteration = 1; break; case 3: sector_size = ICH_FLASH_SEG_SIZE_64K; iteration = 1; break; default: return -E1000_ERR_NVM; } /* Start with the base address, then add the sector offset. */ flash_linear_addr = hw->nvm.flash_base_addr; flash_linear_addr += (bank) ? 
flash_bank_size : 0; for (j = 0; j < iteration; j++) { do { u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT; /* Steps */ ret_val = e1000_flash_cycle_init_ich8lan(hw); if (ret_val) return ret_val; /* Write a value 11 (block Erase) in Flash * Cycle field in hw flash control */ - hsflctl.regval = - E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL); + if (hw->mac.type == e1000_pch_spt) + hsflctl.regval = + E1000_READ_FLASH_REG(hw, + ICH_FLASH_HSFSTS)>>16; + else + hsflctl.regval = + E1000_READ_FLASH_REG16(hw, + ICH_FLASH_HSFCTL); hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; - E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, - hsflctl.regval); + if (hw->mac.type == e1000_pch_spt) + E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS, + hsflctl.regval << 16); + else + E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, + hsflctl.regval); /* Write the last 24 bits of an index within the * block into Flash Linear address field in Flash * Address. */ flash_linear_addr += (j * sector_size); E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr); ret_val = e1000_flash_cycle_ich8lan(hw, timeout); if (ret_val == E1000_SUCCESS) break; /* Check if FCERR is set to 1. If 1, * clear it and try the whole sequence * a few more times else Done */ hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS); if (hsfsts.hsf_status.flcerr) /* repeat for some time before giving up */ continue; else if (!hsfsts.hsf_status.flcdone) return ret_val; } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT); } return E1000_SUCCESS; } /** * e1000_valid_led_default_ich8lan - Set the default LED settings * @hw: pointer to the HW structure * @data: Pointer to the LED settings * * Reads the LED default settings from the NVM to data. If the NVM LED * settings is all 0's or F's, set the LED default to a valid LED default * setting. **/ static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data) { s32 ret_val; DEBUGFUNC("e1000_valid_led_default_ich8lan"); ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); if (ret_val) { DEBUGOUT("NVM Read Error\n"); return ret_val; } if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) *data = ID_LED_DEFAULT_ICH8LAN; return E1000_SUCCESS; } /** * e1000_id_led_init_pchlan - store LED configurations * @hw: pointer to the HW structure * * PCH does not control LEDs via the LEDCTL register, rather it uses * the PHY LED configuration register. * * PCH also does not have an "always on" or "always off" mode which * complicates the ID feature. Instead of using the "on" mode to indicate * in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()), * use "link_up" mode. The LEDs will still ID on request if there is no * link based on logic in e1000_led_[on|off]_pchlan(). 
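 *
 * Decode sketch for the loop below: the NVM word packs one 4-bit mode
 * per LED, while the PHY LED configuration packs 5 bits per LED, hence
 * the two different strides (i << 2 versus i * 5):
 *
 *	i = 0: mode = data bits 3:0,  PHY LED field at bits 4:0
 *	i = 1: mode = data bits 7:4,  PHY LED field at bits 9:5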
**/
static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
	const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
	u16 data, i, temp, shift;

	DEBUGFUNC("e1000_id_led_init_pchlan");

	/* Get default ID LED modes */
	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
	if (ret_val)
		return ret_val;

	mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
	mac->ledctl_mode1 = mac->ledctl_default;
	mac->ledctl_mode2 = mac->ledctl_default;

	for (i = 0; i < 4; i++) {
		temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
		shift = (i * 5);
		switch (temp) {
		case ID_LED_ON1_DEF2:
		case ID_LED_ON1_ON2:
		case ID_LED_ON1_OFF2:
			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
			mac->ledctl_mode1 |= (ledctl_on << shift);
			break;
		case ID_LED_OFF1_DEF2:
		case ID_LED_OFF1_ON2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
			mac->ledctl_mode1 |= (ledctl_off << shift);
			break;
		default:
			/* Do nothing */
			break;
		}
		switch (temp) {
		case ID_LED_DEF1_ON2:
		case ID_LED_ON1_ON2:
		case ID_LED_OFF1_ON2:
			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
			mac->ledctl_mode2 |= (ledctl_on << shift);
			break;
		case ID_LED_DEF1_OFF2:
		case ID_LED_ON1_OFF2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
			mac->ledctl_mode2 |= (ledctl_off << shift);
			break;
		default:
			/* Do nothing */
			break;
		}
	}

	return E1000_SUCCESS;
}

/**
 * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
 * @hw: pointer to the HW structure
 *
 * ICH8 uses the PCI Express bus, but does not contain a PCI Express
 * Capability register, so the bus width is hardcoded.
 **/
static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
{
	struct e1000_bus_info *bus = &hw->bus;
	s32 ret_val;

	DEBUGFUNC("e1000_get_bus_info_ich8lan");

	ret_val = e1000_get_bus_info_pcie_generic(hw);

	/* ICH devices are "PCI Express"-ish. They have
	 * a configuration space, but do not contain
	 * PCI Express Capability registers, so bus width
	 * must be hardcoded.
	 */
	if (bus->width == e1000_bus_width_unknown)
		bus->width = e1000_bus_width_pcie_x1;

	return ret_val;
}

/**
 * e1000_reset_hw_ich8lan - Reset the hardware
 * @hw: pointer to the HW structure
 *
 * Does a full reset of the hardware which includes a reset of the PHY and
 * MAC.
 **/
static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u16 kum_cfg;
	u32 ctrl, reg;
	s32 ret_val;

	DEBUGFUNC("e1000_reset_hw_ich8lan");

	/* Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = e1000_disable_pcie_master_generic(hw);
	if (ret_val)
		DEBUGOUT("PCI-E Master disable polling has failed.\n");

	DEBUGOUT("Masking off all interrupts\n");
	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);

	/* Disable the Transmit and Receive units. Then delay to allow
	 * any pending transactions to complete before we hit the MAC
	 * with the global reset.
	 */
	E1000_WRITE_REG(hw, E1000_RCTL, 0);
	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
	E1000_WRITE_FLUSH(hw);

	msec_delay(10);

	/* Workaround for ICH8 bit corruption issue in FIFO memory */
	if (hw->mac.type == e1000_ich8lan) {
		/* Set Tx and Rx buffer allocation to 8k apiece. */
		E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
		/* Set Packet Buffer Size to 16k.
*/ E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K); } if (hw->mac.type == e1000_pchlan) { /* Save the NVM K1 bit setting*/ ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg); if (ret_val) return ret_val; if (kum_cfg & E1000_NVM_K1_ENABLE) dev_spec->nvm_k1_enabled = TRUE; else dev_spec->nvm_k1_enabled = FALSE; } ctrl = E1000_READ_REG(hw, E1000_CTRL); if (!hw->phy.ops.check_reset_block(hw)) { /* Full-chip reset requires MAC and PHY reset at the same * time to make sure the interface between MAC and the * external PHY is reset. */ ctrl |= E1000_CTRL_PHY_RST; /* Gate automatic PHY configuration by hardware on * non-managed 82579 */ if ((hw->mac.type == e1000_pch2lan) && !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID)) e1000_gate_hw_phy_config_ich8lan(hw, TRUE); } ret_val = e1000_acquire_swflag_ich8lan(hw); DEBUGOUT("Issuing a global reset to ich8lan\n"); E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST)); /* cannot issue a flush here because it hangs the hardware */ msec_delay(20); /* Set Phy Config Counter to 50msec */ if (hw->mac.type == e1000_pch2lan) { reg = E1000_READ_REG(hw, E1000_FEXTNVM3); reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK; reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC; E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg); } if (!ret_val) E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex); if (ctrl & E1000_CTRL_PHY_RST) { ret_val = hw->phy.ops.get_cfg_done(hw); if (ret_val) return ret_val; ret_val = e1000_post_phy_reset_ich8lan(hw); if (ret_val) return ret_val; } /* For PCH, this write will make sure that any noise * will be detected as a CRC error and be dropped rather than show up * as a bad packet to the DMA engine. */ if (hw->mac.type == e1000_pchlan) E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565); E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); E1000_READ_REG(hw, E1000_ICR); reg = E1000_READ_REG(hw, E1000_KABGTXD); reg |= E1000_KABGTXD_BGSQLBIAS; E1000_WRITE_REG(hw, E1000_KABGTXD, reg); return E1000_SUCCESS; } /** * e1000_init_hw_ich8lan - Initialize the hardware * @hw: pointer to the HW structure * * Prepares the hardware for transmit and receive by doing the following: * - initialize hardware bits * - initialize LED identification * - setup receive address registers * - setup flow control * - setup transmit descriptors * - clear statistics **/ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) { struct e1000_mac_info *mac = &hw->mac; u32 ctrl_ext, txdctl, snoop; s32 ret_val; u16 i; DEBUGFUNC("e1000_init_hw_ich8lan"); e1000_initialize_hw_bits_ich8lan(hw); /* Initialize identification LED */ ret_val = mac->ops.id_led_init(hw); /* An error is not fatal and we should not stop init due to this */ if (ret_val) DEBUGOUT("Error initializing identification LED\n"); /* Setup the receive address. */ e1000_init_rx_addrs_generic(hw, mac->rar_entry_count); /* Zero out the Multicast HASH table */ DEBUGOUT("Zeroing the MTA\n"); for (i = 0; i < mac->mta_reg_count; i++) E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); /* The 82578 Rx buffer will stall if wakeup is enabled in host and * the ME. Disable wakeup by clearing the host wakeup bit. * Reset the phy after disabling host wakeup to reset the Rx buffer. 
*/ if (hw->phy.type == e1000_phy_82578) { hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i); i &= ~BM_WUC_HOST_WU_BIT; hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i); ret_val = e1000_phy_hw_reset_ich8lan(hw); if (ret_val) return ret_val; } /* Setup link and flow control */ ret_val = mac->ops.setup_link(hw); /* Set the transmit descriptor write-back policy for both queues */ txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0)); txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB); txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) | E1000_TXDCTL_MAX_TX_DESC_PREFETCH); E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl); txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1)); txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB); txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) | E1000_TXDCTL_MAX_TX_DESC_PREFETCH); E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl); /* ICH8 has opposite polarity of no_snoop bits. * By default, we should use snoop behavior. */ if (mac->type == e1000_ich8lan) snoop = PCIE_ICH8_SNOOP_ALL; else snoop = (u32) ~(PCIE_NO_SNOOP_ALL); e1000_set_pcie_no_snoop_generic(hw, snoop); ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); ctrl_ext |= E1000_CTRL_EXT_RO_DIS; E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); /* Clear all of the statistics registers (clear on read). It is * important that we do this after we have tried to establish link * because the symbol error count will increment wildly if there * is no link. */ e1000_clear_hw_cntrs_ich8lan(hw); return ret_val; } /** * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits * @hw: pointer to the HW structure * * Sets/Clears required hardware bits necessary for correctly setting up the * hardware for transmit and receive. **/ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) { u32 reg; DEBUGFUNC("e1000_initialize_hw_bits_ich8lan"); /* Extended Device Control */ reg = E1000_READ_REG(hw, E1000_CTRL_EXT); reg |= (1 << 22); /* Enable PHY low-power state when MAC is at D3 w/o WoL */ if (hw->mac.type >= e1000_pchlan) reg |= E1000_CTRL_EXT_PHYPDEN; E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); /* Transmit Descriptor Control 0 */ reg = E1000_READ_REG(hw, E1000_TXDCTL(0)); reg |= (1 << 22); E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg); /* Transmit Descriptor Control 1 */ reg = E1000_READ_REG(hw, E1000_TXDCTL(1)); reg |= (1 << 22); E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg); /* Transmit Arbitration Control 0 */ reg = E1000_READ_REG(hw, E1000_TARC(0)); if (hw->mac.type == e1000_ich8lan) reg |= (1 << 28) | (1 << 29); reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27); E1000_WRITE_REG(hw, E1000_TARC(0), reg); /* Transmit Arbitration Control 1 */ reg = E1000_READ_REG(hw, E1000_TARC(1)); if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR) reg &= ~(1 << 28); else reg |= (1 << 28); reg |= (1 << 24) | (1 << 26) | (1 << 30); E1000_WRITE_REG(hw, E1000_TARC(1), reg); /* Device Status */ if (hw->mac.type == e1000_ich8lan) { reg = E1000_READ_REG(hw, E1000_STATUS); reg &= ~(1 << 31); E1000_WRITE_REG(hw, E1000_STATUS, reg); } /* work-around descriptor data corruption issue during nfs v2 udp * traffic, just disable the nfs filtering capability */ reg = E1000_READ_REG(hw, E1000_RFCTL); reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS); /* Disable IPv6 extension header parsing because some malformed * IPv6 headers can hang the Rx. 
*/ if (hw->mac.type == e1000_ich8lan) reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); E1000_WRITE_REG(hw, E1000_RFCTL, reg); /* Enable ECC on Lynxpoint */ - if (hw->mac.type == e1000_pch_lpt) { + if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { reg = E1000_READ_REG(hw, E1000_PBECCSTS); reg |= E1000_PBECCSTS_ECC_ENABLE; E1000_WRITE_REG(hw, E1000_PBECCSTS, reg); reg = E1000_READ_REG(hw, E1000_CTRL); reg |= E1000_CTRL_MEHE; E1000_WRITE_REG(hw, E1000_CTRL, reg); } return; } /** * e1000_setup_link_ich8lan - Setup flow control and link settings * @hw: pointer to the HW structure * * Determines which flow control settings to use, then configures flow * control. Calls the appropriate media-specific link configuration * function. Assuming the adapter has a valid link partner, a valid link * should be established. Assumes the hardware has previously been reset * and the transmitter and receiver are not enabled. **/ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) { s32 ret_val; DEBUGFUNC("e1000_setup_link_ich8lan"); if (hw->phy.ops.check_reset_block(hw)) return E1000_SUCCESS; /* ICH parts do not have a word in the NVM to determine * the default flow control setting, so we explicitly * set it to full. */ if (hw->fc.requested_mode == e1000_fc_default) hw->fc.requested_mode = e1000_fc_full; /* Save off the requested flow control mode for use later. Depending * on the link partner's capabilities, we may or may not use this mode. */ hw->fc.current_mode = hw->fc.requested_mode; DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); /* Continue to configure the copper link. */ ret_val = hw->mac.ops.setup_physical_interface(hw); if (ret_val) return ret_val; E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time); if ((hw->phy.type == e1000_phy_82578) || (hw->phy.type == e1000_phy_82579) || (hw->phy.type == e1000_phy_i217) || (hw->phy.type == e1000_phy_82577)) { E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time); ret_val = hw->phy.ops.write_reg(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27), hw->fc.pause_time); if (ret_val) return ret_val; } return e1000_set_fc_watermarks_generic(hw); } /** * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface * @hw: pointer to the HW structure * * Configures the kumeran interface to the PHY to wait the appropriate time * when polling the PHY, then call the generic setup_copper_link to finish * configuring the copper link. **/ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) { u32 ctrl; s32 ret_val; u16 reg_data; DEBUGFUNC("e1000_setup_copper_link_ich8lan"); ctrl = E1000_READ_REG(hw, E1000_CTRL); ctrl |= E1000_CTRL_SLU; ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); E1000_WRITE_REG(hw, E1000_CTRL, ctrl); /* Set the mac to wait the maximum time between each iteration * and increase the max iterations when polling the phy; * this fixes erroneous timeouts at 10Mbps. 
	 */
	ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
					       0xFFFF);
	if (ret_val)
		return ret_val;
	ret_val = e1000_read_kmrn_reg_generic(hw,
					      E1000_KMRNCTRLSTA_INBAND_PARAM,
					      &reg_data);
	if (ret_val)
		return ret_val;
	reg_data |= 0x3F;
	ret_val = e1000_write_kmrn_reg_generic(hw,
					       E1000_KMRNCTRLSTA_INBAND_PARAM,
					       reg_data);
	if (ret_val)
		return ret_val;

	switch (hw->phy.type) {
	case e1000_phy_igp_3:
		ret_val = e1000_copper_link_setup_igp(hw);
		if (ret_val)
			return ret_val;
		break;
	case e1000_phy_bm:
	case e1000_phy_82578:
		ret_val = e1000_copper_link_setup_m88(hw);
		if (ret_val)
			return ret_val;
		break;
	case e1000_phy_82577:
	case e1000_phy_82579:
		ret_val = e1000_copper_link_setup_82577(hw);
		if (ret_val)
			return ret_val;
		break;
	case e1000_phy_ife:
		ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
					       &reg_data);
		if (ret_val)
			return ret_val;

		reg_data &= ~IFE_PMC_AUTO_MDIX;

		switch (hw->phy.mdix) {
		case 1:
			reg_data &= ~IFE_PMC_FORCE_MDIX;
			break;
		case 2:
			reg_data |= IFE_PMC_FORCE_MDIX;
			break;
		case 0:
		default:
			reg_data |= IFE_PMC_AUTO_MDIX;
			break;
		}
		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
						reg_data);
		if (ret_val)
			return ret_val;
		break;
	default:
		break;
	}

	return e1000_setup_copper_link_generic(hw);
}

/**
 * e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
 * @hw: pointer to the HW structure
 *
 * Calls the PHY specific link setup function and then calls the
 * generic setup_copper_link to finish configuring the link for
 * Lynxpoint PCH devices
 **/
static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;

	DEBUGFUNC("e1000_setup_copper_link_pch_lpt");

	ctrl = E1000_READ_REG(hw, E1000_CTRL);
	ctrl |= E1000_CTRL_SLU;
	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);

	ret_val = e1000_copper_link_setup_82577(hw);
	if (ret_val)
		return ret_val;

	return e1000_setup_copper_link_generic(hw);
}

/**
 * e1000_get_link_up_info_ich8lan - Get current link speed and duplex
 * @hw: pointer to the HW structure
 * @speed: pointer to store current link speed
 * @duplex: pointer to store the current link duplex
 *
 * Calls the generic get_speed_and_duplex to retrieve the current link
 * information and then calls the Kumeran lock loss workaround for links at
 * gigabit speeds.
 **/
static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
					  u16 *duplex)
{
	s32 ret_val;

	DEBUGFUNC("e1000_get_link_up_info_ich8lan");

	ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
	if (ret_val)
		return ret_val;

	if ((hw->mac.type == e1000_ich8lan) &&
	    (hw->phy.type == e1000_phy_igp_3) &&
	    (*speed == SPEED_1000)) {
		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
	}

	return ret_val;
}

/**
 * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
 * @hw: pointer to the HW structure
 *
 * Work-around for 82566 Kumeran PCS lock loss:
 * On link status change (i.e. PCI reset, speed change) and link is up and
 * speed is gigabit-
 * 0) if workaround is optionally disabled do nothing
 * 1) wait 1ms for Kumeran link to come up
 * 2) check Kumeran Diagnostic register PCS lock loss bit
 * 3) if not set the link is locked (all is good), otherwise...
 * 4) reset the PHY
 * 5) repeat up to 10 times
 * Note: this is only called for IGP3 copper when speed is 1gb.
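 *
 * The workaround is gated by dev_spec->kmrn_lock_loss_workaround_enabled,
 * which callers toggle through the public helper defined further below:
 *
 *	e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);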
**/ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw) { struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; u32 phy_ctrl; s32 ret_val; u16 i, data; bool link; DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan"); if (!dev_spec->kmrn_lock_loss_workaround_enabled) return E1000_SUCCESS; /* Make sure link is up before proceeding. If not just return. * Attempting this while link is negotiating fouled up link * stability. */ ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); if (!link) return E1000_SUCCESS; for (i = 0; i < 10; i++) { /* read once to clear */ ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data); if (ret_val) return ret_val; /* and again to get new status */ ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data); if (ret_val) return ret_val; /* check for PCS lock */ if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS)) return E1000_SUCCESS; /* Issue PHY reset */ hw->phy.ops.reset(hw); msec_delay_irq(5); } /* Disable GigE link negotiation */ phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL); phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE | E1000_PHY_CTRL_NOND0A_GBE_DISABLE); E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); /* Call gig speed drop workaround on Gig disable before accessing * any PHY registers */ e1000_gig_downshift_workaround_ich8lan(hw); /* unable to acquire PCS lock */ return -E1000_ERR_PHY; } /** * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state * @hw: pointer to the HW structure * @state: boolean value used to set the current Kumeran workaround state * * If ICH8, set the current Kumeran workaround state (enabled - TRUE / disabled - FALSE). **/ void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, bool state) { struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan"); if (hw->mac.type != e1000_ich8lan) { DEBUGOUT("Workaround applies to ICH8 only.\n"); return; } dev_spec->kmrn_lock_loss_workaround_enabled = state; return; } /** * e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3 * @hw: pointer to the HW structure * * Workaround for 82566 power-down on D3 entry: * 1) disable gigabit link * 2) write VR power-down enable * 3) read it back * Continue if successful, else issue LCD reset and repeat **/ void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw) { u32 reg; u16 data; u8 retry = 0; DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan"); if (hw->phy.type != e1000_phy_igp_3) return; /* Try the workaround twice (if needed) */ do { /* Disable link */ reg = E1000_READ_REG(hw, E1000_PHY_CTRL); reg |= (E1000_PHY_CTRL_GBE_DISABLE | E1000_PHY_CTRL_NOND0A_GBE_DISABLE); E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg); /* Call gig speed drop workaround on Gig disable before * accessing any PHY registers */ if (hw->mac.type == e1000_ich8lan) e1000_gig_downshift_workaround_ich8lan(hw); /* Write VR power-down enable */ hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data); data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK; hw->phy.ops.write_reg(hw, IGP3_VR_CTRL, data | IGP3_VR_CTRL_MODE_SHUTDOWN); /* Read it back and test */ hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data); data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK; if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry) break; /* Issue PHY reset and repeat at most one more time */ reg = E1000_READ_REG(hw, E1000_CTRL); E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST); retry++; } while (retry); } /** * e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working *
@hw: pointer to the HW structure * * Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC), * LPLU, Gig disable, MDIC PHY reset): * 1) Set Kumeran Near-end loopback * 2) Clear Kumeran Near-end loopback * Should only be called for ICH8[m] devices with any 1G PHY. **/ void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw) { s32 ret_val; u16 reg_data; DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan"); if ((hw->mac.type != e1000_ich8lan) || (hw->phy.type == e1000_phy_ife)) return; ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, &reg_data); if (ret_val) return; reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK; ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, reg_data); if (ret_val) return; reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK; e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, reg_data); } /** * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx * @hw: pointer to the HW structure * * During S0 to Sx transition, it is possible the link remains at gig * instead of negotiating to a lower speed. Before going to Sx, set * 'Gig Disable' to force link speed negotiation to a lower speed based on * the LPLU setting in the NVM or custom setting. For PCH and newer parts, * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also * needs to be written. * Parts that support (and are linked to a partner which supports) EEE in * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power * than 10Mbps w/o EEE. **/ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) { struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; u32 phy_ctrl; s32 ret_val; DEBUGFUNC("e1000_suspend_workarounds_ich8lan"); phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL); phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE; if (hw->phy.type == e1000_phy_i217) { u16 phy_reg, device_id = hw->device_id; if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) || (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) || (device_id == E1000_DEV_ID_PCH_I218_LM3) || - (device_id == E1000_DEV_ID_PCH_I218_V3)) { + (device_id == E1000_DEV_ID_PCH_I218_V3) || + (hw->mac.type == e1000_pch_spt)) { u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6); E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK); } ret_val = hw->phy.ops.acquire(hw); if (ret_val) goto out; if (!dev_spec->eee_disable) { u16 eee_advert; ret_val = e1000_read_emi_reg_locked(hw, I217_EEE_ADVERTISEMENT, &eee_advert); if (ret_val) goto release; /* Disable LPLU if both link partners support 100BaseT * EEE and 100Full is advertised on both ends of the * link, and enable Auto Enable LPI since there will * be no driver to enable LPI while in Sx. */ if ((eee_advert & I82579_EEE_100_SUPPORTED) && (dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) && (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) { phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_NOND0A_LPLU); /* Set Auto Enable LPI after link up */ hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg); phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI; hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg); } } /* For i217 Intel Rapid Start Technology support, * when the system is going into Sx and no manageability engine * is present, the driver must configure proxy to reset only on * power good. LPI (Low Power Idle) state must also reset only * on power good, as well as the MTA (Multicast table array). * The SMBus release must also be disabled on LCD reset.
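* In register terms this maps onto the writes below: I217_PROXY_CTRL_AUTO_DISABLE, I217_SxCTRL_ENABLE_LPI_RESET and the I217_MEMPWR update cover the no-manageability case, while I217_CGFREG_ENABLE_MTA_RESET is set regardless of the manageability check.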
*/ if (!(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID)) { /* Enable proxy to reset only on power good. */ hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL, &phy_reg); phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE; hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, phy_reg); /* Set bit enable LPI (EEE) to reset only on * power good. */ hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg); phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET; hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg); /* Disable the SMB release on LCD reset. */ hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg); phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE; hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg); } /* Enable MTA to reset for Intel Rapid Start Technology * support */ hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg); phy_reg |= I217_CGFREG_ENABLE_MTA_RESET; hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg); release: hw->phy.ops.release(hw); } out: E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl); if (hw->mac.type == e1000_ich8lan) e1000_gig_downshift_workaround_ich8lan(hw); if (hw->mac.type >= e1000_pchlan) { e1000_oem_bits_config_ich8lan(hw, FALSE); /* Reset PHY to activate OEM bits on 82577/8 */ if (hw->mac.type == e1000_pchlan) e1000_phy_hw_reset_generic(hw); ret_val = hw->phy.ops.acquire(hw); if (ret_val) return; e1000_write_smbus_addr(hw); hw->phy.ops.release(hw); } return; } /** * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0 * @hw: pointer to the HW structure * * During Sx to S0 transitions on non-managed devices or managed devices * on which PHY resets are not blocked, if the PHY registers cannot be * accessed properly by the s/w, toggle the LANPHYPC value to power cycle * the PHY. * On i217, setup Intel Rapid Start Technology. **/ -void e1000_resume_workarounds_pchlan(struct e1000_hw *hw) +u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw) { s32 ret_val; DEBUGFUNC("e1000_resume_workarounds_pchlan"); if (hw->mac.type < e1000_pch2lan) - return; + return E1000_SUCCESS; ret_val = e1000_init_phy_workarounds_pchlan(hw); if (ret_val) { DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val); - return; + return ret_val; } /* For i217 Intel Rapid Start Technology support when the system * is transitioning from Sx and no manageability engine is present, * configure SMBus to restore on reset, disable proxy, and enable * the reset on MTA (Multicast table array).
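* This mirrors e1000_suspend_workarounds_ich8lan(): when no manageability engine is present, the I217_MEMPWR_DISABLE_SMB_RELEASE bit cleared during suspend is restored and proxy is disabled by writing 0 to I217_PROXY_CTRL; the I217_CGFREG_ENABLE_MTA_RESET bit set during suspend is always cleared.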
*/ if (hw->phy.type == e1000_phy_i217) { u16 phy_reg; ret_val = hw->phy.ops.acquire(hw); if (ret_val) { DEBUGOUT("Failed to setup iRST\n"); - return; + return ret_val; } /* Clear Auto Enable LPI after link up */ hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg); phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI; hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg); if (!(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID)) { /* Restore clear on SMB if no manageability engine * is present */ ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg); if (ret_val) goto release; phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE; hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg); /* Disable Proxy */ hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0); } /* Enable reset on MTA */ ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg); if (ret_val) goto release; phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET; hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg); release: if (ret_val) DEBUGOUT1("Error %d in resume workarounds\n", ret_val); hw->phy.ops.release(hw); + return ret_val; } + return E1000_SUCCESS; } /** * e1000_cleanup_led_ich8lan - Restore the default LED operation * @hw: pointer to the HW structure * * Return the LED back to the default configuration. **/ static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw) { DEBUGFUNC("e1000_cleanup_led_ich8lan"); if (hw->phy.type == e1000_phy_ife) return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0); E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default); return E1000_SUCCESS; } /** * e1000_led_on_ich8lan - Turn LEDs on * @hw: pointer to the HW structure * * Turn on the LEDs. **/ static s32 e1000_led_on_ich8lan(struct e1000_hw *hw) { DEBUGFUNC("e1000_led_on_ich8lan"); if (hw->phy.type == e1000_phy_ife) return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON)); E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2); return E1000_SUCCESS; } /** * e1000_led_off_ich8lan - Turn LEDs off * @hw: pointer to the HW structure * * Turn off the LEDs. **/ static s32 e1000_led_off_ich8lan(struct e1000_hw *hw) { DEBUGFUNC("e1000_led_off_ich8lan"); if (hw->phy.type == e1000_phy_ife) return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF)); E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1); return E1000_SUCCESS; } /** * e1000_setup_led_pchlan - Configures SW controllable LED * @hw: pointer to the HW structure * * This prepares the SW controllable LED for use. **/ static s32 e1000_setup_led_pchlan(struct e1000_hw *hw) { DEBUGFUNC("e1000_setup_led_pchlan"); return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1); } /** * e1000_cleanup_led_pchlan - Restore the default LED operation * @hw: pointer to the HW structure * * Return the LED back to the default configuration. **/ static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw) { DEBUGFUNC("e1000_cleanup_led_pchlan"); return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default); } /** * e1000_led_on_pchlan - Turn LEDs on * @hw: pointer to the HW structure * * Turn on the LEDs. **/ static s32 e1000_led_on_pchlan(struct e1000_hw *hw) { u16 data = (u16)hw->mac.ledctl_mode2; u32 i, led; DEBUGFUNC("e1000_led_on_pchlan"); /* If no link, then turn LED on by setting the invert bit * for each LED whose mode is "link_up" in ledctl_mode2.
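* Each LED occupies a 5-bit field in HV_LED_CONFIG, so the loop below extracts (data >> (i * 5)) & E1000_PHY_LED0_MASK for i = 0..2 and toggles E1000_PHY_LED0_IVRT in every field whose mode equals E1000_LEDCTL_MODE_LINK_UP.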
*/ if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) { for (i = 0; i < 3; i++) { led = (data >> (i * 5)) & E1000_PHY_LED0_MASK; if ((led & E1000_PHY_LED0_MODE_MASK) != E1000_LEDCTL_MODE_LINK_UP) continue; if (led & E1000_PHY_LED0_IVRT) data &= ~(E1000_PHY_LED0_IVRT << (i * 5)); else data |= (E1000_PHY_LED0_IVRT << (i * 5)); } } return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data); } /** * e1000_led_off_pchlan - Turn LEDs off * @hw: pointer to the HW structure * * Turn off the LEDs. **/ static s32 e1000_led_off_pchlan(struct e1000_hw *hw) { u16 data = (u16)hw->mac.ledctl_mode1; u32 i, led; DEBUGFUNC("e1000_led_off_pchlan"); /* If no link, then turn LED off by clearing the invert bit * for each LED whose mode is "link_up" in ledctl_mode1. */ if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) { for (i = 0; i < 3; i++) { led = (data >> (i * 5)) & E1000_PHY_LED0_MASK; if ((led & E1000_PHY_LED0_MODE_MASK) != E1000_LEDCTL_MODE_LINK_UP) continue; if (led & E1000_PHY_LED0_IVRT) data &= ~(E1000_PHY_LED0_IVRT << (i * 5)); else data |= (E1000_PHY_LED0_IVRT << (i * 5)); } } return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data); } /** * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset * @hw: pointer to the HW structure * * Read appropriate register for the config done bit for completion status * and configure the PHY through s/w for EEPROM-less parts. * * NOTE: some silicon which is EEPROM-less will fail trying to read the * config done bit, so only an error is logged and processing continues. If we were * to return with an error, EEPROM-less silicon would not be able to be reset * or change link. **/ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; u32 bank = 0; u32 status; DEBUGFUNC("e1000_get_cfg_done_ich8lan"); e1000_get_cfg_done_generic(hw); /* Wait for indication from h/w that it has completed basic config */ if (hw->mac.type >= e1000_ich10lan) { e1000_lan_init_done_ich8lan(hw); } else { ret_val = e1000_get_auto_rd_done_generic(hw); if (ret_val) { /* When auto config read does not complete, do not * return with an error. This can happen in situations * where there is no eeprom and prevents getting link. */ DEBUGOUT("Auto Read Done did not complete\n"); ret_val = E1000_SUCCESS; } } /* Clear PHY Reset Asserted bit */ status = E1000_READ_REG(hw, E1000_STATUS); if (status & E1000_STATUS_PHYRA) E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA); else DEBUGOUT("PHY Reset Asserted not set - needs delay\n"); /* If EEPROM is not marked present, init the IGP 3 PHY manually */ if (hw->mac.type <= e1000_ich9lan) { if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) && (hw->phy.type == e1000_phy_igp_3)) { e1000_phy_init_script_igp3(hw); } } else { if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) { /* Maybe we should do a basic PHY config */ DEBUGOUT("EEPROM not present\n"); ret_val = -E1000_ERR_CONFIG; } } return ret_val; } /** * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down * @hw: pointer to the HW structure * * In the case of a PHY power down to save power, to turn off link during a * driver unload, or when wake on LAN is not enabled, remove the link.
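* The power down is skipped when the management interface is enabled or when PHY resets are blocked, per the check_mng_mode()/check_reset_block() test below.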
**/ static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw) { /* If the management interface is not enabled, then power down */ if (!(hw->mac.ops.check_mng_mode(hw) || hw->phy.ops.check_reset_block(hw))) e1000_power_down_phy_copper(hw); return; } /** * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters * @hw: pointer to the HW structure * * Clears hardware counters specific to the silicon family and calls * clear_hw_cntrs_generic to clear all general purpose counters. **/ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw) { u16 phy_data; s32 ret_val; DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan"); e1000_clear_hw_cntrs_base_generic(hw); E1000_READ_REG(hw, E1000_ALGNERRC); E1000_READ_REG(hw, E1000_RXERRC); E1000_READ_REG(hw, E1000_TNCRS); E1000_READ_REG(hw, E1000_CEXTERR); E1000_READ_REG(hw, E1000_TSCTC); E1000_READ_REG(hw, E1000_TSCTFC); E1000_READ_REG(hw, E1000_MGTPRC); E1000_READ_REG(hw, E1000_MGTPDC); E1000_READ_REG(hw, E1000_MGTPTC); E1000_READ_REG(hw, E1000_IAC); E1000_READ_REG(hw, E1000_ICRXOC); /* Clear PHY statistics registers */ if ((hw->phy.type == e1000_phy_82578) || (hw->phy.type == e1000_phy_82579) || (hw->phy.type == e1000_phy_i217) || (hw->phy.type == e1000_phy_82577)) { ret_val = hw->phy.ops.acquire(hw); if (ret_val) return; ret_val = hw->phy.ops.set_page(hw, HV_STATS_PAGE << IGP_PAGE_SHIFT); if (ret_val) goto release; hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data); hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data); release: hw->phy.ops.release(hw); } } Index: stable/10/sys/dev/e1000/e1000_ich8lan.h =================================================================== --- stable/10/sys/dev/e1000/e1000_ich8lan.h (revision 296054) +++ stable/10/sys/dev/e1000/e1000_ich8lan.h (revision 296055) @@ -1,322 +1,342 @@ /****************************************************************************** Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _E1000_ICH8LAN_H_ #define _E1000_ICH8LAN_H_ #define ICH_FLASH_GFPREG 0x0000 #define ICH_FLASH_HSFSTS 0x0004 #define ICH_FLASH_HSFCTL 0x0006 #define ICH_FLASH_FADDR 0x0008 #define ICH_FLASH_FDATA0 0x0010 /* Requires up to 10 seconds when MNG might be accessing part. */ #define ICH_FLASH_READ_COMMAND_TIMEOUT 10000000 #define ICH_FLASH_WRITE_COMMAND_TIMEOUT 10000000 #define ICH_FLASH_ERASE_COMMAND_TIMEOUT 10000000 #define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF #define ICH_FLASH_CYCLE_REPEAT_COUNT 10 #define ICH_CYCLE_READ 0 #define ICH_CYCLE_WRITE 2 #define ICH_CYCLE_ERASE 3 #define FLASH_GFPREG_BASE_MASK 0x1FFF #define FLASH_SECTOR_ADDR_SHIFT 12 #define ICH_FLASH_SEG_SIZE_256 256 #define ICH_FLASH_SEG_SIZE_4K 4096 #define ICH_FLASH_SEG_SIZE_8K 8192 #define ICH_FLASH_SEG_SIZE_64K 65536 #define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */ /* FW established a valid mode */ #define E1000_ICH_FWSM_FW_VALID 0x00008000 #define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */ #define E1000_ICH_FWSM_PCIM2PCI_COUNT 2000 #define E1000_ICH_MNG_IAMT_MODE 0x2 #define E1000_FWSM_WLOCK_MAC_MASK 0x0380 #define E1000_FWSM_WLOCK_MAC_SHIFT 7 #define E1000_FWSM_ULP_CFG_DONE 0x00000400 /* Low power cfg done */ /* Shared Receive Address Registers */ #define E1000_SHRAL_PCH_LPT(_i) (0x05408 + ((_i) * 8)) #define E1000_SHRAH_PCH_LPT(_i) (0x0540C + ((_i) * 8)) #define E1000_H2ME 0x05B50 /* Host to ME */ #define E1000_H2ME_ULP 0x00000800 /* ULP Indication Bit */ #define E1000_H2ME_ENFORCE_SETTINGS 0x00001000 /* Enforce Settings */ #define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \ (ID_LED_OFF1_OFF2 << 8) | \ (ID_LED_OFF1_ON2 << 4) | \ (ID_LED_DEF1_DEF2)) #define E1000_ICH_NVM_SIG_WORD 0x13 #define E1000_ICH_NVM_SIG_MASK 0xC000 #define E1000_ICH_NVM_VALID_SIG_MASK 0xC0 #define E1000_ICH_NVM_SIG_VALUE 0x80 #define E1000_ICH8_LAN_INIT_TIMEOUT 1500 /* FEXT register bit definition */ #define E1000_FEXT_PHY_CABLE_DISCONNECTED 0x00000004 #define E1000_FEXTNVM_SW_CONFIG 1 #define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* different on ICH8M */ #define E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK 0x0C000000 #define E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC 0x08000000 #define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7 #define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7 #define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3 #define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100 #define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION 0x00000200 - +#define E1000_FEXTNVM6_K1_OFF_ENABLE 0x80000000 +/* bit for disabling packet buffer read */ +#define E1000_FEXTNVM7_DISABLE_PB_READ 0x00040000 +#define E1000_FEXTNVM7_SIDE_CLK_UNGATE 0x00000004 #define E1000_FEXTNVM7_DISABLE_SMB_PERST 0x00000020 +#define E1000_FEXTNVM9_IOSFSB_CLKGATE_DIS 0x00000800 +#define E1000_FEXTNVM9_IOSFSB_CLKREQ_DIS 0x00001000 +#define E1000_FEXTNVM11_DISABLE_PB_READ 0x00000200 +#define E1000_FEXTNVM11_DISABLE_MULR_FIX 0x00002000 +/* bit24: RXDCTL 
thresholds granularity: 0 - cache lines, 1 - descriptors */ +#define E1000_RXDCTL_THRESH_UNIT_DESC 0x01000000 + +#define NVM_SIZE_MULTIPLIER 4096 /*multiplier for NVMS field*/ +#define E1000_FLASH_BASE_ADDR 0xE000 /*offset of NVM access regs*/ +#define E1000_CTRL_EXT_NVMVS 0x3 /*NVM valid sector */ +#define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29) #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL #define E1000_ICH_RAR_ENTRIES 7 #define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */ #define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */ #define PHY_PAGE_SHIFT 5 #define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \ ((reg) & MAX_PHY_REG_ADDRESS)) #define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */ #define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */ #define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 #define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300 #define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200 /* PHY Wakeup Registers and defines */ #define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17) #define BM_RCTL PHY_REG(BM_WUC_PAGE, 0) #define BM_WUC PHY_REG(BM_WUC_PAGE, 1) #define BM_WUFC PHY_REG(BM_WUC_PAGE, 2) #define BM_WUS PHY_REG(BM_WUC_PAGE, 3) #define BM_RAR_L(_i) (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2))) #define BM_RAR_M(_i) (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2))) #define BM_RAR_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2))) #define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2))) #define BM_MTA(_i) (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1))) #define BM_RCTL_UPE 0x0001 /* Unicast Promiscuous Mode */ #define BM_RCTL_MPE 0x0002 /* Multicast Promiscuous Mode */ #define BM_RCTL_MO_SHIFT 3 /* Multicast Offset Shift */ #define BM_RCTL_MO_MASK (3 << 3) /* Multicast Offset Mask */ #define BM_RCTL_BAM 0x0020 /* Broadcast Accept Mode */ #define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */ #define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */ #define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */ #define HV_MUX_DATA_CTRL PHY_REG(776, 16) #define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400 #define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004 #define HV_STATS_PAGE 778 /* Half-duplex collision counts */ #define HV_SCC_UPPER PHY_REG(HV_STATS_PAGE, 16) /* Single Collision */ #define HV_SCC_LOWER PHY_REG(HV_STATS_PAGE, 17) #define HV_ECOL_UPPER PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. 
*/ #define HV_ECOL_LOWER PHY_REG(HV_STATS_PAGE, 19) #define HV_MCC_UPPER PHY_REG(HV_STATS_PAGE, 20) /* Multiple Collision */ #define HV_MCC_LOWER PHY_REG(HV_STATS_PAGE, 21) #define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision */ #define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24) #define HV_COLC_UPPER PHY_REG(HV_STATS_PAGE, 25) /* Collision */ #define HV_COLC_LOWER PHY_REG(HV_STATS_PAGE, 26) #define HV_DC_UPPER PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */ #define HV_DC_LOWER PHY_REG(HV_STATS_PAGE, 28) #define HV_TNCRS_UPPER PHY_REG(HV_STATS_PAGE, 29) /* Tx with no CRS */ #define HV_TNCRS_LOWER PHY_REG(HV_STATS_PAGE, 30) #define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */ #define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */ #define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */ +#define K1_ENTRY_LATENCY 0 +#define K1_MIN_TIME 1 /* SMBus Control Phy Register */ #define CV_SMB_CTRL PHY_REG(769, 23) #define CV_SMB_CTRL_FORCE_SMBUS 0x0001 /* I218 Ultra Low Power Configuration 1 Register */ #define I218_ULP_CONFIG1 PHY_REG(779, 16) #define I218_ULP_CONFIG1_START 0x0001 /* Start auto ULP config */ #define I218_ULP_CONFIG1_IND 0x0004 /* Pwr up from ULP indication */ #define I218_ULP_CONFIG1_STICKY_ULP 0x0010 /* Set sticky ULP mode */ #define I218_ULP_CONFIG1_INBAND_EXIT 0x0020 /* Inband on ULP exit */ #define I218_ULP_CONFIG1_WOL_HOST 0x0040 /* WoL Host on ULP exit */ #define I218_ULP_CONFIG1_RESET_TO_SMBUS 0x0100 /* Reset to SMBus mode */ +/* enable ULP even if when phy powered down via lanphypc */ +#define I218_ULP_CONFIG1_EN_ULP_LANPHYPC 0x0400 +/* disable clear of sticky ULP on PERST */ +#define I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST 0x0800 #define I218_ULP_CONFIG1_DISABLE_SMB_PERST 0x1000 /* Disable on PERST# */ /* SMBus Address Phy Register */ #define HV_SMB_ADDR PHY_REG(768, 26) #define HV_SMB_ADDR_MASK 0x007F #define HV_SMB_ADDR_PEC_EN 0x0200 #define HV_SMB_ADDR_VALID 0x0080 #define HV_SMB_ADDR_FREQ_MASK 0x1100 #define HV_SMB_ADDR_FREQ_LOW_SHIFT 8 #define HV_SMB_ADDR_FREQ_HIGH_SHIFT 12 /* Strapping Option Register - RO */ #define E1000_STRAP 0x0000C #define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000 #define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17 #define E1000_STRAP_SMT_FREQ_MASK 0x00003000 #define E1000_STRAP_SMT_FREQ_SHIFT 12 /* OEM Bits Phy Register */ #define HV_OEM_BITS PHY_REG(768, 25) #define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */ #define HV_OEM_BITS_GBE_DIS 0x0040 /* Gigabit Disable */ #define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */ /* KMRN Mode Control */ #define HV_KMRN_MODE_CTRL PHY_REG(769, 16) #define HV_KMRN_MDIO_SLOW 0x0400 /* KMRN FIFO Control and Status */ #define HV_KMRN_FIFO_CTRLSTA PHY_REG(770, 16) #define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK 0x7000 #define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12 /* PHY Power Management Control */ #define HV_PM_CTRL PHY_REG(770, 17) #define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100 #define HV_PM_CTRL_K1_ENABLE 0x4000 +#define I217_PLL_CLOCK_GATE_REG PHY_REG(772, 28) +#define I217_PLL_CLOCK_GATE_MASK 0x07FF + #define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */ /* Inband Control */ #define I217_INBAND_CTRL PHY_REG(770, 18) #define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK 0x3F00 #define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT 8 /* Low Power Idle GPIO Control */ #define I217_LPI_GPIO_CTRL PHY_REG(772, 18) #define I217_LPI_GPIO_CTRL_AUTO_EN_LPI 0x0800 /* PHY Low Power Idle Control */ #define I82579_LPI_CTRL PHY_REG(772, 20) #define I82579_LPI_CTRL_100_ENABLE 
0x2000 #define I82579_LPI_CTRL_1000_ENABLE 0x4000 #define I82579_LPI_CTRL_ENABLE_MASK 0x6000 /* 82579 DFT Control */ #define I82579_DFT_CTRL PHY_REG(769, 20) #define I82579_DFT_CTRL_GATE_PHY_RESET 0x0040 /* Gate PHY Reset on MAC Reset */ /* Extended Management Interface (EMI) Registers */ #define I82579_EMI_ADDR 0x10 #define I82579_EMI_DATA 0x11 #define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */ #define I82579_MSE_THRESHOLD 0x084F /* 82579 Mean Square Error Threshold */ #define I82577_MSE_THRESHOLD 0x0887 /* 82577 Mean Square Error Threshold */ #define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */ #define I82579_RX_CONFIG 0x3412 /* Receive configuration */ #define I82579_LPI_PLL_SHUT 0x4412 /* LPI PLL Shut Enable */ #define I82579_EEE_PCS_STATUS 0x182E /* IEEE MMD Register 3.1 >> 8 */ #define I82579_EEE_CAPABILITY 0x0410 /* IEEE MMD Register 3.20 */ #define I82579_EEE_ADVERTISEMENT 0x040E /* IEEE MMD Register 7.60 */ #define I82579_EEE_LP_ABILITY 0x040F /* IEEE MMD Register 7.61 */ #define I82579_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE */ #define I82579_EEE_1000_SUPPORTED (1 << 2) /* 1000BaseTx EEE */ #define I82579_LPI_100_PLL_SHUT (1 << 2) /* 100M LPI PLL Shut Enabled */ #define I217_EEE_PCS_STATUS 0x9401 /* IEEE MMD Register 3.1 */ #define I217_EEE_CAPABILITY 0x8000 /* IEEE MMD Register 3.20 */ #define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */ #define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */ #define I217_RX_CONFIG 0xB20C /* Receive configuration */ #define E1000_EEE_RX_LPI_RCVD 0x0400 /* Tx LP idle received */ #define E1000_EEE_TX_LPI_RCVD 0x0800 /* Rx LP idle received */ /* Intel Rapid Start Technology Support */ #define I217_PROXY_CTRL BM_PHY_REG(BM_WUC_PAGE, 70) #define I217_PROXY_CTRL_AUTO_DISABLE 0x0080 #define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28) #define I217_SxCTRL_ENABLE_LPI_RESET 0x1000 #define I217_CGFREG PHY_REG(772, 29) #define I217_CGFREG_ENABLE_MTA_RESET 0x0002 #define I217_MEMPWR PHY_REG(772, 26) #define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010 /* Receive Address Initial CRC Calculation */ #define E1000_PCH_RAICC(_n) (0x05F50 + ((_n) * 4)) /* Latency Tolerance Reporting */ #define E1000_LTRV 0x000F8 #define E1000_LTRV_VALUE_MASK 0x000003FF #define E1000_LTRV_SCALE_MAX 5 #define E1000_LTRV_SCALE_FACTOR 5 #define E1000_LTRV_SCALE_SHIFT 10 #define E1000_LTRV_SCALE_MASK 0x00001C00 #define E1000_LTRV_REQ_SHIFT 15 #define E1000_LTRV_NOSNOOP_SHIFT 16 #define E1000_LTRV_SEND (1 << 30) /* Proprietary Latency Tolerance Reporting PCI Capability */ #define E1000_PCI_LTR_CAP_LPT 0xA8 /* OBFF Control & Threshold Defines */ #define E1000_SVCR_OFF_EN 0x00000001 #define E1000_SVCR_OFF_MASKINT 0x00001000 #define E1000_SVCR_OFF_TIMER_MASK 0xFFFF0000 #define E1000_SVCR_OFF_TIMER_SHIFT 16 #define E1000_SVT_OFF_HWM_MASK 0x0000001F -#if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT) -#define E1000_PCI_REVISION_ID_REG 0x08 -#endif /* defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT) */ void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, bool state); void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw); void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw); void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw); -void e1000_resume_workarounds_pchlan(struct e1000_hw *hw); +u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw); s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable); void e1000_copy_rx_addrs_to_phy_ich8lan(struct 
e1000_hw *hw); s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable); s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data); s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data); s32 e1000_set_eee_pchlan(struct e1000_hw *hw); s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx); s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force); #endif /* _E1000_ICH8LAN_H_ */ Index: stable/10/sys/dev/e1000/e1000_mac.h =================================================================== --- stable/10/sys/dev/e1000/e1000_mac.h (revision 296054) +++ stable/10/sys/dev/e1000/e1000_mac.h (revision 296055) @@ -1,97 +1,95 @@ /****************************************************************************** Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/ /*$FreeBSD$*/ #ifndef _E1000_MAC_H_ #define _E1000_MAC_H_ void e1000_init_mac_ops_generic(struct e1000_hw *hw); -#ifndef E1000_REMOVED #define E1000_REMOVED(a) (0) -#endif /* E1000_REMOVED */ void e1000_null_mac_generic(struct e1000_hw *hw); s32 e1000_null_ops_generic(struct e1000_hw *hw); s32 e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d); bool e1000_null_mng_mode(struct e1000_hw *hw); void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a); void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b); int e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a); s32 e1000_null_set_obff_timer(struct e1000_hw *hw, u32 a); s32 e1000_blink_led_generic(struct e1000_hw *hw); s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw); s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw); s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw); s32 e1000_cleanup_led_generic(struct e1000_hw *hw); s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw); s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw); s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw); s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw); s32 e1000_force_mac_fc_generic(struct e1000_hw *hw); s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw); s32 e1000_get_bus_info_pci_generic(struct e1000_hw *hw); s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw); void e1000_set_lan_id_single_port(struct e1000_hw *hw); void e1000_set_lan_id_multi_port_pci(struct e1000_hw *hw); s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw); s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed, u16 *duplex); s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw, u16 *speed, u16 *duplex); s32 e1000_id_led_init_generic(struct e1000_hw *hw); s32 e1000_led_on_generic(struct e1000_hw *hw); s32 e1000_led_off_generic(struct e1000_hw *hw); void e1000_update_mc_addr_list_generic(struct e1000_hw *hw, u8 *mc_addr_list, u32 mc_addr_count); s32 e1000_set_default_fc_generic(struct e1000_hw *hw); s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw); s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw); s32 e1000_setup_led_generic(struct e1000_hw *hw); s32 e1000_setup_link_generic(struct e1000_hw *hw); s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw *hw); s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg, u32 offset, u8 data); u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr); void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw); void e1000_clear_vfta_generic(struct e1000_hw *hw); void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count); void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw); void e1000_put_hw_semaphore_generic(struct e1000_hw *hw); s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw); void e1000_reset_adaptive_generic(struct e1000_hw *hw); void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop); void e1000_update_adaptive_generic(struct e1000_hw *hw); void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value); #endif Index: stable/10/sys/dev/e1000/e1000_mbx.c =================================================================== --- stable/10/sys/dev/e1000/e1000_mbx.c (revision 296054) +++ stable/10/sys/dev/e1000/e1000_mbx.c (revision 296055) @@ -1,771 +1,785 @@ /****************************************************************************** 
Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #include "e1000_mbx.h" /** * e1000_null_mbx_check_for_flag - No-op function, return 0 * @hw: pointer to the HW structure **/ static s32 e1000_null_mbx_check_for_flag(struct e1000_hw E1000_UNUSEDARG *hw, u16 E1000_UNUSEDARG mbx_id) { DEBUGFUNC("e1000_null_mbx_check_flag"); return E1000_SUCCESS; } /** * e1000_null_mbx_transact - No-op function, return 0 * @hw: pointer to the HW structure **/ static s32 e1000_null_mbx_transact(struct e1000_hw E1000_UNUSEDARG *hw, u32 E1000_UNUSEDARG *msg, u16 E1000_UNUSEDARG size, u16 E1000_UNUSEDARG mbx_id) { DEBUGFUNC("e1000_null_mbx_rw_msg"); return E1000_SUCCESS; } /** * e1000_read_mbx - Reads a message from the mailbox * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer * @mbx_id: id of mailbox to read * * returns SUCCESS if it successfully read message from buffer **/ s32 e1000_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) { struct e1000_mbx_info *mbx = &hw->mbx; s32 ret_val = -E1000_ERR_MBX; DEBUGFUNC("e1000_read_mbx"); /* limit read to size of mailbox */ if (size > mbx->size) size = mbx->size; if (mbx->ops.read) ret_val = mbx->ops.read(hw, msg, size, mbx_id); return ret_val; } /** * e1000_write_mbx - Write a message to the mailbox * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer * @mbx_id: id of mailbox to write * * returns SUCCESS if it successfully copied message into the buffer **/ s32 e1000_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) { struct e1000_mbx_info *mbx = &hw->mbx; s32 ret_val = E1000_SUCCESS; DEBUGFUNC("e1000_write_mbx"); if (size > mbx->size) ret_val = -E1000_ERR_MBX; else if (mbx->ops.write) ret_val = mbx->ops.write(hw, msg, size, mbx_id); return ret_val; } /** * e1000_check_for_msg - checks to see if someone sent us mail * @hw: pointer to the HW structure * @mbx_id: id of mailbox to check * * returns SUCCESS if the Status bit was found or else ERR_MBX **/ s32
e1000_check_for_msg(struct e1000_hw *hw, u16 mbx_id) { struct e1000_mbx_info *mbx = &hw->mbx; s32 ret_val = -E1000_ERR_MBX; DEBUGFUNC("e1000_check_for_msg"); if (mbx->ops.check_for_msg) ret_val = mbx->ops.check_for_msg(hw, mbx_id); return ret_val; } /** * e1000_check_for_ack - checks to see if someone sent us ACK * @hw: pointer to the HW structure * @mbx_id: id of mailbox to check * * returns SUCCESS if the Status bit was found or else ERR_MBX **/ s32 e1000_check_for_ack(struct e1000_hw *hw, u16 mbx_id) { struct e1000_mbx_info *mbx = &hw->mbx; s32 ret_val = -E1000_ERR_MBX; DEBUGFUNC("e1000_check_for_ack"); if (mbx->ops.check_for_ack) ret_val = mbx->ops.check_for_ack(hw, mbx_id); return ret_val; } /** * e1000_check_for_rst - checks to see if other side has reset * @hw: pointer to the HW structure * @mbx_id: id of mailbox to check * * returns SUCCESS if the Status bit was found or else ERR_MBX **/ s32 e1000_check_for_rst(struct e1000_hw *hw, u16 mbx_id) { struct e1000_mbx_info *mbx = &hw->mbx; s32 ret_val = -E1000_ERR_MBX; DEBUGFUNC("e1000_check_for_rst"); if (mbx->ops.check_for_rst) ret_val = mbx->ops.check_for_rst(hw, mbx_id); return ret_val; } /** * e1000_poll_for_msg - Wait for message notification * @hw: pointer to the HW structure * @mbx_id: id of mailbox to write * * returns SUCCESS if it successfully received a message notification **/ static s32 e1000_poll_for_msg(struct e1000_hw *hw, u16 mbx_id) { struct e1000_mbx_info *mbx = &hw->mbx; int countdown = mbx->timeout; DEBUGFUNC("e1000_poll_for_msg"); if (!countdown || !mbx->ops.check_for_msg) goto out; while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) { countdown--; if (!countdown) break; usec_delay(mbx->usec_delay); } /* if we failed, all future posted messages fail until reset */ if (!countdown) mbx->timeout = 0; out: return countdown ? E1000_SUCCESS : -E1000_ERR_MBX; } /** * e1000_poll_for_ack - Wait for message acknowledgement * @hw: pointer to the HW structure * @mbx_id: id of mailbox to write * * returns SUCCESS if it successfully received a message acknowledgement **/ static s32 e1000_poll_for_ack(struct e1000_hw *hw, u16 mbx_id) { struct e1000_mbx_info *mbx = &hw->mbx; int countdown = mbx->timeout; DEBUGFUNC("e1000_poll_for_ack"); if (!countdown || !mbx->ops.check_for_ack) goto out; while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) { countdown--; if (!countdown) break; usec_delay(mbx->usec_delay); } /* if we failed, all future posted messages fail until reset */ if (!countdown) mbx->timeout = 0; out: return countdown ? E1000_SUCCESS : -E1000_ERR_MBX; } /** * e1000_read_posted_mbx - Wait for message notification and receive message * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer * @mbx_id: id of mailbox to write * * returns SUCCESS if it successfully received a message notification and * copied it into the receive buffer. 
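* In other words, e1000_poll_for_msg() followed by mbx->ops.read(); if the poll times out, -E1000_ERR_MBX is returned and the read is skipped.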
**/ s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) { struct e1000_mbx_info *mbx = &hw->mbx; s32 ret_val = -E1000_ERR_MBX; DEBUGFUNC("e1000_read_posted_mbx"); if (!mbx->ops.read) goto out; ret_val = e1000_poll_for_msg(hw, mbx_id); /* if ack received read message, otherwise we timed out */ if (!ret_val) ret_val = mbx->ops.read(hw, msg, size, mbx_id); out: return ret_val; } /** * e1000_write_posted_mbx - Write a message to the mailbox, wait for ack * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer * @mbx_id: id of mailbox to write * * returns SUCCESS if it successfully copied message into the buffer and * received an ack to that message within delay * timeout period **/ s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) { struct e1000_mbx_info *mbx = &hw->mbx; s32 ret_val = -E1000_ERR_MBX; DEBUGFUNC("e1000_write_posted_mbx"); /* exit if either we can't write or there isn't a defined timeout */ if (!mbx->ops.write || !mbx->timeout) goto out; /* send msg */ ret_val = mbx->ops.write(hw, msg, size, mbx_id); /* if msg sent wait until we receive an ack */ if (!ret_val) ret_val = e1000_poll_for_ack(hw, mbx_id); out: return ret_val; } /** * e1000_init_mbx_ops_generic - Initialize mbx function pointers * @hw: pointer to the HW structure * * Sets the function pointers to no-op functions **/ void e1000_init_mbx_ops_generic(struct e1000_hw *hw) { struct e1000_mbx_info *mbx = &hw->mbx; mbx->ops.init_params = e1000_null_ops_generic; mbx->ops.read = e1000_null_mbx_transact; mbx->ops.write = e1000_null_mbx_transact; mbx->ops.check_for_msg = e1000_null_mbx_check_for_flag; mbx->ops.check_for_ack = e1000_null_mbx_check_for_flag; mbx->ops.check_for_rst = e1000_null_mbx_check_for_flag; mbx->ops.read_posted = e1000_read_posted_mbx; mbx->ops.write_posted = e1000_write_posted_mbx; } /** * e1000_read_v2p_mailbox - read v2p mailbox * @hw: pointer to the HW structure * * This function is used to read the v2p mailbox without losing the read to * clear status bits. **/ static u32 e1000_read_v2p_mailbox(struct e1000_hw *hw) { u32 v2p_mailbox = E1000_READ_REG(hw, E1000_V2PMAILBOX(0)); v2p_mailbox |= hw->dev_spec.vf.v2p_mailbox; hw->dev_spec.vf.v2p_mailbox |= v2p_mailbox & E1000_V2PMAILBOX_R2C_BITS; return v2p_mailbox; } /** * e1000_check_for_bit_vf - Determine if a status bit was set * @hw: pointer to the HW structure * @mask: bitmask for bits to be tested and cleared * * This function is used to check for the read to clear bits within * the V2P mailbox. 
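* Callers pass the bit of interest and treat SUCCESS as "bit was set"; the ACK check below, for example, does: if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFACK)) ret_val = E1000_SUCCESS;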
**/ static s32 e1000_check_for_bit_vf(struct e1000_hw *hw, u32 mask) { u32 v2p_mailbox = e1000_read_v2p_mailbox(hw); s32 ret_val = -E1000_ERR_MBX; if (v2p_mailbox & mask) ret_val = E1000_SUCCESS; hw->dev_spec.vf.v2p_mailbox &= ~mask; return ret_val; } /** * e1000_check_for_msg_vf - checks to see if the PF has sent mail * @hw: pointer to the HW structure * @mbx_id: id of mailbox to check * * returns SUCCESS if the PF has set the Status bit or else ERR_MBX **/ static s32 e1000_check_for_msg_vf(struct e1000_hw *hw, u16 E1000_UNUSEDARG mbx_id) { s32 ret_val = -E1000_ERR_MBX; DEBUGFUNC("e1000_check_for_msg_vf"); if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFSTS)) { ret_val = E1000_SUCCESS; hw->mbx.stats.reqs++; } return ret_val; } /** * e1000_check_for_ack_vf - checks to see if the PF has ACK'd * @hw: pointer to the HW structure * @mbx_id: id of mailbox to check * * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX **/ static s32 e1000_check_for_ack_vf(struct e1000_hw *hw, u16 E1000_UNUSEDARG mbx_id) { s32 ret_val = -E1000_ERR_MBX; DEBUGFUNC("e1000_check_for_ack_vf"); if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFACK)) { ret_val = E1000_SUCCESS; hw->mbx.stats.acks++; } return ret_val; } /** * e1000_check_for_rst_vf - checks to see if the PF has reset * @hw: pointer to the HW structure * @mbx_id: id of mailbox to check * * returns SUCCESS if the PF has set the reset done bit or else ERR_MBX **/ static s32 e1000_check_for_rst_vf(struct e1000_hw *hw, u16 E1000_UNUSEDARG mbx_id) { s32 ret_val = -E1000_ERR_MBX; DEBUGFUNC("e1000_check_for_rst_vf"); if (!e1000_check_for_bit_vf(hw, (E1000_V2PMAILBOX_RSTD | E1000_V2PMAILBOX_RSTI))) { ret_val = E1000_SUCCESS; hw->mbx.stats.rsts++; } return ret_val; } /** * e1000_obtain_mbx_lock_vf - obtain mailbox lock * @hw: pointer to the HW structure * * return SUCCESS if we obtained the mailbox lock **/ static s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw) { s32 ret_val = -E1000_ERR_MBX; + int count = 10; DEBUGFUNC("e1000_obtain_mbx_lock_vf"); - /* Take ownership of the buffer */ - E1000_WRITE_REG(hw, E1000_V2PMAILBOX(0), E1000_V2PMAILBOX_VFU); + do { + /* Take ownership of the buffer */ + E1000_WRITE_REG(hw, E1000_V2PMAILBOX(0), E1000_V2PMAILBOX_VFU); - /* reserve mailbox for vf use */ - if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU) - ret_val = E1000_SUCCESS; + /* reserve mailbox for vf use */ + if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU) { + ret_val = E1000_SUCCESS; + break; + } + usec_delay(1000); + } while (count-- > 0); return ret_val; } /** * e1000_write_mbx_vf - Write a message to the mailbox * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer * @mbx_id: id of mailbox to write * * returns SUCCESS if it successfully copied message into the buffer **/ static s32 e1000_write_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size, u16 E1000_UNUSEDARG mbx_id) { s32 ret_val; u16 i; DEBUGFUNC("e1000_write_mbx_vf"); /* lock the mailbox to prevent pf/vf race condition */ ret_val = e1000_obtain_mbx_lock_vf(hw); if (ret_val) goto out_no_write; /* flush msg and acks as we are overwriting the message buffer */ e1000_check_for_msg_vf(hw, 0); e1000_check_for_ack_vf(hw, 0); /* copy the caller specified message to the mailbox memory buffer */ for (i = 0; i < size; i++) E1000_WRITE_REG_ARRAY(hw, E1000_VMBMEM(0), i, msg[i]); /* update stats */ hw->mbx.stats.msgs_tx++; /* Drop VFU and interrupt the PF to tell it a message has been sent */ E1000_WRITE_REG(hw, E1000_V2PMAILBOX(0), E1000_V2PMAILBOX_REQ);
out_no_write: return ret_val; } /** * e1000_read_mbx_vf - Reads a message from the inbox intended for vf * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer * @mbx_id: id of mailbox to read * * returns SUCCESS if it successfully read message from buffer **/ static s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size, u16 E1000_UNUSEDARG mbx_id) { s32 ret_val = E1000_SUCCESS; u16 i; DEBUGFUNC("e1000_read_mbx_vf"); /* lock the mailbox to prevent pf/vf race condition */ ret_val = e1000_obtain_mbx_lock_vf(hw); if (ret_val) goto out_no_read; /* copy the message from the mailbox memory buffer */ for (i = 0; i < size; i++) msg[i] = E1000_READ_REG_ARRAY(hw, E1000_VMBMEM(0), i); /* Acknowledge receipt and release mailbox, then we're done */ E1000_WRITE_REG(hw, E1000_V2PMAILBOX(0), E1000_V2PMAILBOX_ACK); /* update stats */ hw->mbx.stats.msgs_rx++; out_no_read: return ret_val; } /** * e1000_init_mbx_params_vf - set initial values for vf mailbox * @hw: pointer to the HW structure * * Initializes the hw->mbx struct to correct values for vf mailbox */ s32 e1000_init_mbx_params_vf(struct e1000_hw *hw) { struct e1000_mbx_info *mbx = &hw->mbx; /* start mailbox as timed out and let the reset_hw call set the timeout * value to begin communications */ mbx->timeout = 0; mbx->usec_delay = E1000_VF_MBX_INIT_DELAY; mbx->size = E1000_VFMAILBOX_SIZE; mbx->ops.read = e1000_read_mbx_vf; mbx->ops.write = e1000_write_mbx_vf; mbx->ops.read_posted = e1000_read_posted_mbx; mbx->ops.write_posted = e1000_write_posted_mbx; mbx->ops.check_for_msg = e1000_check_for_msg_vf; mbx->ops.check_for_ack = e1000_check_for_ack_vf; mbx->ops.check_for_rst = e1000_check_for_rst_vf; mbx->stats.msgs_tx = 0; mbx->stats.msgs_rx = 0; mbx->stats.reqs = 0; mbx->stats.acks = 0; mbx->stats.rsts = 0; return E1000_SUCCESS; } static s32 e1000_check_for_bit_pf(struct e1000_hw *hw, u32 mask) { u32 mbvficr = E1000_READ_REG(hw, E1000_MBVFICR); s32 ret_val = -E1000_ERR_MBX; if (mbvficr & mask) { ret_val = E1000_SUCCESS; E1000_WRITE_REG(hw, E1000_MBVFICR, mask); } return ret_val; } /** * e1000_check_for_msg_pf - checks to see if the VF has sent mail * @hw: pointer to the HW structure * @vf_number: the VF index * * returns SUCCESS if the VF has set the Status bit or else ERR_MBX **/ static s32 e1000_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number) { s32 ret_val = -E1000_ERR_MBX; DEBUGFUNC("e1000_check_for_msg_pf"); if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) { ret_val = E1000_SUCCESS; hw->mbx.stats.reqs++; } return ret_val; } /** * e1000_check_for_ack_pf - checks to see if the VF has ACKed * @hw: pointer to the HW structure * @vf_number: the VF index * * returns SUCCESS if the VF has set the ACK bit or else ERR_MBX **/ static s32 e1000_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number) { s32 ret_val = -E1000_ERR_MBX; DEBUGFUNC("e1000_check_for_ack_pf"); if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) { ret_val = E1000_SUCCESS; hw->mbx.stats.acks++; } return ret_val; } /** * e1000_check_for_rst_pf - checks to see if the VF has reset * @hw: pointer to the HW structure * @vf_number: the VF index * * returns SUCCESS if the VF has set the reset bit or else ERR_MBX **/ static s32 e1000_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number) { u32 vflre = E1000_READ_REG(hw, E1000_VFLRE); s32 ret_val = -E1000_ERR_MBX; DEBUGFUNC("e1000_check_for_rst_pf"); if (vflre & (1 << vf_number)) { ret_val = E1000_SUCCESS; E1000_WRITE_REG(hw, E1000_VFLRE, (1 << vf_number));
hw->mbx.stats.rsts++; } return ret_val; } /** * e1000_obtain_mbx_lock_pf - obtain mailbox lock * @hw: pointer to the HW structure * @vf_number: the VF index * * return SUCCESS if we obtained the mailbox lock **/ static s32 e1000_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) { s32 ret_val = -E1000_ERR_MBX; u32 p2v_mailbox; + int count = 10; DEBUGFUNC("e1000_obtain_mbx_lock_pf"); - /* Take ownership of the buffer */ - E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); + do { + /* Take ownership of the buffer */ + E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), + E1000_P2VMAILBOX_PFU); - /* reserve mailbox for vf use */ - p2v_mailbox = E1000_READ_REG(hw, E1000_P2VMAILBOX(vf_number)); - if (p2v_mailbox & E1000_P2VMAILBOX_PFU) - ret_val = E1000_SUCCESS; + /* reserve mailbox for pf use */ + p2v_mailbox = E1000_READ_REG(hw, E1000_P2VMAILBOX(vf_number)); + if (p2v_mailbox & E1000_P2VMAILBOX_PFU) { + ret_val = E1000_SUCCESS; + break; + } + usec_delay(1000); + } while (count-- > 0); return ret_val; + } /** * e1000_write_mbx_pf - Places a message in the mailbox * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer * @vf_number: the VF index * * returns SUCCESS if it successfully copied message into the buffer **/ static s32 e1000_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, u16 vf_number) { s32 ret_val; u16 i; DEBUGFUNC("e1000_write_mbx_pf"); /* lock the mailbox to prevent pf/vf race condition */ ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number); if (ret_val) goto out_no_write; /* flush msg and acks as we are overwriting the message buffer */ e1000_check_for_msg_pf(hw, vf_number); e1000_check_for_ack_pf(hw, vf_number); /* copy the caller specified message to the mailbox memory buffer */ for (i = 0; i < size; i++) E1000_WRITE_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i, msg[i]); /* Interrupt VF to tell it a message has been sent and release buffer*/ E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS); /* update stats */ hw->mbx.stats.msgs_tx++; out_no_write: return ret_val; } /** * e1000_read_mbx_pf - Read a message from the mailbox * @hw: pointer to the HW structure * @msg: The message buffer * @size: Length of buffer * @vf_number: the VF index * * This function copies a message from the mailbox buffer to the caller's * memory buffer. The presumption is that the caller knows that there was * a message due to a VF request so no polling for message is needed. 
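* Accordingly, it only takes the lock via e1000_obtain_mbx_lock_pf(), copies size words out of E1000_VMBMEM(vf_number), and writes E1000_P2VMAILBOX_ACK to release the buffer.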
 **/
static s32 e1000_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
			     u16 vf_number)
{
	s32 ret_val;
	u16 i;

	DEBUGFUNC("e1000_read_mbx_pf");

	/* lock the mailbox to prevent pf/vf race condition */
	ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number);
	if (ret_val)
		goto out_no_read;

	/* copy the message from the mailbox memory buffer */
	for (i = 0; i < size; i++)
		msg[i] = E1000_READ_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i);

	/* Acknowledge the message and release buffer */
	E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK);

	/* update stats */
	hw->mbx.stats.msgs_rx++;

out_no_read:
	return ret_val;
}

/**
 * e1000_init_mbx_params_pf - set initial values for pf mailbox
 * @hw: pointer to the HW structure
 *
 * Initializes the hw->mbx struct to correct values for pf mailbox
 */
s32 e1000_init_mbx_params_pf(struct e1000_hw *hw)
{
	struct e1000_mbx_info *mbx = &hw->mbx;

	switch (hw->mac.type) {
	case e1000_82576:
	case e1000_i350:
	case e1000_i354:
		mbx->timeout = 0;
		mbx->usec_delay = 0;

		mbx->size = E1000_VFMAILBOX_SIZE;

		mbx->ops.read = e1000_read_mbx_pf;
		mbx->ops.write = e1000_write_mbx_pf;
		mbx->ops.read_posted = e1000_read_posted_mbx;
		mbx->ops.write_posted = e1000_write_posted_mbx;
		mbx->ops.check_for_msg = e1000_check_for_msg_pf;
		mbx->ops.check_for_ack = e1000_check_for_ack_pf;
		mbx->ops.check_for_rst = e1000_check_for_rst_pf;

		mbx->stats.msgs_tx = 0;
		mbx->stats.msgs_rx = 0;
		mbx->stats.reqs = 0;
		mbx->stats.acks = 0;
		mbx->stats.rsts = 0;
		/* FALLTHROUGH */
	default:
		return E1000_SUCCESS;
	}
}
Index: stable/10/sys/dev/e1000/e1000_nvm.h
===================================================================
--- stable/10/sys/dev/e1000/e1000_nvm.h	(revision 296054)
+++ stable/10/sys/dev/e1000/e1000_nvm.h	(revision 296055)
@@ -1,82 +1,80 @@
/******************************************************************************

  Copyright (c) 2001-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.
******************************************************************************/ /*$FreeBSD$*/ #ifndef _E1000_NVM_H_ #define _E1000_NVM_H_ -#if !defined(NO_READ_PBA_RAW) || !defined(NO_WRITE_PBA_RAW) struct e1000_pba { u16 word[2]; u16 *pba_block; }; -#endif void e1000_init_nvm_ops_generic(struct e1000_hw *hw); s32 e1000_null_read_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c); void e1000_null_nvm_generic(struct e1000_hw *hw); s32 e1000_null_led_default(struct e1000_hw *hw, u16 *data); s32 e1000_null_write_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c); s32 e1000_acquire_nvm_generic(struct e1000_hw *hw); s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg); s32 e1000_read_mac_addr_generic(struct e1000_hw *hw); s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size); s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size); s32 e1000_read_pba_raw(struct e1000_hw *hw, u16 *eeprom_buf, u32 eeprom_buf_size, u16 max_pba_block_size, struct e1000_pba *pba); s32 e1000_write_pba_raw(struct e1000_hw *hw, u16 *eeprom_buf, u32 eeprom_buf_size, struct e1000_pba *pba); s32 e1000_get_pba_block_size(struct e1000_hw *hw, u16 *eeprom_buf, u32 eeprom_buf_size, u16 *pba_block_size); s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); s32 e1000_read_nvm_microwire(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data); s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw); s32 e1000_write_nvm_microwire(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw); void e1000_stop_nvm(struct e1000_hw *hw); void e1000_release_nvm_generic(struct e1000_hw *hw); #define E1000_STM_OPCODE 0xDB00 #endif Index: stable/10/sys/dev/e1000/e1000_osdep.h =================================================================== --- stable/10/sys/dev/e1000/e1000_osdep.h (revision 296054) +++ stable/10/sys/dev/e1000/e1000_osdep.h (revision 296055) @@ -1,223 +1,220 @@ /****************************************************************************** Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _FREEBSD_OS_H_ #define _FREEBSD_OS_H_ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define ASSERT(x) if(!(x)) panic("EM: x") #define usec_delay(x) DELAY(x) -#define usec_delay_irq(x) DELAY(x) +#define usec_delay_irq(x) usec_delay(x) #define msec_delay(x) DELAY(1000*(x)) #define msec_delay_irq(x) DELAY(1000*(x)) -#define DEBUGFUNC(F) DEBUGOUT(F); -#define DEBUGOUT(S) do {} while (0) -#define DEBUGOUT1(S,A) do {} while (0) -#define DEBUGOUT2(S,A,B) do {} while (0) -#define DEBUGOUT3(S,A,B,C) do {} while (0) -#define DEBUGOUT7(S,A,B,C,D,E,F,G) do {} while (0) +/* Enable/disable debugging statements in shared code */ +#define DBG 0 +#define DEBUGOUT(...) \ + do { if (DBG) printf(__VA_ARGS__); } while (0) +#define DEBUGOUT1(...) DEBUGOUT(__VA_ARGS__) +#define DEBUGOUT2(...) DEBUGOUT(__VA_ARGS__) +#define DEBUGOUT3(...) DEBUGOUT(__VA_ARGS__) +#define DEBUGOUT7(...) DEBUGOUT(__VA_ARGS__) +#define DEBUGFUNC(F) DEBUGOUT(F "\n") + #define STATIC static #define FALSE 0 #define TRUE 1 -#ifndef __bool_true_false_are_defined -#define false FALSE -#define true TRUE -#endif #define CMD_MEM_WRT_INVALIDATE 0x0010 /* BIT_4 */ #define PCI_COMMAND_REGISTER PCIR_COMMAND /* Mutex used in the shared code */ #define E1000_MUTEX struct mtx #define E1000_MUTEX_INIT(mutex) mtx_init((mutex), #mutex, \ MTX_NETWORK_LOCK, \ MTX_DEF | MTX_DUPOK) #define E1000_MUTEX_DESTROY(mutex) mtx_destroy(mutex) #define E1000_MUTEX_LOCK(mutex) mtx_lock(mutex) #define E1000_MUTEX_TRYLOCK(mutex) mtx_trylock(mutex) #define E1000_MUTEX_UNLOCK(mutex) mtx_unlock(mutex) typedef uint64_t u64; typedef uint32_t u32; typedef uint16_t u16; typedef uint8_t u8; typedef int64_t s64; typedef int32_t s32; typedef int16_t s16; typedef int8_t s8; -#ifndef __bool_true_false_are_defined -typedef boolean_t bool; -#endif #define __le16 u16 #define __le32 u32 #define __le64 u64 #if __FreeBSD_version < 800000 #if defined(__i386__) || defined(__amd64__) #define mb() __asm volatile("mfence" ::: "memory") #define wmb() __asm volatile("sfence" ::: "memory") #define rmb() __asm volatile("lfence" ::: "memory") #else #define mb() #define rmb() #define wmb() #endif #endif /*__FreeBSD_version < 800000 */ #if defined(__i386__) || defined(__amd64__) static __inline void prefetch(void *x) { __asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x)); } #else #define prefetch(x) #endif struct e1000_osdep { bus_space_tag_t mem_bus_space_tag; bus_space_handle_t mem_bus_space_handle; bus_space_tag_t io_bus_space_tag; bus_space_handle_t io_bus_space_handle; bus_space_tag_t flash_bus_space_tag; bus_space_handle_t flash_bus_space_handle; struct device *dev; }; #define E1000_REGISTER(hw, reg) (((hw)->mac.type >= e1000_82543) \ ? 
reg : e1000_translate_register_82542(reg)) #define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, E1000_STATUS) /* Read from an absolute offset in the adapter's memory space */ #define E1000_READ_OFFSET(hw, offset) \ bus_space_read_4(((struct e1000_osdep *)(hw)->back)->mem_bus_space_tag, \ ((struct e1000_osdep *)(hw)->back)->mem_bus_space_handle, offset) /* Write to an absolute offset in the adapter's memory space */ #define E1000_WRITE_OFFSET(hw, offset, value) \ bus_space_write_4(((struct e1000_osdep *)(hw)->back)->mem_bus_space_tag, \ ((struct e1000_osdep *)(hw)->back)->mem_bus_space_handle, offset, value) /* Register READ/WRITE macros */ #define E1000_READ_REG(hw, reg) \ bus_space_read_4(((struct e1000_osdep *)(hw)->back)->mem_bus_space_tag, \ ((struct e1000_osdep *)(hw)->back)->mem_bus_space_handle, \ E1000_REGISTER(hw, reg)) #define E1000_WRITE_REG(hw, reg, value) \ bus_space_write_4(((struct e1000_osdep *)(hw)->back)->mem_bus_space_tag, \ ((struct e1000_osdep *)(hw)->back)->mem_bus_space_handle, \ E1000_REGISTER(hw, reg), value) #define E1000_READ_REG_ARRAY(hw, reg, index) \ bus_space_read_4(((struct e1000_osdep *)(hw)->back)->mem_bus_space_tag, \ ((struct e1000_osdep *)(hw)->back)->mem_bus_space_handle, \ E1000_REGISTER(hw, reg) + ((index)<< 2)) #define E1000_WRITE_REG_ARRAY(hw, reg, index, value) \ bus_space_write_4(((struct e1000_osdep *)(hw)->back)->mem_bus_space_tag, \ ((struct e1000_osdep *)(hw)->back)->mem_bus_space_handle, \ E1000_REGISTER(hw, reg) + ((index)<< 2), value) #define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY #define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY #define E1000_READ_REG_ARRAY_BYTE(hw, reg, index) \ bus_space_read_1(((struct e1000_osdep *)(hw)->back)->mem_bus_space_tag, \ ((struct e1000_osdep *)(hw)->back)->mem_bus_space_handle, \ E1000_REGISTER(hw, reg) + index) #define E1000_WRITE_REG_ARRAY_BYTE(hw, reg, index, value) \ bus_space_write_1(((struct e1000_osdep *)(hw)->back)->mem_bus_space_tag, \ ((struct e1000_osdep *)(hw)->back)->mem_bus_space_handle, \ E1000_REGISTER(hw, reg) + index, value) #define E1000_WRITE_REG_ARRAY_WORD(hw, reg, index, value) \ bus_space_write_2(((struct e1000_osdep *)(hw)->back)->mem_bus_space_tag, \ ((struct e1000_osdep *)(hw)->back)->mem_bus_space_handle, \ E1000_REGISTER(hw, reg) + (index << 1), value) #define E1000_WRITE_REG_IO(hw, reg, value) do {\ bus_space_write_4(((struct e1000_osdep *)(hw)->back)->io_bus_space_tag, \ ((struct e1000_osdep *)(hw)->back)->io_bus_space_handle, \ (hw)->io_base, reg); \ bus_space_write_4(((struct e1000_osdep *)(hw)->back)->io_bus_space_tag, \ ((struct e1000_osdep *)(hw)->back)->io_bus_space_handle, \ (hw)->io_base + 4, value); } while (0) #define E1000_READ_FLASH_REG(hw, reg) \ bus_space_read_4(((struct e1000_osdep *)(hw)->back)->flash_bus_space_tag, \ ((struct e1000_osdep *)(hw)->back)->flash_bus_space_handle, reg) #define E1000_READ_FLASH_REG16(hw, reg) \ bus_space_read_2(((struct e1000_osdep *)(hw)->back)->flash_bus_space_tag, \ ((struct e1000_osdep *)(hw)->back)->flash_bus_space_handle, reg) #define E1000_WRITE_FLASH_REG(hw, reg, value) \ bus_space_write_4(((struct e1000_osdep *)(hw)->back)->flash_bus_space_tag, \ ((struct e1000_osdep *)(hw)->back)->flash_bus_space_handle, reg, value) #define E1000_WRITE_FLASH_REG16(hw, reg, value) \ bus_space_write_2(((struct e1000_osdep *)(hw)->back)->flash_bus_space_tag, \ ((struct e1000_osdep *)(hw)->back)->flash_bus_space_handle, reg, value) #endif /* _FREEBSD_OS_H_ */ Index: stable/10/sys/dev/e1000/e1000_phy.c 
=================================================================== --- stable/10/sys/dev/e1000/e1000_phy.c (revision 296054) +++ stable/10/sys/dev/e1000/e1000_phy.c (revision 296055) @@ -1,4253 +1,4251 @@ /****************************************************************************** Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #include "e1000_api.h" static s32 e1000_wait_autoneg(struct e1000_hw *hw); static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data, bool read, bool page_set); static u32 e1000_get_phy_addr_for_hv_page(u32 page); static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, u16 *data, bool read); /* Cable length tables */ static const u16 e1000_m88_cable_length_table[] = { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; #define M88E1000_CABLE_LENGTH_TABLE_SIZE \ (sizeof(e1000_m88_cable_length_table) / \ sizeof(e1000_m88_cable_length_table[0])) static const u16 e1000_igp_2_cable_length_table[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121, 124}; #define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ (sizeof(e1000_igp_2_cable_length_table) / \ sizeof(e1000_igp_2_cable_length_table[0])) /** * e1000_init_phy_ops_generic - Initialize PHY function pointers * @hw: pointer to the HW structure * * Setups up the function pointers to no-op functions **/ void e1000_init_phy_ops_generic(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; DEBUGFUNC("e1000_init_phy_ops_generic"); /* Initialize function pointers */ phy->ops.init_params = e1000_null_ops_generic; phy->ops.acquire = e1000_null_ops_generic; 
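	/* Every callback starts out as a safe no-op; family-specific init
	 * code (e.g. e1000_init_phy_params_80003es2lan) then overrides only
	 * the operations its hardware actually supports.
	 */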
phy->ops.check_polarity = e1000_null_ops_generic; phy->ops.check_reset_block = e1000_null_ops_generic; phy->ops.commit = e1000_null_ops_generic; phy->ops.force_speed_duplex = e1000_null_ops_generic; phy->ops.get_cfg_done = e1000_null_ops_generic; phy->ops.get_cable_length = e1000_null_ops_generic; phy->ops.get_info = e1000_null_ops_generic; phy->ops.set_page = e1000_null_set_page; phy->ops.read_reg = e1000_null_read_reg; phy->ops.read_reg_locked = e1000_null_read_reg; phy->ops.read_reg_page = e1000_null_read_reg; phy->ops.release = e1000_null_phy_generic; phy->ops.reset = e1000_null_ops_generic; phy->ops.set_d0_lplu_state = e1000_null_lplu_state; phy->ops.set_d3_lplu_state = e1000_null_lplu_state; phy->ops.write_reg = e1000_null_write_reg; phy->ops.write_reg_locked = e1000_null_write_reg; phy->ops.write_reg_page = e1000_null_write_reg; phy->ops.power_up = e1000_null_phy_generic; phy->ops.power_down = e1000_null_phy_generic; phy->ops.read_i2c_byte = e1000_read_i2c_byte_null; phy->ops.write_i2c_byte = e1000_write_i2c_byte_null; phy->ops.cfg_on_link_up = e1000_null_ops_generic; } /** * e1000_null_set_page - No-op function, return 0 * @hw: pointer to the HW structure **/ s32 e1000_null_set_page(struct e1000_hw E1000_UNUSEDARG *hw, u16 E1000_UNUSEDARG data) { DEBUGFUNC("e1000_null_set_page"); return E1000_SUCCESS; } /** * e1000_null_read_reg - No-op function, return 0 * @hw: pointer to the HW structure **/ s32 e1000_null_read_reg(struct e1000_hw E1000_UNUSEDARG *hw, u32 E1000_UNUSEDARG offset, u16 E1000_UNUSEDARG *data) { DEBUGFUNC("e1000_null_read_reg"); return E1000_SUCCESS; } /** * e1000_null_phy_generic - No-op function, return void * @hw: pointer to the HW structure **/ void e1000_null_phy_generic(struct e1000_hw E1000_UNUSEDARG *hw) { DEBUGFUNC("e1000_null_phy_generic"); return; } /** * e1000_null_lplu_state - No-op function, return 0 * @hw: pointer to the HW structure **/ s32 e1000_null_lplu_state(struct e1000_hw E1000_UNUSEDARG *hw, bool E1000_UNUSEDARG active) { DEBUGFUNC("e1000_null_lplu_state"); return E1000_SUCCESS; } /** * e1000_null_write_reg - No-op function, return 0 * @hw: pointer to the HW structure **/ s32 e1000_null_write_reg(struct e1000_hw E1000_UNUSEDARG *hw, u32 E1000_UNUSEDARG offset, u16 E1000_UNUSEDARG data) { DEBUGFUNC("e1000_null_write_reg"); return E1000_SUCCESS; } /** * e1000_read_i2c_byte_null - No-op function, return 0 * @hw: pointer to hardware structure * @byte_offset: byte offset to write * @dev_addr: device address * @data: data value read * **/ s32 e1000_read_i2c_byte_null(struct e1000_hw E1000_UNUSEDARG *hw, u8 E1000_UNUSEDARG byte_offset, u8 E1000_UNUSEDARG dev_addr, u8 E1000_UNUSEDARG *data) { DEBUGFUNC("e1000_read_i2c_byte_null"); return E1000_SUCCESS; } /** * e1000_write_i2c_byte_null - No-op function, return 0 * @hw: pointer to hardware structure * @byte_offset: byte offset to write * @dev_addr: device address * @data: data value to write * **/ s32 e1000_write_i2c_byte_null(struct e1000_hw E1000_UNUSEDARG *hw, u8 E1000_UNUSEDARG byte_offset, u8 E1000_UNUSEDARG dev_addr, u8 E1000_UNUSEDARG data) { DEBUGFUNC("e1000_write_i2c_byte_null"); return E1000_SUCCESS; } /** * e1000_check_reset_block_generic - Check if PHY reset is blocked * @hw: pointer to the HW structure * * Read the PHY management control register and check whether a PHY reset * is blocked. If a reset is not blocked return E1000_SUCCESS, otherwise * return E1000_BLK_PHY_RESET (12). 
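 *
 * Callers typically gate a PHY reset on this check, e.g. (sketch):
 *
 *	if (hw->phy.ops.check_reset_block &&
 *	    hw->phy.ops.check_reset_block(hw))
 *		return E1000_BLK_PHY_RESET;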
**/ s32 e1000_check_reset_block_generic(struct e1000_hw *hw) { u32 manc; DEBUGFUNC("e1000_check_reset_block"); manc = E1000_READ_REG(hw, E1000_MANC); return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? E1000_BLK_PHY_RESET : E1000_SUCCESS; } /** * e1000_get_phy_id - Retrieve the PHY ID and revision * @hw: pointer to the HW structure * * Reads the PHY registers and stores the PHY ID and possibly the PHY * revision in the hardware structure. **/ s32 e1000_get_phy_id(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val = E1000_SUCCESS; u16 phy_id; u16 retry_count = 0; DEBUGFUNC("e1000_get_phy_id"); if (!phy->ops.read_reg) return E1000_SUCCESS; while (retry_count < 2) { ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); if (ret_val) return ret_val; phy->id = (u32)(phy_id << 16); usec_delay(20); ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id); if (ret_val) return ret_val; phy->id |= (u32)(phy_id & PHY_REVISION_MASK); phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); if (phy->id != 0 && phy->id != PHY_REVISION_MASK) return E1000_SUCCESS; retry_count++; } return E1000_SUCCESS; } /** * e1000_phy_reset_dsp_generic - Reset PHY DSP * @hw: pointer to the HW structure * * Reset the digital signal processor. **/ s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw) { s32 ret_val; DEBUGFUNC("e1000_phy_reset_dsp_generic"); if (!hw->phy.ops.write_reg) return E1000_SUCCESS; ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); if (ret_val) return ret_val; return hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0); } /** * e1000_read_phy_reg_mdic - Read MDI control register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * * Reads the MDI control register in the PHY at offset and stores the * information read to data. **/ s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) { struct e1000_phy_info *phy = &hw->phy; u32 i, mdic = 0; DEBUGFUNC("e1000_read_phy_reg_mdic"); if (offset > MAX_PHY_REG_ADDRESS) { DEBUGOUT1("PHY Address %d is out of range\n", offset); return -E1000_ERR_PARAM; } /* Set up Op-code, Phy Address, and register offset in the MDI * Control register. The MAC will take care of interfacing with the * PHY to retrieve the desired data. */ mdic = ((offset << E1000_MDIC_REG_SHIFT) | (phy->addr << E1000_MDIC_PHY_SHIFT) | (E1000_MDIC_OP_READ)); E1000_WRITE_REG(hw, E1000_MDIC, mdic); /* Poll the ready bit to see if the MDI read completed * Increasing the time out as testing showed failures with * the lower time out */ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { usec_delay_irq(50); mdic = E1000_READ_REG(hw, E1000_MDIC); if (mdic & E1000_MDIC_READY) break; } if (!(mdic & E1000_MDIC_READY)) { DEBUGOUT("MDI Read did not complete\n"); return -E1000_ERR_PHY; } if (mdic & E1000_MDIC_ERROR) { DEBUGOUT("MDI Error\n"); return -E1000_ERR_PHY; } if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) { DEBUGOUT2("MDI Read offset error - requested %d, returned %d\n", offset, (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT); return -E1000_ERR_PHY; } *data = (u16) mdic; /* Allow some time after each MDIC transaction to avoid * reading duplicate data in the next MDIC transaction. 
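	 * (The extra delay is only applied on pch2lan-class MACs; see the
	 * mac.type check below.)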
	 */
	if (hw->mac.type == e1000_pch2lan)
		usec_delay_irq(100);

	return E1000_SUCCESS;
}

/**
 * e1000_write_phy_reg_mdic - Write MDI control register
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write to register at offset
 *
 * Writes data to MDI control register in the PHY at offset.
 **/
s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 i, mdic = 0;

	DEBUGFUNC("e1000_write_phy_reg_mdic");

	if (offset > MAX_PHY_REG_ADDRESS) {
		DEBUGOUT1("PHY Address %d is out of range\n", offset);
		return -E1000_ERR_PARAM;
	}

	/* Set up Op-code, Phy Address, and register offset in the MDI
	 * Control register.  The MAC will take care of interfacing with the
	 * PHY to write the desired data.
	 */
	mdic = (((u32)data) |
		(offset << E1000_MDIC_REG_SHIFT) |
		(phy->addr << E1000_MDIC_PHY_SHIFT) |
		(E1000_MDIC_OP_WRITE));

	E1000_WRITE_REG(hw, E1000_MDIC, mdic);

	/* Poll the ready bit to see if the MDI write completed.  The
	 * timeout is tripled because testing showed failures with the
	 * lower timeout.
	 */
	for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
		usec_delay_irq(50);
		mdic = E1000_READ_REG(hw, E1000_MDIC);
		if (mdic & E1000_MDIC_READY)
			break;
	}
	if (!(mdic & E1000_MDIC_READY)) {
		DEBUGOUT("MDI Write did not complete\n");
		return -E1000_ERR_PHY;
	}
	if (mdic & E1000_MDIC_ERROR) {
		DEBUGOUT("MDI Error\n");
		return -E1000_ERR_PHY;
	}
	if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
		DEBUGOUT2("MDI Write offset error - requested %d, returned %d\n",
			  offset,
			  (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
		return -E1000_ERR_PHY;
	}

	/* Allow some time after each MDIC transaction to avoid
	 * reading duplicate data in the next MDIC transaction.
	 */
	if (hw->mac.type == e1000_pch2lan)
		usec_delay_irq(100);

	return E1000_SUCCESS;
}

/**
 * e1000_read_phy_reg_i2c - Read PHY register using i2c
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 *
 * Reads the PHY register at offset using the i2c interface and stores the
 * retrieved information in data.
 **/
s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 i, i2ccmd = 0;

	DEBUGFUNC("e1000_read_phy_reg_i2c");

	/* Set up Op-code, Phy Address, and register address in the I2CCMD
	 * register.  The MAC will take care of interfacing with the
	 * PHY to retrieve the desired data.
	 */
	i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
		  (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
		  (E1000_I2CCMD_OPCODE_READ));

	E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);

	/* Poll the ready bit to see if the I2C read completed */
	for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
		usec_delay(50);
		i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
		if (i2ccmd & E1000_I2CCMD_READY)
			break;
	}
	if (!(i2ccmd & E1000_I2CCMD_READY)) {
		DEBUGOUT("I2CCMD Read did not complete\n");
		return -E1000_ERR_PHY;
	}
	if (i2ccmd & E1000_I2CCMD_ERROR) {
		DEBUGOUT("I2CCMD Error bit set\n");
		return -E1000_ERR_PHY;
	}

	/* Need to byte-swap the 16-bit value. */
	*data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00);

	return E1000_SUCCESS;
}

/**
 * e1000_write_phy_reg_i2c - Write PHY register using i2c
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write at register offset
 *
 * Writes the data to PHY register at the offset using the i2c interface.
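 *
 * Note that the 16-bit payload is byte-swapped into the I2CCMD data
 * field before the command is issued (phy_data_swapped below), mirroring
 * the swap done on the read path.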
 **/
s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 i, i2ccmd = 0;
	u16 phy_data_swapped;

	DEBUGFUNC("e1000_write_phy_reg_i2c");

	/* Prevent overwriting SFP I2C EEPROM which is at A0 address. */
	if ((hw->phy.addr == 0) || (hw->phy.addr > 7)) {
		DEBUGOUT1("PHY I2C Address %d is out of range.\n",
			  hw->phy.addr);
		return -E1000_ERR_CONFIG;
	}

	/* Swap the data bytes for the I2C interface */
	phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00);

	/* Set up Op-code, Phy Address, and register address in the I2CCMD
	 * register.  The MAC will take care of interfacing with the
	 * PHY to write the desired data.
	 */
	i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
		  (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
		  E1000_I2CCMD_OPCODE_WRITE |
		  phy_data_swapped);

	E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);

	/* Poll the ready bit to see if the I2C write completed */
	for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
		usec_delay(50);
		i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
		if (i2ccmd & E1000_I2CCMD_READY)
			break;
	}
	if (!(i2ccmd & E1000_I2CCMD_READY)) {
		DEBUGOUT("I2CCMD Write did not complete\n");
		return -E1000_ERR_PHY;
	}
	if (i2ccmd & E1000_I2CCMD_ERROR) {
		DEBUGOUT("I2CCMD Error bit set\n");
		return -E1000_ERR_PHY;
	}

	return E1000_SUCCESS;
}

/**
 * e1000_read_sfp_data_byte - Reads SFP module data.
 * @hw: pointer to the HW structure
 * @offset: byte location offset to be read
 * @data: read data buffer pointer
 *
 * Reads one byte of SFP module data stored in the EEPROM residing on the
 * SFP, or from the SFP diagnostic area.
 * Function should be called with
 * E1000_I2CCMD_SFP_DATA_ADDR() for SFP module database access
 * E1000_I2CCMD_SFP_DIAG_ADDR() for SFP diagnostics parameters
 * access
 **/
s32 e1000_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data)
{
	u32 i = 0;
	u32 i2ccmd = 0;
	u32 data_local = 0;

	DEBUGFUNC("e1000_read_sfp_data_byte");

	if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) {
		DEBUGOUT("I2CCMD command address exceeds upper limit\n");
		return -E1000_ERR_PHY;
	}

	/* Set up Op-code and EEPROM Address in the I2CCMD
	 * register.  The MAC will take care of interfacing with the
	 * EEPROM to retrieve the desired data.
	 */
	i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
		  E1000_I2CCMD_OPCODE_READ);

	E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);

	/* Poll the ready bit to see if the I2C read completed */
	for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
		usec_delay(50);
		data_local = E1000_READ_REG(hw, E1000_I2CCMD);
		if (data_local & E1000_I2CCMD_READY)
			break;
	}
	if (!(data_local & E1000_I2CCMD_READY)) {
		DEBUGOUT("I2CCMD Read did not complete\n");
		return -E1000_ERR_PHY;
	}
	if (data_local & E1000_I2CCMD_ERROR) {
		DEBUGOUT("I2CCMD Error bit set\n");
		return -E1000_ERR_PHY;
	}
	*data = (u8) data_local & 0xFF;

	return E1000_SUCCESS;
}

/**
 * e1000_write_sfp_data_byte - Writes SFP module data.
 * @hw: pointer to the HW structure
 * @offset: byte location offset to write to
 * @data: data to write
 *
 * Writes one byte of SFP module data stored in the EEPROM residing on the
 * SFP, or to the SFP diagnostic area.
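 * Offsets above E1000_I2CCMD_SFP_DIAG_ADDR(255) are rejected with
 * -E1000_ERR_PHY.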
* Function should be called with * E1000_I2CCMD_SFP_DATA_ADDR() for SFP module database access * E1000_I2CCMD_SFP_DIAG_ADDR() for SFP diagnostics parameters * access **/ s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data) { u32 i = 0; u32 i2ccmd = 0; u32 data_local = 0; DEBUGFUNC("e1000_write_sfp_data_byte"); if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) { DEBUGOUT("I2CCMD command address exceeds upper limit\n"); return -E1000_ERR_PHY; } /* The programming interface is 16 bits wide * so we need to read the whole word first * then update appropriate byte lane and write * the updated word back. */ /* Set up Op-code, EEPROM Address,in the I2CCMD * register. The MAC will take care of interfacing * with an EEPROM to write the data given. */ i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | E1000_I2CCMD_OPCODE_READ); /* Set a command to read single word */ E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { usec_delay(50); /* Poll the ready bit to see if lastly * launched I2C operation completed */ i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD); if (i2ccmd & E1000_I2CCMD_READY) { /* Check if this is READ or WRITE phase */ if ((i2ccmd & E1000_I2CCMD_OPCODE_READ) == E1000_I2CCMD_OPCODE_READ) { /* Write the selected byte * lane and update whole word */ data_local = i2ccmd & 0xFF00; data_local |= data; i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | E1000_I2CCMD_OPCODE_WRITE | data_local); E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); } else { break; } } } if (!(i2ccmd & E1000_I2CCMD_READY)) { DEBUGOUT("I2CCMD Write did not complete\n"); return -E1000_ERR_PHY; } if (i2ccmd & E1000_I2CCMD_ERROR) { DEBUGOUT("I2CCMD Error bit set\n"); return -E1000_ERR_PHY; } return E1000_SUCCESS; } /** * e1000_read_phy_reg_m88 - Read m88 PHY register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * * Acquires semaphore, if necessary, then reads the PHY register at offset * and storing the retrieved information in data. Release any acquired * semaphores before exiting. **/ s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data) { s32 ret_val; DEBUGFUNC("e1000_read_phy_reg_m88"); if (!hw->phy.ops.acquire) return E1000_SUCCESS; ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); hw->phy.ops.release(hw); return ret_val; } /** * e1000_write_phy_reg_m88 - Write m88 PHY register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * * Acquires semaphore, if necessary, then writes the data to PHY register * at the offset. Release any acquired semaphores before exiting. **/ s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data) { s32 ret_val; DEBUGFUNC("e1000_write_phy_reg_m88"); if (!hw->phy.ops.acquire) return E1000_SUCCESS; ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); hw->phy.ops.release(hw); return ret_val; } /** * e1000_set_page_igp - Set page as on IGP-like PHY(s) * @hw: pointer to the HW structure * @page: page to set (shifted left when necessary) * * Sets PHY page required for PHY register access. Assumes semaphore is * already acquired. Note, this function sets phy.addr to 1 so the caller * must set it appropriately (if necessary) after this function returns. 
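 * Implemented as a plain MDIC write of the page value to
 * IGP01E1000_PHY_PAGE_SELECT.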
**/ s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page) { DEBUGFUNC("e1000_set_page_igp"); DEBUGOUT1("Setting page 0x%x\n", page); hw->phy.addr = 1; return e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, page); } /** * __e1000_read_phy_reg_igp - Read igp PHY register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * @locked: semaphore has already been acquired or not * * Acquires semaphore, if necessary, then reads the PHY register at offset * and stores the retrieved information in data. Release any acquired * semaphores before exiting. **/ static s32 __e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data, bool locked) { s32 ret_val = E1000_SUCCESS; DEBUGFUNC("__e1000_read_phy_reg_igp"); if (!locked) { if (!hw->phy.ops.acquire) return E1000_SUCCESS; ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; } if (offset > MAX_PHY_MULTI_PAGE_REG) ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, (u16)offset); if (!ret_val) ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); if (!locked) hw->phy.ops.release(hw); return ret_val; } /** * e1000_read_phy_reg_igp - Read igp PHY register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * * Acquires semaphore then reads the PHY register at offset and stores the * retrieved information in data. * Release the acquired semaphore before exiting. **/ s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) { return __e1000_read_phy_reg_igp(hw, offset, data, FALSE); } /** * e1000_read_phy_reg_igp_locked - Read igp PHY register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * * Reads the PHY register at offset and stores the retrieved information * in data. Assumes semaphore already acquired. **/ s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data) { return __e1000_read_phy_reg_igp(hw, offset, data, TRUE); } /** * e1000_write_phy_reg_igp - Write igp PHY register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * @locked: semaphore has already been acquired or not * * Acquires semaphore, if necessary, then writes the data to PHY register * at the offset. Release any acquired semaphores before exiting. **/ static s32 __e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data, bool locked) { s32 ret_val = E1000_SUCCESS; DEBUGFUNC("e1000_write_phy_reg_igp"); if (!locked) { if (!hw->phy.ops.acquire) return E1000_SUCCESS; ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; } if (offset > MAX_PHY_MULTI_PAGE_REG) ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, (u16)offset); if (!ret_val) ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); if (!locked) hw->phy.ops.release(hw); return ret_val; } /** * e1000_write_phy_reg_igp - Write igp PHY register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * * Acquires semaphore then writes the data to PHY register * at the offset. Release any acquired semaphores before exiting. 
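 * For offsets above MAX_PHY_MULTI_PAGE_REG, the page is selected through
 * IGP01E1000_PHY_PAGE_SELECT before the register itself is written.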
**/ s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) { return __e1000_write_phy_reg_igp(hw, offset, data, FALSE); } /** * e1000_write_phy_reg_igp_locked - Write igp PHY register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * * Writes the data to PHY register at the offset. * Assumes semaphore already acquired. **/ s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data) { return __e1000_write_phy_reg_igp(hw, offset, data, TRUE); } /** * __e1000_read_kmrn_reg - Read kumeran register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * @locked: semaphore has already been acquired or not * * Acquires semaphore, if necessary. Then reads the PHY register at offset * using the kumeran interface. The information retrieved is stored in data. * Release any acquired semaphores before exiting. **/ static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, bool locked) { u32 kmrnctrlsta; DEBUGFUNC("__e1000_read_kmrn_reg"); if (!locked) { s32 ret_val = E1000_SUCCESS; if (!hw->phy.ops.acquire) return E1000_SUCCESS; ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; } kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta); E1000_WRITE_FLUSH(hw); usec_delay(2); kmrnctrlsta = E1000_READ_REG(hw, E1000_KMRNCTRLSTA); *data = (u16)kmrnctrlsta; if (!locked) hw->phy.ops.release(hw); return E1000_SUCCESS; } /** * e1000_read_kmrn_reg_generic - Read kumeran register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * * Acquires semaphore then reads the PHY register at offset using the * kumeran interface. The information retrieved is stored in data. * Release the acquired semaphore before exiting. **/ s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data) { return __e1000_read_kmrn_reg(hw, offset, data, FALSE); } /** * e1000_read_kmrn_reg_locked - Read kumeran register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * * Reads the PHY register at offset using the kumeran interface. The * information retrieved is stored in data. * Assumes semaphore already acquired. **/ s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data) { return __e1000_read_kmrn_reg(hw, offset, data, TRUE); } /** * __e1000_write_kmrn_reg - Write kumeran register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * @locked: semaphore has already been acquired or not * * Acquires semaphore, if necessary. Then write the data to PHY register * at the offset using the kumeran interface. Release any acquired semaphores * before exiting. 
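 *
 * Unlike the read path, no E1000_KMRNCTRLSTA_REN bit is set here; the
 * offset is shifted into E1000_KMRNCTRLSTA_OFFSET and OR'd with the data.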
**/ static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, bool locked) { u32 kmrnctrlsta; DEBUGFUNC("e1000_write_kmrn_reg_generic"); if (!locked) { s32 ret_val = E1000_SUCCESS; if (!hw->phy.ops.acquire) return E1000_SUCCESS; ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; } kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & E1000_KMRNCTRLSTA_OFFSET) | data; E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta); E1000_WRITE_FLUSH(hw); usec_delay(2); if (!locked) hw->phy.ops.release(hw); return E1000_SUCCESS; } /** * e1000_write_kmrn_reg_generic - Write kumeran register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * * Acquires semaphore then writes the data to the PHY register at the offset * using the kumeran interface. Release the acquired semaphore before exiting. **/ s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data) { return __e1000_write_kmrn_reg(hw, offset, data, FALSE); } /** * e1000_write_kmrn_reg_locked - Write kumeran register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * * Write the data to PHY register at the offset using the kumeran interface. * Assumes semaphore already acquired. **/ s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data) { return __e1000_write_kmrn_reg(hw, offset, data, TRUE); } /** * e1000_set_master_slave_mode - Setup PHY for Master/slave mode * @hw: pointer to the HW structure * * Sets up Master/slave mode **/ static s32 e1000_set_master_slave_mode(struct e1000_hw *hw) { s32 ret_val; u16 phy_data; /* Resolve Master/Slave mode */ ret_val = hw->phy.ops.read_reg(hw, PHY_1000T_CTRL, &phy_data); if (ret_val) return ret_val; /* load defaults for future use */ hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ? ((phy_data & CR_1000T_MS_VALUE) ? e1000_ms_force_master : e1000_ms_force_slave) : e1000_ms_auto; switch (hw->phy.ms_type) { case e1000_ms_force_master: phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); break; case e1000_ms_force_slave: phy_data |= CR_1000T_MS_ENABLE; phy_data &= ~(CR_1000T_MS_VALUE); break; case e1000_ms_auto: phy_data &= ~CR_1000T_MS_ENABLE; /* fall-through */ default: break; } return hw->phy.ops.write_reg(hw, PHY_1000T_CTRL, phy_data); } /** * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link * @hw: pointer to the HW structure * * Sets up Carrier-sense on Transmit and downshift values. **/ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) { s32 ret_val; u16 phy_data; DEBUGFUNC("e1000_copper_link_setup_82577"); if (hw->phy.type == e1000_phy_82580) { ret_val = hw->phy.ops.reset(hw); if (ret_val) { DEBUGOUT("Error resetting the PHY.\n"); return ret_val; } } /* Enable CRS on Tx. This must be set for half-duplex operation. 
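	 * Downshift is enabled in the same I82577_CFG_REG update just below.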
*/ ret_val = hw->phy.ops.read_reg(hw, I82577_CFG_REG, &phy_data); if (ret_val) return ret_val; phy_data |= I82577_CFG_ASSERT_CRS_ON_TX; /* Enable downshift */ phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; ret_val = hw->phy.ops.write_reg(hw, I82577_CFG_REG, phy_data); if (ret_val) return ret_val; /* Set MDI/MDIX mode */ ret_val = hw->phy.ops.read_reg(hw, I82577_PHY_CTRL_2, &phy_data); if (ret_val) return ret_val; phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK; /* Options: * 0 - Auto (default) * 1 - MDI mode * 2 - MDI-X mode */ switch (hw->phy.mdix) { case 1: break; case 2: phy_data |= I82577_PHY_CTRL2_MANUAL_MDIX; break; case 0: default: phy_data |= I82577_PHY_CTRL2_AUTO_MDI_MDIX; break; } ret_val = hw->phy.ops.write_reg(hw, I82577_PHY_CTRL_2, phy_data); if (ret_val) return ret_val; return e1000_set_master_slave_mode(hw); } /** * e1000_copper_link_setup_m88 - Setup m88 PHY's for copper link * @hw: pointer to the HW structure * * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock * and downshift values are set also. **/ s32 e1000_copper_link_setup_m88(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data; DEBUGFUNC("e1000_copper_link_setup_m88"); /* Enable CRS on Tx. This must be set for half-duplex operation. */ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); if (ret_val) return ret_val; /* For BM PHY this bit is downshift enable */ if (phy->type != e1000_phy_bm) phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; /* Options: * MDI/MDI-X = 0 (default) * 0 - Auto for all speeds * 1 - MDI mode * 2 - MDI-X mode * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) */ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; switch (phy->mdix) { case 1: phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; break; case 2: phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; break; case 3: phy_data |= M88E1000_PSCR_AUTO_X_1000T; break; case 0: default: phy_data |= M88E1000_PSCR_AUTO_X_MODE; break; } /* Options: * disable_polarity_correction = 0 (default) * Automatic Correction for Reversed Cable Polarity * 0 - Disabled * 1 - Enabled */ phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; if (phy->disable_polarity_correction) phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; /* Enable downshift on BM (disabled by default) */ if (phy->type == e1000_phy_bm) { /* For 82574/82583, first disable then enable downshift */ if (phy->id == BME1000_E_PHY_ID_R2) { phy_data &= ~BME1000_PSCR_ENABLE_DOWNSHIFT; ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); if (ret_val) return ret_val; /* Commit the changes. */ ret_val = phy->ops.commit(hw); if (ret_val) { DEBUGOUT("Error committing the PHY changes\n"); return ret_val; } } phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT; } ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); if (ret_val) return ret_val; if ((phy->type == e1000_phy_m88) && (phy->revision < E1000_REVISION_4) && (phy->id != BME1000_E_PHY_ID_R2)) { /* Force TX_CLK in the Extended PHY Specific Control Register * to 25MHz clock. */ ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); if (ret_val) return ret_val; phy_data |= M88E1000_EPSCR_TX_CLK_25; if ((phy->revision == E1000_REVISION_2) && (phy->id == M88E1111_I_PHY_ID)) { /* 82573L PHY - set the downshift counter to 5x. 
*/ phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK; phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; } else { /* Configure Master and Slave downshift values */ phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); } ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); if (ret_val) return ret_val; } if ((phy->type == e1000_phy_bm) && (phy->id == BME1000_E_PHY_ID_R2)) { /* Set PHY page 0, register 29 to 0x0003 */ ret_val = phy->ops.write_reg(hw, 29, 0x0003); if (ret_val) return ret_val; /* Set PHY page 0, register 30 to 0x0000 */ ret_val = phy->ops.write_reg(hw, 30, 0x0000); if (ret_val) return ret_val; } /* Commit the changes. */ ret_val = phy->ops.commit(hw); if (ret_val) { DEBUGOUT("Error committing the PHY changes\n"); return ret_val; } if (phy->type == e1000_phy_82578) { ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); if (ret_val) return ret_val; /* 82578 PHY - set the downshift count to 1x. */ phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE; phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK; ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); if (ret_val) return ret_val; } return E1000_SUCCESS; } /** * e1000_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link * @hw: pointer to the HW structure * * Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHY's. * Also enables and sets the downshift parameters. **/ s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data; DEBUGFUNC("e1000_copper_link_setup_m88_gen2"); /* Enable CRS on Tx. This must be set for half-duplex operation. */ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); if (ret_val) return ret_val; /* Options: * MDI/MDI-X = 0 (default) * 0 - Auto for all speeds * 1 - MDI mode * 2 - MDI-X mode * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) */ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; switch (phy->mdix) { case 1: phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; break; case 2: phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; break; case 3: /* M88E1112 does not support this mode) */ if (phy->id != M88E1112_E_PHY_ID) { phy_data |= M88E1000_PSCR_AUTO_X_1000T; break; } case 0: default: phy_data |= M88E1000_PSCR_AUTO_X_MODE; break; } /* Options: * disable_polarity_correction = 0 (default) * Automatic Correction for Reversed Cable Polarity * 0 - Disabled * 1 - Enabled */ phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; if (phy->disable_polarity_correction) phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; /* Enable downshift and setting it to X6 */ if (phy->id == M88E1543_E_PHY_ID) { phy_data &= ~I347AT4_PSCR_DOWNSHIFT_ENABLE; ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); if (ret_val) return ret_val; ret_val = phy->ops.commit(hw); if (ret_val) { DEBUGOUT("Error committing the PHY changes\n"); return ret_val; } } phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK; phy_data |= I347AT4_PSCR_DOWNSHIFT_6X; phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE; ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); if (ret_val) return ret_val; /* Commit the changes. 
*/ ret_val = phy->ops.commit(hw); if (ret_val) { DEBUGOUT("Error committing the PHY changes\n"); return ret_val; } ret_val = e1000_set_master_slave_mode(hw); if (ret_val) return ret_val; return E1000_SUCCESS; } /** * e1000_copper_link_setup_igp - Setup igp PHY's for copper link * @hw: pointer to the HW structure * * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for * igp PHY's. **/ s32 e1000_copper_link_setup_igp(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 data; DEBUGFUNC("e1000_copper_link_setup_igp"); ret_val = hw->phy.ops.reset(hw); if (ret_val) { DEBUGOUT("Error resetting the PHY.\n"); return ret_val; } /* Wait 100ms for MAC to configure PHY from NVM settings, to avoid * timeout issues when LFS is enabled. */ msec_delay(100); /* The NVM settings will configure LPLU in D3 for * non-IGP1 PHYs. */ if (phy->type == e1000_phy_igp) { /* disable lplu d3 during driver init */ ret_val = hw->phy.ops.set_d3_lplu_state(hw, FALSE); if (ret_val) { DEBUGOUT("Error Disabling LPLU D3\n"); return ret_val; } } /* disable lplu d0 during driver init */ if (hw->phy.ops.set_d0_lplu_state) { ret_val = hw->phy.ops.set_d0_lplu_state(hw, FALSE); if (ret_val) { DEBUGOUT("Error Disabling LPLU D0\n"); return ret_val; } } /* Configure mdi-mdix settings */ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data); if (ret_val) return ret_val; data &= ~IGP01E1000_PSCR_AUTO_MDIX; switch (phy->mdix) { case 1: data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; break; case 2: data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; break; case 0: default: data |= IGP01E1000_PSCR_AUTO_MDIX; break; } ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data); if (ret_val) return ret_val; /* set auto-master slave resolution settings */ if (hw->mac.autoneg) { /* when autonegotiation advertisement is only 1000Mbps then we * should disable SmartSpeed and enable Auto MasterSlave * resolution as hardware default. */ if (phy->autoneg_advertised == ADVERTISE_1000_FULL) { /* Disable SmartSpeed */ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) return ret_val; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) return ret_val; /* Set auto Master/Slave resolution process */ ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data); if (ret_val) return ret_val; data &= ~CR_1000T_MS_ENABLE; ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data); if (ret_val) return ret_val; } ret_val = e1000_set_master_slave_mode(hw); } return ret_val; } /** * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation * @hw: pointer to the HW structure * * Reads the MII auto-neg advertisement register and/or the 1000T control * register and if the PHY is already setup for auto-negotiation, then * return successful. Otherwise, setup advertisement and flow control to * the appropriate values for the wanted auto-negotiation. **/ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 mii_autoneg_adv_reg; u16 mii_1000t_ctrl_reg = 0; DEBUGFUNC("e1000_phy_setup_autoneg"); phy->autoneg_advertised &= phy->autoneg_mask; /* Read the MII Auto-Neg Advertisement Register (Address 4). */ ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); if (ret_val) return ret_val; if (phy->autoneg_mask & ADVERTISE_1000_FULL) { /* Read the MII 1000Base-T Control Register (Address 9). 
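		 * This is read only when gigabit can be advertised at all;
		 * otherwise mii_1000t_ctrl_reg stays 0 and is never written
		 * back.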
*/ ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); if (ret_val) return ret_val; } /* Need to parse both autoneg_advertised and fc and set up * the appropriate PHY registers. First we will parse for * autoneg_advertised software override. Since we can advertise * a plethora of combinations, we need to check each bit * individually. */ /* First we clear all the 10/100 mb speed bits in the Auto-Neg * Advertisement Register (Address 4) and the 1000 mb speed bits in * the 1000Base-T Control Register (Address 9). */ mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | NWAY_AR_100TX_HD_CAPS | NWAY_AR_10T_FD_CAPS | NWAY_AR_10T_HD_CAPS); mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); DEBUGOUT1("autoneg_advertised %x\n", phy->autoneg_advertised); /* Do we want to advertise 10 Mb Half Duplex? */ if (phy->autoneg_advertised & ADVERTISE_10_HALF) { DEBUGOUT("Advertise 10mb Half duplex\n"); mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; } /* Do we want to advertise 10 Mb Full Duplex? */ if (phy->autoneg_advertised & ADVERTISE_10_FULL) { DEBUGOUT("Advertise 10mb Full duplex\n"); mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; } /* Do we want to advertise 100 Mb Half Duplex? */ if (phy->autoneg_advertised & ADVERTISE_100_HALF) { DEBUGOUT("Advertise 100mb Half duplex\n"); mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; } /* Do we want to advertise 100 Mb Full Duplex? */ if (phy->autoneg_advertised & ADVERTISE_100_FULL) { DEBUGOUT("Advertise 100mb Full duplex\n"); mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; } /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ if (phy->autoneg_advertised & ADVERTISE_1000_HALF) DEBUGOUT("Advertise 1000mb Half duplex request denied!\n"); /* Do we want to advertise 1000 Mb Full Duplex? */ if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { DEBUGOUT("Advertise 1000mb Full duplex\n"); mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; } /* Check for a software override of the flow control settings, and * setup the PHY advertisement registers accordingly. If * auto-negotiation is enabled, then software will have to set the * "PAUSE" bits to the correct value in the Auto-Negotiation * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto- * negotiation. * * The possible values of the "fc" parameter are: * 0: Flow control is completely disabled * 1: Rx flow control is enabled (we can receive pause frames * but not send pause frames). * 2: Tx flow control is enabled (we can send pause frames * but we do not support receiving pause frames). * 3: Both Rx and Tx flow control (symmetric) are enabled. * other: No software override. The flow control configuration * in the EEPROM is used. */ switch (hw->fc.current_mode) { case e1000_fc_none: /* Flow control (Rx & Tx) is completely disabled by a * software over-ride. */ mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); break; case e1000_fc_rx_pause: /* Rx Flow control is enabled, and Tx Flow control is * disabled, by a software over-ride. * * Since there really isn't a way to advertise that we are * capable of Rx Pause ONLY, we will advertise that we * support both symmetric and asymmetric Rx PAUSE. Later * (in e1000_config_fc_after_link_up) we will disable the * hw's ability to send PAUSE frames. */ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); break; case e1000_fc_tx_pause: /* Tx Flow control is enabled, and Rx Flow control is * disabled, by a software over-ride. 
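	 * In 802.3 terms, ASM_DIR set with PAUSE clear advertises
	 * asymmetric, transmit-only pause.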
*/ mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; break; case e1000_fc_full: /* Flow control (both Rx and Tx) is enabled by a software * over-ride. */ mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); break; default: DEBUGOUT("Flow control param set incorrectly\n"); return -E1000_ERR_CONFIG; } ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); if (ret_val) return ret_val; DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); if (phy->autoneg_mask & ADVERTISE_1000_FULL) ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); return ret_val; } /** * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link * @hw: pointer to the HW structure * * Performs initial bounds checking on autoneg advertisement parameter, then * configure to advertise the full capability. Setup the PHY to autoneg * and restart the negotiation process between the link partner. If * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. **/ s32 e1000_copper_link_autoneg(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_ctrl; DEBUGFUNC("e1000_copper_link_autoneg"); /* Perform some bounds checking on the autoneg advertisement * parameter. */ phy->autoneg_advertised &= phy->autoneg_mask; /* If autoneg_advertised is zero, we assume it was not defaulted * by the calling code so we set to advertise full capability. */ if (!phy->autoneg_advertised) phy->autoneg_advertised = phy->autoneg_mask; DEBUGOUT("Reconfiguring auto-neg advertisement params\n"); ret_val = e1000_phy_setup_autoneg(hw); if (ret_val) { DEBUGOUT("Error Setting up Auto-Negotiation\n"); return ret_val; } DEBUGOUT("Restarting Auto-Neg\n"); /* Restart auto-negotiation by setting the Auto Neg Enable bit and * the Auto Neg Restart bit in the PHY control register. */ ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); if (ret_val) return ret_val; phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); if (ret_val) return ret_val; /* Does the user want to wait for Auto-Neg to complete here, or * check at a later time (for example, callback routine). */ if (phy->autoneg_wait_to_complete) { ret_val = e1000_wait_autoneg(hw); if (ret_val) { DEBUGOUT("Error while waiting for autoneg to complete\n"); return ret_val; } } hw->mac.get_link_status = TRUE; return ret_val; } /** * e1000_setup_copper_link_generic - Configure copper link settings * @hw: pointer to the HW structure * * Calls the appropriate function to configure the link for auto-neg or forced * speed and duplex. Then we check for link, once link is established calls * to configure collision distance and flow control are called. If link is * not established, we return -E1000_ERR_PHY (-2). **/ s32 e1000_setup_copper_link_generic(struct e1000_hw *hw) { s32 ret_val; bool link; DEBUGFUNC("e1000_setup_copper_link_generic"); if (hw->mac.autoneg) { /* Setup autoneg and flow control advertisement and perform * autonegotiation. */ ret_val = e1000_copper_link_autoneg(hw); if (ret_val) return ret_val; } else { /* PHY will be set to 10H, 10F, 100H or 100F * depending on user settings. */ DEBUGOUT("Forcing Speed and Duplex\n"); ret_val = hw->phy.ops.force_speed_duplex(hw); if (ret_val) { DEBUGOUT("Error Forcing Speed and Duplex\n"); return ret_val; } } /* Check link status. Wait up to 100 microseconds for link to become * valid. 
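	 * (COPPER_LINK_UP_LIMIT polls of the PHY status, 10 usec apart.)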
*/ ret_val = e1000_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10, &link); if (ret_val) return ret_val; if (link) { DEBUGOUT("Valid link established!!!\n"); hw->mac.ops.config_collision_dist(hw); ret_val = e1000_config_fc_after_link_up_generic(hw); } else { DEBUGOUT("Unable to establish link!!!\n"); } return ret_val; } /** * e1000_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY * @hw: pointer to the HW structure * * Calls the PHY setup function to force speed and duplex. Clears the * auto-crossover to force MDI manually. Waits for link and returns * successful if link up is successful, else -E1000_ERR_PHY (-2). **/ s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data; bool link; DEBUGFUNC("e1000_phy_force_speed_duplex_igp"); ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); if (ret_val) return ret_val; e1000_phy_force_speed_duplex_setup(hw, &phy_data); ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); if (ret_val) return ret_val; /* Clear Auto-Crossover to force MDI manually. IGP requires MDI * forced whenever speed and duplex are forced. */ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); if (ret_val) return ret_val; phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); if (ret_val) return ret_val; DEBUGOUT1("IGP PSCR: %X\n", phy_data); usec_delay(1); if (phy->autoneg_wait_to_complete) { DEBUGOUT("Waiting for forced speed/duplex link on IGP phy.\n"); ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 100000, &link); if (ret_val) return ret_val; if (!link) DEBUGOUT("Link taking longer than expected.\n"); /* Try once more */ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 100000, &link); } return ret_val; } /** * e1000_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY * @hw: pointer to the HW structure * * Calls the PHY setup function to force speed and duplex. Clears the * auto-crossover to force MDI manually. Resets the PHY to commit the * changes. If time expires while waiting for link up, we reset the DSP. * After reset, TX_CLK and CRS on Tx must be set. Return successful upon * successful completion, else return corresponding error code. **/ s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data; bool link; DEBUGFUNC("e1000_phy_force_speed_duplex_m88"); /* I210 and I211 devices support Auto-Crossover in forced operation. */ if (phy->type != e1000_phy_i210) { /* Clear Auto-Crossover to force MDI manually. M88E1000 * requires MDI forced whenever speed and duplex are forced. */ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); if (ret_val) return ret_val; phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); if (ret_val) return ret_val; + + DEBUGOUT1("M88E1000 PSCR: %X\n", phy_data); } - DEBUGOUT1("M88E1000 PSCR: %X\n", phy_data); - ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); if (ret_val) return ret_val; e1000_phy_force_speed_duplex_setup(hw, &phy_data); ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); if (ret_val) return ret_val; /* Reset the phy to commit changes. 
*/ ret_val = hw->phy.ops.commit(hw); if (ret_val) return ret_val; if (phy->autoneg_wait_to_complete) { DEBUGOUT("Waiting for forced speed/duplex link on M88 phy.\n"); ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 100000, &link); if (ret_val) return ret_val; if (!link) { bool reset_dsp = TRUE; switch (hw->phy.id) { case I347AT4_E_PHY_ID: case M88E1340M_E_PHY_ID: case M88E1112_E_PHY_ID: case M88E1543_E_PHY_ID: case M88E1512_E_PHY_ID: case I210_I_PHY_ID: reset_dsp = FALSE; break; default: if (hw->phy.type != e1000_phy_m88) reset_dsp = FALSE; break; } if (!reset_dsp) { DEBUGOUT("Link taking longer than expected.\n"); } else { /* We didn't get link. * Reset the DSP and cross our fingers. */ ret_val = phy->ops.write_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x001d); if (ret_val) return ret_val; ret_val = e1000_phy_reset_dsp_generic(hw); if (ret_val) return ret_val; } } /* Try once more */ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 100000, &link); if (ret_val) return ret_val; } if (hw->phy.type != e1000_phy_m88) return E1000_SUCCESS; if (hw->phy.id == I347AT4_E_PHY_ID || hw->phy.id == M88E1340M_E_PHY_ID || hw->phy.id == M88E1112_E_PHY_ID) return E1000_SUCCESS; if (hw->phy.id == I210_I_PHY_ID) return E1000_SUCCESS; if ((hw->phy.id == M88E1543_E_PHY_ID) || (hw->phy.id == M88E1512_E_PHY_ID)) return E1000_SUCCESS; ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); if (ret_val) return ret_val; /* Resetting the phy means we need to re-force TX_CLK in the * Extended PHY Specific Control Register to 25MHz clock from * the reset value of 2.5MHz. */ phy_data |= M88E1000_EPSCR_TX_CLK_25; ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); if (ret_val) return ret_val; /* In addition, we must re-enable CRS on Tx for both half and full * duplex. */ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); if (ret_val) return ret_val; phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); return ret_val; } /** * e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex * @hw: pointer to the HW structure * * Forces the speed and duplex settings of the PHY. * This is a function pointer entry point only called by * PHY setup routines. 
**/ s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 data; bool link; DEBUGFUNC("e1000_phy_force_speed_duplex_ife"); ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &data); if (ret_val) return ret_val; e1000_phy_force_speed_duplex_setup(hw, &data); ret_val = phy->ops.write_reg(hw, PHY_CONTROL, data); if (ret_val) return ret_val; /* Disable MDI-X support for 10/100 */ ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data); if (ret_val) return ret_val; data &= ~IFE_PMC_AUTO_MDIX; data &= ~IFE_PMC_FORCE_MDIX; ret_val = phy->ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, data); if (ret_val) return ret_val; DEBUGOUT1("IFE PMC: %X\n", data); usec_delay(1); if (phy->autoneg_wait_to_complete) { DEBUGOUT("Waiting for forced speed/duplex link on IFE phy.\n"); ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 100000, &link); if (ret_val) return ret_val; if (!link) DEBUGOUT("Link taking longer than expected.\n"); /* Try once more */ ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 100000, &link); if (ret_val) return ret_val; } return E1000_SUCCESS; } /** * e1000_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex * @hw: pointer to the HW structure * @phy_ctrl: pointer to current value of PHY_CONTROL * * Forces speed and duplex on the PHY by doing the following: disable flow * control, force speed/duplex on the MAC, disable auto speed detection, * disable auto-negotiation, configure duplex, configure speed, configure * the collision distance, write configuration to CTRL register. The * caller must write to the PHY_CONTROL register for these settings to * take affect. **/ void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl) { struct e1000_mac_info *mac = &hw->mac; u32 ctrl; DEBUGFUNC("e1000_phy_force_speed_duplex_setup"); /* Turn off flow control when forcing speed/duplex */ hw->fc.current_mode = e1000_fc_none; /* Force speed/duplex on the mac */ ctrl = E1000_READ_REG(hw, E1000_CTRL); ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); ctrl &= ~E1000_CTRL_SPD_SEL; /* Disable Auto Speed Detection */ ctrl &= ~E1000_CTRL_ASDE; /* Disable autoneg on the phy */ *phy_ctrl &= ~MII_CR_AUTO_NEG_EN; /* Forcing Full or Half Duplex? */ if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { ctrl &= ~E1000_CTRL_FD; *phy_ctrl &= ~MII_CR_FULL_DUPLEX; DEBUGOUT("Half Duplex\n"); } else { ctrl |= E1000_CTRL_FD; *phy_ctrl |= MII_CR_FULL_DUPLEX; DEBUGOUT("Full Duplex\n"); } /* Forcing 10mb or 100mb? */ if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) { ctrl |= E1000_CTRL_SPD_100; *phy_ctrl |= MII_CR_SPEED_100; *phy_ctrl &= ~MII_CR_SPEED_1000; DEBUGOUT("Forcing 100mb\n"); } else { ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); DEBUGOUT("Forcing 10mb\n"); } hw->mac.ops.config_collision_dist(hw); E1000_WRITE_REG(hw, E1000_CTRL, ctrl); } /** * e1000_set_d3_lplu_state_generic - Sets low power link up state for D3 * @hw: pointer to the HW structure * @active: boolean used to enable/disable lplu * * Success returns 0, Failure returns 1 * * The low power link up (lplu) state is set to the power management level D3 * and SmartSpeed is disabled when active is TRUE, else clear lplu for D3 * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU * is used during Dx states where the power conservation is most important. * During driver activity, SmartSpeed should be enabled so performance is * maintained. 
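 *
 * Illustrative call sequence (a hedged sketch, not code from this file;
 * "hw" is assumed to be a fully initialized adapter handle):
 *
 *	e1000_set_d3_lplu_state_generic(hw, TRUE);   <- entering D3: LPLU may
 *	                                                be set, SmartSpeed off
 *	e1000_set_d3_lplu_state_generic(hw, FALSE);  <- back in D0: LPLU off,
 *	                                                SmartSpeed per
 *	                                                phy->smart_speed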
**/ s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 data; DEBUGFUNC("e1000_set_d3_lplu_state_generic"); if (!hw->phy.ops.read_reg) return E1000_SUCCESS; ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); if (ret_val) return ret_val; if (!active) { data &= ~IGP02E1000_PM_D3_LPLU; ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, data); if (ret_val) return ret_val; /* LPLU and SmartSpeed are mutually exclusive. LPLU is used * during Dx states where the power conservation is most * important. During driver activity we should enable * SmartSpeed, so performance is maintained. */ if (phy->smart_speed == e1000_smart_speed_on) { ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) return ret_val; data |= IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) return ret_val; } else if (phy->smart_speed == e1000_smart_speed_off) { ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) return ret_val; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, data); if (ret_val) return ret_val; } } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { data |= IGP02E1000_PM_D3_LPLU; ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, data); if (ret_val) return ret_val; /* When LPLU is enabled, we should disable SmartSpeed */ ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &data); if (ret_val) return ret_val; data &= ~IGP01E1000_PSCFR_SMART_SPEED; ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, data); } return ret_val; } /** * e1000_check_downshift_generic - Checks whether a downshift in speed occurred * @hw: pointer to the HW structure * * Success returns 0, Failure returns 1 * * A downshift is detected by querying the PHY link health. **/ s32 e1000_check_downshift_generic(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data, offset, mask; DEBUGFUNC("e1000_check_downshift_generic"); switch (phy->type) { case e1000_phy_i210: case e1000_phy_m88: case e1000_phy_gg82563: case e1000_phy_bm: case e1000_phy_82578: offset = M88E1000_PHY_SPEC_STATUS; mask = M88E1000_PSSR_DOWNSHIFT; break; case e1000_phy_igp: case e1000_phy_igp_2: case e1000_phy_igp_3: offset = IGP01E1000_PHY_LINK_HEALTH; mask = IGP01E1000_PLHR_SS_DOWNGRADE; break; default: /* speed downshift not supported */ phy->speed_downgraded = FALSE; return E1000_SUCCESS; } ret_val = phy->ops.read_reg(hw, offset, &phy_data); if (!ret_val) phy->speed_downgraded = !!(phy_data & mask); return ret_val; } /** * e1000_check_polarity_m88 - Checks the polarity. * @hw: pointer to the HW structure * * Success returns 0, Failure returns -E1000_ERR_PHY (-2) * * Polarity is determined based on the PHY specific status register. **/ s32 e1000_check_polarity_m88(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 data; DEBUGFUNC("e1000_check_polarity_m88"); ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data); if (!ret_val) phy->cable_polarity = ((data & M88E1000_PSSR_REV_POLARITY) ? e1000_rev_polarity_reversed : e1000_rev_polarity_normal); return ret_val; } /** * e1000_check_polarity_igp - Checks the polarity. 
 * @hw: pointer to the HW structure
 *
 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
 *
 * Polarity is determined based on the PHY port status register, and the
 * current speed (since there is no polarity at 100Mbps).
 **/
s32 e1000_check_polarity_igp(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 data, offset, mask;

	DEBUGFUNC("e1000_check_polarity_igp");

	/* Polarity is determined based on the speed of
	 * our connection.
	 */
	ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
	if (ret_val)
		return ret_val;

	if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
	    IGP01E1000_PSSR_SPEED_1000MBPS) {
		offset = IGP01E1000_PHY_PCS_INIT_REG;
		mask = IGP01E1000_PHY_POLARITY_MASK;
	} else {
		/* This really only applies to 10Mbps since
		 * there is no polarity for 100Mbps (always 0).
		 */
		offset = IGP01E1000_PHY_PORT_STATUS;
		mask = IGP01E1000_PSSR_POLARITY_REVERSED;
	}

	ret_val = phy->ops.read_reg(hw, offset, &data);

	if (!ret_val)
		phy->cable_polarity = ((data & mask)
				       ? e1000_rev_polarity_reversed
				       : e1000_rev_polarity_normal);

	return ret_val;
}

/**
 * e1000_check_polarity_ife - Check cable polarity for IFE PHY
 * @hw: pointer to the HW structure
 *
 * Polarity is determined based on whether the polarity reversal feature
 * is enabled.
 **/
s32 e1000_check_polarity_ife(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 phy_data, offset, mask;

	DEBUGFUNC("e1000_check_polarity_ife");

	/* Polarity is determined based on the reversal feature being
	 * enabled.
	 */
	if (phy->polarity_correction) {
		offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
		mask = IFE_PESC_POLARITY_REVERSED;
	} else {
		offset = IFE_PHY_SPECIAL_CONTROL;
		mask = IFE_PSC_FORCE_POLARITY;
	}

	ret_val = phy->ops.read_reg(hw, offset, &phy_data);

	if (!ret_val)
		phy->cable_polarity = ((phy_data & mask)
				       ? e1000_rev_polarity_reversed
				       : e1000_rev_polarity_normal);

	return ret_val;
}

/**
 * e1000_wait_autoneg - Wait for auto-neg completion
 * @hw: pointer to the HW structure
 *
 * Waits for auto-negotiation to complete or for the auto-negotiation time
 * limit to expire, whichever happens first.
 **/
static s32 e1000_wait_autoneg(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	u16 i, phy_status;

	DEBUGFUNC("e1000_wait_autoneg");

	if (!hw->phy.ops.read_reg)
		return E1000_SUCCESS;

	/* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
	for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
		if (ret_val)
			break;
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
		if (ret_val)
			break;
		if (phy_status & MII_SR_AUTONEG_COMPLETE)
			break;
		msec_delay(100);
	}

	/* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
	 * has completed.
	 */
	return ret_val;
}

/**
 * e1000_phy_has_link_generic - Polls PHY for link
 * @hw: pointer to the HW structure
 * @iterations: number of times to poll for link
 * @usec_interval: delay between polling attempts
 * @success: pointer to bool indicating whether link was detected
 *
 * Polls the PHY status register for link, 'iterations' number of times.
 **/
s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
			       u32 usec_interval, bool *success)
{
	s32 ret_val = E1000_SUCCESS;
	u16 i, phy_status;

	DEBUGFUNC("e1000_phy_has_link_generic");

	if (!hw->phy.ops.read_reg)
		return E1000_SUCCESS;

	for (i = 0; i < iterations; i++) {
		/* Some PHYs require the PHY_STATUS register to be read
		 * twice due to the link bit being sticky.  No harm doing
		 * it across the board.
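		 * (The link status bit is latched low per the IEEE 802.3 MII
		 * definition, so the first read returns the previously
		 * latched value and the second read returns the current
		 * state.)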
*/ ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); if (ret_val) { /* If the first read fails, another entity may have * ownership of the resources, wait and try again to * see if they have relinquished the resources yet. */ if (usec_interval >= 1000) msec_delay(usec_interval/1000); else usec_delay(usec_interval); } ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); if (ret_val) break; if (phy_status & MII_SR_LINK_STATUS) break; if (usec_interval >= 1000) msec_delay(usec_interval/1000); else usec_delay(usec_interval); } *success = (i < iterations); return ret_val; } /** * e1000_get_cable_length_m88 - Determine cable length for m88 PHY * @hw: pointer to the HW structure * * Reads the PHY specific status register to retrieve the cable length * information. The cable length is determined by averaging the minimum and * maximum values to get the "average" cable length. The m88 PHY has four * possible cable length values, which are: * Register Value Cable Length * 0 < 50 meters * 1 50 - 80 meters * 2 80 - 110 meters * 3 110 - 140 meters * 4 > 140 meters **/ s32 e1000_get_cable_length_m88(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data, index; DEBUGFUNC("e1000_get_cable_length_m88"); ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); if (ret_val) return ret_val; index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >> M88E1000_PSSR_CABLE_LENGTH_SHIFT); if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) return -E1000_ERR_PHY; phy->min_cable_length = e1000_m88_cable_length_table[index]; phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; return E1000_SUCCESS; } s32 e1000_get_cable_length_m88_gen2(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data, phy_data2, is_cm; u16 index, default_page; DEBUGFUNC("e1000_get_cable_length_m88_gen2"); switch (hw->phy.id) { case I210_I_PHY_ID: /* Get cable length from PHY Cable Diagnostics Control Reg */ ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) + (I347AT4_PCDL + phy->addr), &phy_data); if (ret_val) return ret_val; /* Check if the unit of cable length is meters or cm */ ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) + I347AT4_PCDC, &phy_data2); if (ret_val) return ret_val; is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT); /* Populate the phy structure with cable length in meters */ phy->min_cable_length = phy_data / (is_cm ? 100 : 1); phy->max_cable_length = phy_data / (is_cm ? 100 : 1); phy->cable_length = phy_data / (is_cm ? 100 : 1); break; case M88E1543_E_PHY_ID: case M88E1512_E_PHY_ID: case M88E1340M_E_PHY_ID: case I347AT4_E_PHY_ID: /* Remember the original page select and set it to 7 */ ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, &default_page); if (ret_val) return ret_val; ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07); if (ret_val) return ret_val; /* Get cable length from PHY Cable Diagnostics Control Reg */ ret_val = phy->ops.read_reg(hw, (I347AT4_PCDL + phy->addr), &phy_data); if (ret_val) return ret_val; /* Check if the unit of cable length is meters or cm */ ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2); if (ret_val) return ret_val; is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT); /* Populate the phy structure with cable length in meters */ phy->min_cable_length = phy_data / (is_cm ? 100 : 1); phy->max_cable_length = phy_data / (is_cm ? 100 : 1); phy->cable_length = phy_data / (is_cm ? 
100 : 1); /* Reset the page select to its original value */ ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, default_page); if (ret_val) return ret_val; break; case M88E1112_E_PHY_ID: /* Remember the original page select and set it to 5 */ ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, &default_page); if (ret_val) return ret_val; ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05); if (ret_val) return ret_val; ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE, &phy_data); if (ret_val) return ret_val; index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> M88E1000_PSSR_CABLE_LENGTH_SHIFT; if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) return -E1000_ERR_PHY; phy->min_cable_length = e1000_m88_cable_length_table[index]; phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; /* Reset the page select to its original value */ ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, default_page); if (ret_val) return ret_val; break; default: return -E1000_ERR_PHY; } return ret_val; } /** * e1000_get_cable_length_igp_2 - Determine cable length for igp2 PHY * @hw: pointer to the HW structure * * The automatic gain control (agc) normalizes the amplitude of the * received signal, adjusting for the attenuation produced by the * cable. By reading the AGC registers, which represent the * combination of coarse and fine gain value, the value can be put * into a lookup table to obtain the approximate cable length * for each channel. **/ s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data, i, agc_value = 0; u16 cur_agc_index, max_agc_index = 0; u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { IGP02E1000_PHY_AGC_A, IGP02E1000_PHY_AGC_B, IGP02E1000_PHY_AGC_C, IGP02E1000_PHY_AGC_D }; DEBUGFUNC("e1000_get_cable_length_igp_2"); /* Read the AGC registers for all channels */ for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data); if (ret_val) return ret_val; /* Getting bits 15:9, which represent the combination of * coarse and fine gain values. The result is a number * that can be put into the lookup table to obtain the * approximate cable length. */ cur_agc_index = ((phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & IGP02E1000_AGC_LENGTH_MASK); /* Array index bound check. */ if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) || (cur_agc_index == 0)) return -E1000_ERR_PHY; /* Remove min & max AGC values from calculation. */ if (e1000_igp_2_cable_length_table[min_agc_index] > e1000_igp_2_cable_length_table[cur_agc_index]) min_agc_index = cur_agc_index; if (e1000_igp_2_cable_length_table[max_agc_index] < e1000_igp_2_cable_length_table[cur_agc_index]) max_agc_index = cur_agc_index; agc_value += e1000_igp_2_cable_length_table[cur_agc_index]; } agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] + e1000_igp_2_cable_length_table[max_agc_index]); agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); /* Calculate cable length with the error range of +/- 10 meters. */ phy->min_cable_length = (((agc_value - IGP02E1000_AGC_RANGE) > 0) ? 
(agc_value - IGP02E1000_AGC_RANGE) : 0); phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE; phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; return E1000_SUCCESS; } /** * e1000_get_phy_info_m88 - Retrieve PHY information * @hw: pointer to the HW structure * * Valid for only copper links. Read the PHY status register (sticky read) * to verify that link is up. Read the PHY special control register to * determine the polarity and 10base-T extended distance. Read the PHY * special status register to determine MDI/MDIx and current speed. If * speed is 1000, then determine cable length, local and remote receiver. **/ s32 e1000_get_phy_info_m88(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 phy_data; bool link; DEBUGFUNC("e1000_get_phy_info_m88"); if (phy->media_type != e1000_media_type_copper) { DEBUGOUT("Phy info is only valid for copper media\n"); return -E1000_ERR_CONFIG; } ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); if (ret_val) return ret_val; if (!link) { DEBUGOUT("Phy info is only valid if link is up\n"); return -E1000_ERR_CONFIG; } ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); if (ret_val) return ret_val; phy->polarity_correction = !!(phy_data & M88E1000_PSCR_POLARITY_REVERSAL); ret_val = e1000_check_polarity_m88(hw); if (ret_val) return ret_val; ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); if (ret_val) return ret_val; phy->is_mdix = !!(phy_data & M88E1000_PSSR_MDIX); if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { ret_val = hw->phy.ops.get_cable_length(hw); if (ret_val) return ret_val; ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data); if (ret_val) return ret_val; phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS) ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; } else { /* Set values to "undefined" */ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; phy->local_rx = e1000_1000t_rx_status_undefined; phy->remote_rx = e1000_1000t_rx_status_undefined; } return ret_val; } /** * e1000_get_phy_info_igp - Retrieve igp PHY information * @hw: pointer to the HW structure * * Read PHY status to determine if link is up. If link is up, then * set/determine 10base-T extended distance and polarity correction. Read * PHY port status to determine MDI/MDIx and speed. Based on the speed, * determine on the cable length, local and remote receiver. **/ s32 e1000_get_phy_info_igp(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 data; bool link; DEBUGFUNC("e1000_get_phy_info_igp"); ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); if (ret_val) return ret_val; if (!link) { DEBUGOUT("Phy info is only valid if link is up\n"); return -E1000_ERR_CONFIG; } phy->polarity_correction = TRUE; ret_val = e1000_check_polarity_igp(hw); if (ret_val) return ret_val; ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); if (ret_val) return ret_val; phy->is_mdix = !!(data & IGP01E1000_PSSR_MDIX); if ((data & IGP01E1000_PSSR_SPEED_MASK) == IGP01E1000_PSSR_SPEED_1000MBPS) { ret_val = phy->ops.get_cable_length(hw); if (ret_val) return ret_val; ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); if (ret_val) return ret_val; phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) ? 
e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; } else { phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; phy->local_rx = e1000_1000t_rx_status_undefined; phy->remote_rx = e1000_1000t_rx_status_undefined; } return ret_val; } /** * e1000_get_phy_info_ife - Retrieves various IFE PHY states * @hw: pointer to the HW structure * * Populates "phy" structure with various feature states. **/ s32 e1000_get_phy_info_ife(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 data; bool link; DEBUGFUNC("e1000_get_phy_info_ife"); ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); if (ret_val) return ret_val; if (!link) { DEBUGOUT("Phy info is only valid if link is up\n"); return -E1000_ERR_CONFIG; } ret_val = phy->ops.read_reg(hw, IFE_PHY_SPECIAL_CONTROL, &data); if (ret_val) return ret_val; phy->polarity_correction = !(data & IFE_PSC_AUTO_POLARITY_DISABLE); if (phy->polarity_correction) { ret_val = e1000_check_polarity_ife(hw); if (ret_val) return ret_val; } else { /* Polarity is forced */ phy->cable_polarity = ((data & IFE_PSC_FORCE_POLARITY) ? e1000_rev_polarity_reversed : e1000_rev_polarity_normal); } ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data); if (ret_val) return ret_val; phy->is_mdix = !!(data & IFE_PMC_MDIX_STATUS); /* The following parameters are undefined for 10/100 operation. */ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; phy->local_rx = e1000_1000t_rx_status_undefined; phy->remote_rx = e1000_1000t_rx_status_undefined; return E1000_SUCCESS; } /** * e1000_phy_sw_reset_generic - PHY software reset * @hw: pointer to the HW structure * * Does a software reset of the PHY by reading the PHY control register and * setting/write the control register reset bit to the PHY. **/ s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw) { s32 ret_val; u16 phy_ctrl; DEBUGFUNC("e1000_phy_sw_reset_generic"); if (!hw->phy.ops.read_reg) return E1000_SUCCESS; ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); if (ret_val) return ret_val; phy_ctrl |= MII_CR_RESET; ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl); if (ret_val) return ret_val; usec_delay(1); return ret_val; } /** * e1000_phy_hw_reset_generic - PHY hardware reset * @hw: pointer to the HW structure * * Verify the reset block is not blocking us from resetting. Acquire * semaphore (if necessary) and read/set/write the device control reset * bit in the PHY. Wait the appropriate delay time for the device to * reset and release the semaphore (if necessary). **/ s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u32 ctrl; DEBUGFUNC("e1000_phy_hw_reset_generic"); if (phy->ops.check_reset_block) { ret_val = phy->ops.check_reset_block(hw); if (ret_val) return E1000_SUCCESS; } ret_val = phy->ops.acquire(hw); if (ret_val) return ret_val; ctrl = E1000_READ_REG(hw, E1000_CTRL); E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PHY_RST); E1000_WRITE_FLUSH(hw); usec_delay(phy->reset_delay_us); E1000_WRITE_REG(hw, E1000_CTRL, ctrl); E1000_WRITE_FLUSH(hw); usec_delay(150); phy->ops.release(hw); return phy->ops.get_cfg_done(hw); } /** * e1000_get_cfg_done_generic - Generic configuration done * @hw: pointer to the HW structure * * Generic function to wait 10 milli-seconds for configuration to complete * and return success. 
**/ s32 e1000_get_cfg_done_generic(struct e1000_hw E1000_UNUSEDARG *hw) { DEBUGFUNC("e1000_get_cfg_done_generic"); msec_delay_irq(10); return E1000_SUCCESS; } /** * e1000_phy_init_script_igp3 - Inits the IGP3 PHY * @hw: pointer to the HW structure * * Initializes a Intel Gigabit PHY3 when an EEPROM is not present. **/ s32 e1000_phy_init_script_igp3(struct e1000_hw *hw) { DEBUGOUT("Running IGP 3 PHY init script\n"); /* PHY init IGP 3 */ /* Enable rise/fall, 10-mode work in class-A */ hw->phy.ops.write_reg(hw, 0x2F5B, 0x9018); /* Remove all caps from Replica path filter */ hw->phy.ops.write_reg(hw, 0x2F52, 0x0000); /* Bias trimming for ADC, AFE and Driver (Default) */ hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24); /* Increase Hybrid poly bias */ hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0); /* Add 4% to Tx amplitude in Gig mode */ hw->phy.ops.write_reg(hw, 0x2010, 0x10B0); /* Disable trimming (TTT) */ hw->phy.ops.write_reg(hw, 0x2011, 0x0000); /* Poly DC correction to 94.6% + 2% for all channels */ hw->phy.ops.write_reg(hw, 0x20DD, 0x249A); /* ABS DC correction to 95.9% */ hw->phy.ops.write_reg(hw, 0x20DE, 0x00D3); /* BG temp curve trim */ hw->phy.ops.write_reg(hw, 0x28B4, 0x04CE); /* Increasing ADC OPAMP stage 1 currents to max */ hw->phy.ops.write_reg(hw, 0x2F70, 0x29E4); /* Force 1000 ( required for enabling PHY regs configuration) */ hw->phy.ops.write_reg(hw, 0x0000, 0x0140); /* Set upd_freq to 6 */ hw->phy.ops.write_reg(hw, 0x1F30, 0x1606); /* Disable NPDFE */ hw->phy.ops.write_reg(hw, 0x1F31, 0xB814); /* Disable adaptive fixed FFE (Default) */ hw->phy.ops.write_reg(hw, 0x1F35, 0x002A); /* Enable FFE hysteresis */ hw->phy.ops.write_reg(hw, 0x1F3E, 0x0067); /* Fixed FFE for short cable lengths */ hw->phy.ops.write_reg(hw, 0x1F54, 0x0065); /* Fixed FFE for medium cable lengths */ hw->phy.ops.write_reg(hw, 0x1F55, 0x002A); /* Fixed FFE for long cable lengths */ hw->phy.ops.write_reg(hw, 0x1F56, 0x002A); /* Enable Adaptive Clip Threshold */ hw->phy.ops.write_reg(hw, 0x1F72, 0x3FB0); /* AHT reset limit to 1 */ hw->phy.ops.write_reg(hw, 0x1F76, 0xC0FF); /* Set AHT master delay to 127 msec */ hw->phy.ops.write_reg(hw, 0x1F77, 0x1DEC); /* Set scan bits for AHT */ hw->phy.ops.write_reg(hw, 0x1F78, 0xF9EF); /* Set AHT Preset bits */ hw->phy.ops.write_reg(hw, 0x1F79, 0x0210); /* Change integ_factor of channel A to 3 */ hw->phy.ops.write_reg(hw, 0x1895, 0x0003); /* Change prop_factor of channels BCD to 8 */ hw->phy.ops.write_reg(hw, 0x1796, 0x0008); /* Change cg_icount + enable integbp for channels BCD */ hw->phy.ops.write_reg(hw, 0x1798, 0xD008); /* Change cg_icount + enable integbp + change prop_factor_master * to 8 for channel A */ hw->phy.ops.write_reg(hw, 0x1898, 0xD918); /* Disable AHT in Slave mode on channel A */ hw->phy.ops.write_reg(hw, 0x187A, 0x0800); /* Enable LPLU and disable AN to 1000 in non-D0a states, * Enable SPD+B2B */ hw->phy.ops.write_reg(hw, 0x0019, 0x008D); /* Enable restart AN on an1000_dis change */ hw->phy.ops.write_reg(hw, 0x001B, 0x2080); /* Enable wh_fifo read clock in 10/100 modes */ hw->phy.ops.write_reg(hw, 0x0014, 0x0045); /* Restart AN, Speed selection is 1000 */ hw->phy.ops.write_reg(hw, 0x0000, 0x1340); return E1000_SUCCESS; } /** * e1000_get_phy_type_from_id - Get PHY type from id * @phy_id: phy_id read from the phy * * Returns the phy type from the id. 
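 *
 * For example (illustrative only), an id of M88E1111_I_PHY_ID maps to
 * e1000_phy_m88, while an id not listed below falls through to
 * e1000_phy_unknown.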
**/ enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id) { enum e1000_phy_type phy_type = e1000_phy_unknown; switch (phy_id) { case M88E1000_I_PHY_ID: case M88E1000_E_PHY_ID: case M88E1111_I_PHY_ID: case M88E1011_I_PHY_ID: case M88E1543_E_PHY_ID: case M88E1512_E_PHY_ID: case I347AT4_E_PHY_ID: case M88E1112_E_PHY_ID: case M88E1340M_E_PHY_ID: phy_type = e1000_phy_m88; break; case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */ phy_type = e1000_phy_igp_2; break; case GG82563_E_PHY_ID: phy_type = e1000_phy_gg82563; break; case IGP03E1000_E_PHY_ID: phy_type = e1000_phy_igp_3; break; case IFE_E_PHY_ID: case IFE_PLUS_E_PHY_ID: case IFE_C_E_PHY_ID: phy_type = e1000_phy_ife; break; case BME1000_E_PHY_ID: case BME1000_E_PHY_ID_R2: phy_type = e1000_phy_bm; break; case I82578_E_PHY_ID: phy_type = e1000_phy_82578; break; case I82577_E_PHY_ID: phy_type = e1000_phy_82577; break; case I82579_E_PHY_ID: phy_type = e1000_phy_82579; break; case I217_E_PHY_ID: phy_type = e1000_phy_i217; break; case I82580_I_PHY_ID: phy_type = e1000_phy_82580; break; case I210_I_PHY_ID: phy_type = e1000_phy_i210; break; default: phy_type = e1000_phy_unknown; break; } return phy_type; } /** * e1000_determine_phy_address - Determines PHY address. * @hw: pointer to the HW structure * * This uses a trial and error method to loop through possible PHY * addresses. It tests each by reading the PHY ID registers and * checking for a match. **/ s32 e1000_determine_phy_address(struct e1000_hw *hw) { u32 phy_addr = 0; u32 i; enum e1000_phy_type phy_type = e1000_phy_unknown; hw->phy.id = phy_type; for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) { hw->phy.addr = phy_addr; i = 0; do { e1000_get_phy_id(hw); phy_type = e1000_get_phy_type_from_id(hw->phy.id); /* If phy_type is valid, break - we found our * PHY address */ if (phy_type != e1000_phy_unknown) return E1000_SUCCESS; msec_delay(1); i++; } while (i < 10); } return -E1000_ERR_PHY_TYPE; } /** * e1000_get_phy_addr_for_bm_page - Retrieve PHY page address * @page: page to access * * Returns the phy address for the page requested. **/ static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg) { u32 phy_addr = 2; if ((page >= 768) || (page == 0 && reg == 25) || (reg == 31)) phy_addr = 1; return phy_addr; } /** * e1000_write_phy_reg_bm - Write BM PHY register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * * Acquires semaphore, if necessary, then writes the data to PHY register * at the offset. Release any acquired semaphores before exiting. **/ s32 e1000_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) { s32 ret_val; u32 page = offset >> IGP_PAGE_SHIFT; DEBUGFUNC("e1000_write_phy_reg_bm"); ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; /* Page 800 works differently than the rest so it has its own func */ if (page == BM_WUC_PAGE) { ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, FALSE, false); goto release; } hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); if (offset > MAX_PHY_MULTI_PAGE_REG) { u32 page_shift, page_select; /* Page select is register 31 for phy address 1 and 22 for * phy address 2 and 3. Page select is shifted only for * phy address 1. 
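		 * For example, selecting page 2 through phy address 1 writes
		 * (2 << IGP_PAGE_SHIFT) = 0x40 to register 31, while through
		 * phy address 2/3 the raw page number goes to register 22
		 * (this assumes IGP_PAGE_SHIFT is 5, the shift used for the
		 * page extraction above).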
*/ if (hw->phy.addr == 1) { page_shift = IGP_PAGE_SHIFT; page_select = IGP01E1000_PHY_PAGE_SELECT; } else { page_shift = 0; page_select = BM_PHY_PAGE_SELECT; } /* Page is shifted left, PHY expects (page x 32) */ ret_val = e1000_write_phy_reg_mdic(hw, page_select, (page << page_shift)); if (ret_val) goto release; } ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); release: hw->phy.ops.release(hw); return ret_val; } /** * e1000_read_phy_reg_bm - Read BM PHY register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * * Acquires semaphore, if necessary, then reads the PHY register at offset * and storing the retrieved information in data. Release any acquired * semaphores before exiting. **/ s32 e1000_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) { s32 ret_val; u32 page = offset >> IGP_PAGE_SHIFT; DEBUGFUNC("e1000_read_phy_reg_bm"); ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; /* Page 800 works differently than the rest so it has its own func */ if (page == BM_WUC_PAGE) { ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, TRUE, FALSE); goto release; } hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); if (offset > MAX_PHY_MULTI_PAGE_REG) { u32 page_shift, page_select; /* Page select is register 31 for phy address 1 and 22 for * phy address 2 and 3. Page select is shifted only for * phy address 1. */ if (hw->phy.addr == 1) { page_shift = IGP_PAGE_SHIFT; page_select = IGP01E1000_PHY_PAGE_SELECT; } else { page_shift = 0; page_select = BM_PHY_PAGE_SELECT; } /* Page is shifted left, PHY expects (page x 32) */ ret_val = e1000_write_phy_reg_mdic(hw, page_select, (page << page_shift)); if (ret_val) goto release; } ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); release: hw->phy.ops.release(hw); return ret_val; } /** * e1000_read_phy_reg_bm2 - Read BM PHY register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data * * Acquires semaphore, if necessary, then reads the PHY register at offset * and storing the retrieved information in data. Release any acquired * semaphores before exiting. **/ s32 e1000_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data) { s32 ret_val; u16 page = (u16)(offset >> IGP_PAGE_SHIFT); DEBUGFUNC("e1000_read_phy_reg_bm2"); ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; /* Page 800 works differently than the rest so it has its own func */ if (page == BM_WUC_PAGE) { ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, TRUE, FALSE); goto release; } hw->phy.addr = 1; if (offset > MAX_PHY_MULTI_PAGE_REG) { /* Page is shifted left, PHY expects (page x 32) */ ret_val = e1000_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, page); if (ret_val) goto release; } ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); release: hw->phy.ops.release(hw); return ret_val; } /** * e1000_write_phy_reg_bm2 - Write BM PHY register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset * * Acquires semaphore, if necessary, then writes the data to PHY register * at the offset. Release any acquired semaphores before exiting. 
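 *
 * As in the read path above, the caller encodes the page in the upper bits
 * of @offset (offset >> IGP_PAGE_SHIFT) and the register number in the
 * lower bits (MAX_PHY_REG_ADDRESS & offset).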
**/ s32 e1000_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data) { s32 ret_val; u16 page = (u16)(offset >> IGP_PAGE_SHIFT); DEBUGFUNC("e1000_write_phy_reg_bm2"); ret_val = hw->phy.ops.acquire(hw); if (ret_val) return ret_val; /* Page 800 works differently than the rest so it has its own func */ if (page == BM_WUC_PAGE) { ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, FALSE, false); goto release; } hw->phy.addr = 1; if (offset > MAX_PHY_MULTI_PAGE_REG) { /* Page is shifted left, PHY expects (page x 32) */ ret_val = e1000_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, page); if (ret_val) goto release; } ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); release: hw->phy.ops.release(hw); return ret_val; } /** * e1000_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers * @hw: pointer to the HW structure * @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG * * Assumes semaphore already acquired and phy_reg points to a valid memory * address to store contents of the BM_WUC_ENABLE_REG register. **/ s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg) { s32 ret_val; u16 temp; DEBUGFUNC("e1000_enable_phy_wakeup_reg_access_bm"); if (!phy_reg) return -E1000_ERR_PARAM; /* All page select, port ctrl and wakeup registers use phy address 1 */ hw->phy.addr = 1; /* Select Port Control Registers page */ ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); if (ret_val) { DEBUGOUT("Could not set Port Control page\n"); return ret_val; } ret_val = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); if (ret_val) { DEBUGOUT2("Could not read PHY register %d.%d\n", BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); return ret_val; } /* Enable both PHY wakeup mode and Wakeup register page writes. * Prevent a power state change by disabling ME and Host PHY wakeup. */ temp = *phy_reg; temp |= BM_WUC_ENABLE_BIT; temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT); ret_val = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, temp); if (ret_val) { DEBUGOUT2("Could not write PHY register %d.%d\n", BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); return ret_val; } /* Select Host Wakeup Registers page - caller now able to write * registers on the Wakeup registers page */ return e1000_set_page_igp(hw, (BM_WUC_PAGE << IGP_PAGE_SHIFT)); } /** * e1000_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs * @hw: pointer to the HW structure * @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG * * Restore BM_WUC_ENABLE_REG to its original value. * * Assumes semaphore already acquired and *phy_reg is the contents of the * BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by * caller. 
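 *
 * Typical pairing (an illustrative sketch, not code from this file):
 *
 *	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
 *	... access registers on BM_WUC_PAGE ...
 *	ret_val = e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);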
**/ s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg) { s32 ret_val; DEBUGFUNC("e1000_disable_phy_wakeup_reg_access_bm"); if (!phy_reg) return -E1000_ERR_PARAM; /* Select Port Control Registers page */ ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); if (ret_val) { DEBUGOUT("Could not set Port Control page\n"); return ret_val; } /* Restore 769.17 to its original value */ ret_val = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, *phy_reg); if (ret_val) DEBUGOUT2("Could not restore PHY register %d.%d\n", BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); return ret_val; } /** * e1000_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register * @hw: pointer to the HW structure * @offset: register offset to be read or written * @data: pointer to the data to read or write * @read: determines if operation is read or write * @page_set: BM_WUC_PAGE already set and access enabled * * Read the PHY register at offset and store the retrieved information in * data, or write data to PHY register at offset. Note the procedure to * access the PHY wakeup registers is different than reading the other PHY * registers. It works as such: * 1) Set 769.17.2 (page 769, register 17, bit 2) = 1 * 2) Set page to 800 for host (801 if we were manageability) * 3) Write the address using the address opcode (0x11) * 4) Read or write the data using the data opcode (0x12) * 5) Restore 769.17.2 to its original value * * Steps 1 and 2 are done by e1000_enable_phy_wakeup_reg_access_bm() and * step 5 is done by e1000_disable_phy_wakeup_reg_access_bm(). * * Assumes semaphore is already acquired. When page_set==TRUE, assumes * the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack * is responsible for calls to e1000_[enable|disable]_phy_wakeup_reg_bm()). 
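 *
 * Steps 3 and 4 reduce to two MDIC accesses, mirroring the body below
 * (the read case is shown; illustrative only):
 *
 *	e1000_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg);
 *	e1000_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, data);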
 **/
static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
					  u16 *data, bool read, bool page_set)
{
	s32 ret_val;
-	u16 reg, page;
+	u16 reg = BM_PHY_REG_NUM(offset);
+	u16 page = BM_PHY_REG_PAGE(offset);
	u16 phy_reg = 0;

	DEBUGFUNC("e1000_access_phy_wakeup_reg_bm");
-
-	reg = BM_PHY_REG_NUM(offset);
-	page = BM_PHY_REG_PAGE(offset);

	/* Gig must be disabled for MDIO accesses to Host Wakeup reg page */
	if ((hw->mac.type == e1000_pchlan) &&
	    (!(E1000_READ_REG(hw, E1000_PHY_CTRL) &
	       E1000_PHY_CTRL_GBE_DISABLE)))
		DEBUGOUT1("Attempting to access page %d while gig enabled.\n",
			  page);

	if (!page_set) {
		/* Enable access to PHY wakeup registers */
		ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
		if (ret_val) {
			DEBUGOUT("Could not enable PHY wakeup reg access\n");
			return ret_val;
		}
	}

	DEBUGOUT2("Accessing PHY page %d reg 0x%x\n", page, reg);

	/* Write the Wakeup register page offset value using opcode 0x11 */
	ret_val = e1000_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg);
	if (ret_val) {
		DEBUGOUT1("Could not write address opcode to page %d\n", page);
		return ret_val;
	}

	if (read) {
		/* Read the Wakeup register page value using opcode 0x12 */
		ret_val = e1000_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
						  data);
	} else {
		/* Write the Wakeup register page value using opcode 0x12 */
		ret_val = e1000_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
						   *data);
	}

	if (ret_val) {
		DEBUGOUT2("Could not access PHY reg %d.%d\n", page, reg);
		return ret_val;
	}

	if (!page_set)
		ret_val = e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);

	return ret_val;
}

/**
 * e1000_power_up_phy_copper - Restore copper link in case of PHY power down
 * @hw: pointer to the HW structure
 *
 * After a PHY power down to save power, to turn off link during a driver
 * unload, or because wake on lan is not enabled, restore the link to its
 * previous settings.
 **/
void e1000_power_up_phy_copper(struct e1000_hw *hw)
{
	u16 mii_reg = 0;

	/* The PHY will retain its settings across a power down/up cycle */
	hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
	mii_reg &= ~MII_CR_POWER_DOWN;
	hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
}

/**
 * e1000_power_down_phy_copper - Power down copper PHY
 * @hw: pointer to the HW structure
 *
 * Power down the PHY to save power, or to turn off link during a driver
 * unload, or when wake on lan is not enabled.  The PHY retains its
 * settings across the power down/up cycle.
 **/
void e1000_power_down_phy_copper(struct e1000_hw *hw)
{
	u16 mii_reg = 0;

	/* The PHY will retain its settings across a power down/up cycle */
	hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
	mii_reg |= MII_CR_POWER_DOWN;
	hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
	msec_delay(1);
}

/**
 * __e1000_read_phy_reg_hv -  Read HV PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 * @locked: semaphore has already been acquired or not
 * @page_set: BM_WUC_PAGE already set and access enabled
 *
 * Acquires semaphore, if necessary, then reads the PHY register at offset
 * and stores the retrieved information in data.  Release any acquired
 * semaphore before exiting.
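 *
 * The (locked, page_set) pairs map onto the exported wrappers below:
 * e1000_read_phy_reg_hv uses (FALSE, FALSE),
 * e1000_read_phy_reg_hv_locked uses (TRUE, FALSE) and
 * e1000_read_phy_reg_page_hv uses (TRUE, TRUE).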
 **/
static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
				   bool locked, bool page_set)
{
	s32 ret_val;
	u16 page = BM_PHY_REG_PAGE(offset);
	u16 reg = BM_PHY_REG_NUM(offset);
	u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);

	DEBUGFUNC("__e1000_read_phy_reg_hv");

	if (!locked) {
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;
	}

	/* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
							 TRUE, page_set);
		goto out;
	}

	if (page > 0 && page < HV_INTC_FC_PAGE_START) {
		ret_val = e1000_access_phy_debug_regs_hv(hw, offset, data,
							 TRUE);
		goto out;
	}

	if (!page_set) {
		if (page == HV_INTC_FC_PAGE_START)
			page = 0;

		if (reg > MAX_PHY_MULTI_PAGE_REG) {
			/* Page is shifted left, PHY expects (page x 32) */
			ret_val = e1000_set_page_igp(hw,
						     (page << IGP_PAGE_SHIFT));

			hw->phy.addr = phy_addr;

			if (ret_val)
				goto out;
		}
	}

	DEBUGOUT3("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page,
		  page << IGP_PAGE_SHIFT, reg);

	ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
					  data);
out:
	if (!locked)
		hw->phy.ops.release(hw);

	return ret_val;
}

/**
 * e1000_read_phy_reg_hv -  Read HV PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 *
 * Acquires semaphore then reads the PHY register at offset and stores
 * the retrieved information in data.  Release the acquired semaphore
 * before exiting.
 **/
s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
{
	return __e1000_read_phy_reg_hv(hw, offset, data, FALSE, FALSE);
}

/**
 * e1000_read_phy_reg_hv_locked -  Read HV PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 *
 * Reads the PHY register at offset and stores the retrieved information
 * in data.  Assumes semaphore already acquired.
 **/
s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data)
{
	return __e1000_read_phy_reg_hv(hw, offset, data, TRUE, FALSE);
}

/**
 * e1000_read_phy_reg_page_hv - Read HV PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 *
 * Reads the PHY register at offset and stores the retrieved information
 * in data.  Assumes semaphore already acquired and page already set.
 **/
s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data)
{
	return __e1000_read_phy_reg_hv(hw, offset, data, TRUE, TRUE);
}

/**
 * __e1000_write_phy_reg_hv - Write HV PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write at register offset
 * @locked: semaphore has already been acquired or not
 * @page_set: BM_WUC_PAGE already set and access enabled
 *
 * Acquires semaphore, if necessary, then writes the data to PHY register
 * at the offset.  Release any acquired semaphores before exiting.
 **/
static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
				    bool locked, bool page_set)
{
	s32 ret_val;
	u16 page = BM_PHY_REG_PAGE(offset);
	u16 reg = BM_PHY_REG_NUM(offset);
	u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);

	DEBUGFUNC("__e1000_write_phy_reg_hv");

	if (!locked) {
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;
	}

	/* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
							 FALSE, page_set);
		goto out;
	}

	if (page > 0 && page < HV_INTC_FC_PAGE_START) {
		ret_val = e1000_access_phy_debug_regs_hv(hw, offset, &data,
							 FALSE);
		goto out;
	}

	if (!page_set) {
		if (page == HV_INTC_FC_PAGE_START)
			page = 0;

		/* Workaround MDIO accesses being disabled after entering IEEE
		 * Power Down (when bit 11 of the PHY Control register is set)
		 */
		if ((hw->phy.type == e1000_phy_82578) &&
		    (hw->phy.revision >= 1) &&
		    (hw->phy.addr == 2) &&
		    !(MAX_PHY_REG_ADDRESS & reg) &&
		    (data & (1 << 11))) {
			u16 data2 = 0x7EFF;

			ret_val = e1000_access_phy_debug_regs_hv(hw,
								 (1 << 6) | 0x3,
								 &data2, FALSE);
			if (ret_val)
				goto out;
		}

		if (reg > MAX_PHY_MULTI_PAGE_REG) {
			/* Page is shifted left, PHY expects (page x 32) */
			ret_val = e1000_set_page_igp(hw,
						     (page << IGP_PAGE_SHIFT));

			hw->phy.addr = phy_addr;

			if (ret_val)
				goto out;
		}
	}

	DEBUGOUT3("writing PHY page %d (or 0x%x shifted) reg 0x%x\n", page,
		  page << IGP_PAGE_SHIFT, reg);

	ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
					   data);

out:
	if (!locked)
		hw->phy.ops.release(hw);

	return ret_val;
}

/**
 * e1000_write_phy_reg_hv - Write HV PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write at register offset
 *
 * Acquires semaphore then writes the data to PHY register at the offset.
 * Release the acquired semaphores before exiting.
 **/
s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
{
	return __e1000_write_phy_reg_hv(hw, offset, data, FALSE, FALSE);
}

/**
 * e1000_write_phy_reg_hv_locked - Write HV PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write at register offset
 *
 * Writes the data to PHY register at the offset.  Assumes semaphore
 * already acquired.
 **/
s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data)
{
	return __e1000_write_phy_reg_hv(hw, offset, data, TRUE, FALSE);
}

/**
 * e1000_write_phy_reg_page_hv - Write HV PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write at register offset
 *
 * Writes the data to PHY register at the offset.  Assumes semaphore
 * already acquired and page already set.
 **/
s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data)
{
	return __e1000_write_phy_reg_hv(hw, offset, data, TRUE, TRUE);
}

/**
 * e1000_get_phy_addr_for_hv_page - Get PHY address based on page
 * @page: page to be accessed
 **/
static u32 e1000_get_phy_addr_for_hv_page(u32 page)
{
	u32 phy_addr = 2;

	if (page >= HV_INTC_FC_PAGE_START)
		phy_addr = 1;

	return phy_addr;
}

/**
 * e1000_access_phy_debug_regs_hv - Read HV PHY vendor specific high registers
 * @hw: pointer to the HW structure
 * @offset: register offset to be read or written
 * @data: pointer to the data to be read or written
 * @read: determines if operation is read or write
 *
 * Reads the PHY register at offset and stores the retrieved information
 * in data.  Assumes semaphore already acquired.
Note that the procedure * to access these regs uses the address port and data port to read/write. * These accesses done with PHY address 2 and without using pages. **/ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, u16 *data, bool read) { s32 ret_val; u32 addr_reg; u32 data_reg; DEBUGFUNC("e1000_access_phy_debug_regs_hv"); /* This takes care of the difference with desktop vs mobile phy */ addr_reg = ((hw->phy.type == e1000_phy_82578) ? I82578_ADDR_REG : I82577_ADDR_REG); data_reg = addr_reg + 1; /* All operations in this function are phy address 2 */ hw->phy.addr = 2; /* masking with 0x3F to remove the page from offset */ ret_val = e1000_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F); if (ret_val) { DEBUGOUT("Could not write the Address Offset port register\n"); return ret_val; } /* Read or write the data value next */ if (read) ret_val = e1000_read_phy_reg_mdic(hw, data_reg, data); else ret_val = e1000_write_phy_reg_mdic(hw, data_reg, *data); if (ret_val) DEBUGOUT("Could not access the Data port register\n"); return ret_val; } /** * e1000_link_stall_workaround_hv - Si workaround * @hw: pointer to the HW structure * * This function works around a Si bug where the link partner can get * a link up indication before the PHY does. If small packets are sent * by the link partner they can be placed in the packet buffer without * being properly accounted for by the PHY and will stall preventing * further packets from being received. The workaround is to clear the * packet buffer after the PHY detects link up. **/ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw) { s32 ret_val = E1000_SUCCESS; u16 data; DEBUGFUNC("e1000_link_stall_workaround_hv"); if (hw->phy.type != e1000_phy_82578) return E1000_SUCCESS; /* Do not apply workaround if in PHY loopback bit 14 set */ hw->phy.ops.read_reg(hw, PHY_CONTROL, &data); if (data & PHY_CONTROL_LB) return E1000_SUCCESS; /* check if link is up and at 1Gbps */ ret_val = hw->phy.ops.read_reg(hw, BM_CS_STATUS, &data); if (ret_val) return ret_val; data &= (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | BM_CS_STATUS_SPEED_MASK); if (data != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | BM_CS_STATUS_SPEED_1000)) return E1000_SUCCESS; msec_delay(200); /* flush the packets in the fifo buffer */ ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL, (HV_MUX_DATA_CTRL_GEN_TO_MAC | HV_MUX_DATA_CTRL_FORCE_SPEED)); if (ret_val) return ret_val; return hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC); } /** * e1000_check_polarity_82577 - Checks the polarity. * @hw: pointer to the HW structure * * Success returns 0, Failure returns -E1000_ERR_PHY (-2) * * Polarity is determined based on the PHY specific status register. **/ s32 e1000_check_polarity_82577(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; s32 ret_val; u16 data; DEBUGFUNC("e1000_check_polarity_82577"); ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); if (!ret_val) phy->cable_polarity = ((data & I82577_PHY_STATUS2_REV_POLARITY) ? e1000_rev_polarity_reversed : e1000_rev_polarity_normal); return ret_val; } /** * e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY * @hw: pointer to the HW structure * * Calls the PHY setup function to force speed and duplex. 
 **/
s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 phy_data;
	bool link;

	DEBUGFUNC("e1000_phy_force_speed_duplex_82577");

	ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
	if (ret_val)
		return ret_val;

	e1000_phy_force_speed_duplex_setup(hw, &phy_data);

	ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
	if (ret_val)
		return ret_val;

	usec_delay(1);

	if (phy->autoneg_wait_to_complete) {
		DEBUGOUT("Waiting for forced speed/duplex link on 82577 phy\n");

		ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
						     100000, &link);
		if (ret_val)
			return ret_val;

		if (!link)
			DEBUGOUT("Link taking longer than expected.\n");

		/* Try once more */
		ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
						     100000, &link);
	}

	return ret_val;
}

/**
 * e1000_get_phy_info_82577 - Retrieve I82577 PHY information
 * @hw: pointer to the HW structure
 *
 * Read PHY status to determine if link is up. If link is up, then
 * set/determine 10base-T extended distance and polarity correction. Read
 * PHY port status to determine MDI/MDIx and speed. Based on the speed,
 * determine the cable length and the local and remote receiver status.
 **/
s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 data;
	bool link;

	DEBUGFUNC("e1000_get_phy_info_82577");

	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val)
		return ret_val;

	if (!link) {
		DEBUGOUT("Phy info is only valid if link is up\n");
		return -E1000_ERR_CONFIG;
	}

	phy->polarity_correction = TRUE;

	ret_val = e1000_check_polarity_82577(hw);
	if (ret_val)
		return ret_val;

	ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
	if (ret_val)
		return ret_val;

	phy->is_mdix = !!(data & I82577_PHY_STATUS2_MDIX);

	if ((data & I82577_PHY_STATUS2_SPEED_MASK) ==
	    I82577_PHY_STATUS2_SPEED_1000MBPS) {
		ret_val = hw->phy.ops.get_cable_length(hw);
		if (ret_val)
			return ret_val;

		ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
		if (ret_val)
			return ret_val;

		phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
				? e1000_1000t_rx_status_ok
				: e1000_1000t_rx_status_not_ok;

		phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
				 ? e1000_1000t_rx_status_ok
				 : e1000_1000t_rx_status_not_ok;
	} else {
		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
		phy->local_rx = e1000_1000t_rx_status_undefined;
		phy->remote_rx = e1000_1000t_rx_status_undefined;
	}

	return E1000_SUCCESS;
}

/**
 * e1000_get_cable_length_82577 - Determine cable length for 82577 PHY
 * @hw: pointer to the HW structure
 *
 * Reads the diagnostic status register and verifies result is valid before
 * placing it in the phy_cable_length field.
 **/
s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 phy_data, length;

	DEBUGFUNC("e1000_get_cable_length_82577");

	ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data);
	if (ret_val)
		return ret_val;

	length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
		  I82577_DSTATUS_CABLE_LENGTH_SHIFT);

	if (length == E1000_CABLE_LENGTH_UNDEFINED)
		return -E1000_ERR_PHY;

	phy->cable_length = length;

	return E1000_SUCCESS;
}

/**
 * e1000_write_phy_reg_gs40g - Write GS40G PHY register
 * @hw: pointer to the HW structure
 * @offset: register offset to write to
 * @data: data to write at register offset
 *
 * Acquires semaphore, if necessary, then writes the data to PHY register
 * at the offset. Release any acquired semaphores before exiting.
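 *
 * The 32-bit offset packs the page number in its upper half and the
 * register number in its lower half; a sketch of writing register 21 on
 * page 0 (the values are illustrative only):
 *
 *	u32 off = (0 << GS40G_PAGE_SHIFT) | 21;
 *	ret_val = e1000_write_phy_reg_gs40g(hw, off, phy_data);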
 **/
s32 e1000_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data)
{
	s32 ret_val;
	u16 page = offset >> GS40G_PAGE_SHIFT;

	DEBUGFUNC("e1000_write_phy_reg_gs40g");

	offset = offset & GS40G_OFFSET_MASK;
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	ret_val = e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page);
	if (ret_val)
		goto release;
	ret_val = e1000_write_phy_reg_mdic(hw, offset, data);

release:
	hw->phy.ops.release(hw);
	return ret_val;
}

/**
 * e1000_read_phy_reg_gs40g - Read GS40G PHY register
 * @hw: pointer to the HW structure
 * @offset: lower half is register offset to read from
 *	    upper half is page to use.
 * @data: data to read at register offset
 *
 * Acquires semaphore, if necessary, then reads the data in the PHY register
 * at the offset. Release any acquired semaphores before exiting.
 **/
s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data)
{
	s32 ret_val;
	u16 page = offset >> GS40G_PAGE_SHIFT;

	DEBUGFUNC("e1000_read_phy_reg_gs40g");

	offset = offset & GS40G_OFFSET_MASK;
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	ret_val = e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page);
	if (ret_val)
		goto release;
	ret_val = e1000_read_phy_reg_mdic(hw, offset, data);

release:
	hw->phy.ops.release(hw);
	return ret_val;
}

/**
 * e1000_read_phy_reg_mphy - Read mPHY control register
 * @hw: pointer to the HW structure
 * @address: address to be read
 * @data: pointer to the read data
 *
 * Reads the mPHY control register in the PHY at offset and stores the
 * information read in data.
 **/
s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data)
{
	u32 mphy_ctrl = 0;
	bool locked = FALSE;
	bool ready;

	DEBUGFUNC("e1000_read_phy_reg_mphy");

	/* Check if mPHY is ready for read/write operations */
	ready = e1000_is_mphy_ready(hw);
	if (!ready)
		return -E1000_ERR_PHY;

	/* Check if mPHY access is disabled and enable it if so */
	mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL);
	if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) {
		locked = TRUE;
		ready = e1000_is_mphy_ready(hw);
		if (!ready)
			return -E1000_ERR_PHY;
		mphy_ctrl |= E1000_MPHY_ENA_ACCESS;
		E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
	}

	/* Set the address that we want to read */
	ready = e1000_is_mphy_ready(hw);
	if (!ready)
		return -E1000_ERR_PHY;

	/* We mask the address because we want to use only the current lane */
	mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK &
		     ~E1000_MPHY_ADDRESS_FNC_OVERRIDE) |
		    (address & E1000_MPHY_ADDRESS_MASK);
	E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);

	/* Read data from the address */
	ready = e1000_is_mphy_ready(hw);
	if (!ready)
		return -E1000_ERR_PHY;
	*data = E1000_READ_REG(hw, E1000_MPHY_DATA);

	/* Disable access to mPHY if it was originally disabled */
	if (locked) {
		ready = e1000_is_mphy_ready(hw);
		if (!ready)
			return -E1000_ERR_PHY;
		E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL,
				E1000_MPHY_DIS_ACCESS);
	}

	return E1000_SUCCESS;
}

/**
 * e1000_write_phy_reg_mphy - Write mPHY control register
 * @hw: pointer to the HW structure
 * @address: address to write to
 * @data: data to write to register at offset
 * @line_override: used when we want to use different line than default one
 *
 * Writes data to mPHY control register.
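 *
 * A usage sketch (illustrative; the address and data values are made up):
 *
 *	ret_val = e1000_write_phy_reg_mphy(hw, 0x0010, 0x00000003, FALSE);
 *	if (ret_val)
 *		return ret_val;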
 **/
s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data,
			     bool line_override)
{
	u32 mphy_ctrl = 0;
	bool locked = FALSE;
	bool ready;

	DEBUGFUNC("e1000_write_phy_reg_mphy");

	/* Check if mPHY is ready for read/write operations */
	ready = e1000_is_mphy_ready(hw);
	if (!ready)
		return -E1000_ERR_PHY;

	/* Check if mPHY access is disabled and enable it if so */
	mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL);
	if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) {
		locked = TRUE;
		ready = e1000_is_mphy_ready(hw);
		if (!ready)
			return -E1000_ERR_PHY;
		mphy_ctrl |= E1000_MPHY_ENA_ACCESS;
		E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
	}

	/* Set the address that we want to write */
	ready = e1000_is_mphy_ready(hw);
	if (!ready)
		return -E1000_ERR_PHY;

	/* We mask the address because we want to use only the current lane */
	if (line_override)
		mphy_ctrl |= E1000_MPHY_ADDRESS_FNC_OVERRIDE;
	else
		mphy_ctrl &= ~E1000_MPHY_ADDRESS_FNC_OVERRIDE;
	mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK) |
		    (address & E1000_MPHY_ADDRESS_MASK);
	E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);

	/* Write data to the address */
	ready = e1000_is_mphy_ready(hw);
	if (!ready)
		return -E1000_ERR_PHY;
	E1000_WRITE_REG(hw, E1000_MPHY_DATA, data);

	/* Disable access to mPHY if it was originally disabled */
	if (locked) {
		ready = e1000_is_mphy_ready(hw);
		if (!ready)
			return -E1000_ERR_PHY;
		E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL,
				E1000_MPHY_DIS_ACCESS);
	}

	return E1000_SUCCESS;
}

/**
 * e1000_is_mphy_ready - Check if mPHY control register is not busy
 * @hw: pointer to the HW structure
 *
 * Returns mPHY control register status.
 **/
bool e1000_is_mphy_ready(struct e1000_hw *hw)
{
	u16 retry_count = 0;
	u32 mphy_ctrl = 0;
	bool ready = FALSE;

	while (retry_count < 2) {
		mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL);
		if (mphy_ctrl & E1000_MPHY_BUSY) {
			usec_delay(20);
			retry_count++;
			continue;
		}
		ready = TRUE;
		break;
	}

	if (!ready)
		DEBUGOUT("ERROR READING mPHY control register, phy is busy.\n");

	return ready;
}
Index: stable/10/sys/dev/e1000/e1000_regs.h
===================================================================
--- stable/10/sys/dev/e1000/e1000_regs.h (revision 296054)
+++ stable/10/sys/dev/e1000/e1000_regs.h (revision 296055)
@@ -1,686 +1,695 @@
/****************************************************************************** Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _E1000_REGS_H_ #define _E1000_REGS_H_ #define E1000_CTRL 0x00000 /* Device Control - RW */ #define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */ #define E1000_STATUS 0x00008 /* Device Status - RO */ #define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ #define E1000_EERD 0x00014 /* EEPROM Read - RW */ #define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ #define E1000_FLA 0x0001C /* Flash Access - RW */ #define E1000_MDIC 0x00020 /* MDI Control - RW */ #define E1000_MDICNFG 0x00E04 /* MDI Config - RW */ #define E1000_REGISTER_SET_SIZE 0x20000 /* CSR Size */ #define E1000_EEPROM_INIT_CTRL_WORD_2 0x0F /* EEPROM Init Ctrl Word 2 */ #define E1000_EEPROM_PCIE_CTRL_WORD_2 0x28 /* EEPROM PCIe Ctrl Word 2 */ #define E1000_BARCTRL 0x5BBC /* BAR ctrl reg */ #define E1000_BARCTRL_FLSIZE 0x0700 /* BAR ctrl Flsize */ #define E1000_BARCTRL_CSRSIZE 0x2000 /* BAR ctrl CSR size */ #define E1000_MPHY_ADDR_CTRL 0x0024 /* GbE MPHY Address Control */ #define E1000_MPHY_DATA 0x0E10 /* GBE MPHY Data */ #define E1000_MPHY_STAT 0x0E0C /* GBE MPHY Statistics */ #define E1000_PPHY_CTRL 0x5b48 /* PCIe PHY Control */ #define E1000_I350_BARCTRL 0x5BFC /* BAR ctrl reg */ #define E1000_I350_DTXMXPKTSZ 0x355C /* Maximum sent packet size reg*/ #define E1000_SCTL 0x00024 /* SerDes Control - RW */ #define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ #define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ #define E1000_FEXT 0x0002C /* Future Extended - RW */ #define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */ #define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */ #define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */ #define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */ #define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */ +#define E1000_FEXTNVM9 0x5BB4 /* Future Extended NVM 9 - RW */ +#define E1000_FEXTNVM11 0x5BBC /* Future Extended NVM 11 - RW */ +#define E1000_PCIEANACFG 0x00F18 /* PCIE Analog Config */ #define E1000_FCT 0x00030 /* Flow Control Type - RW */ #define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ #define E1000_VET 0x00038 /* VLAN Ether Type - RW */ #define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ #define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ #define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ #define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ #define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ #define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ #define E1000_IVAR 0x000E4 /* Interrupt Vector Allocation Register - RW */ #define E1000_SVCR 0x000F0 #define E1000_SVT 0x000F4 #define E1000_LPIC 0x000FC /* Low Power IDLE control */ #define E1000_RCTL 0x00100 /* Rx Control - RW */ #define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ #define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */ #define E1000_RXCW 0x00180 /* 
Rx Configuration Word - RO */ #define E1000_PBA_ECC 0x01100 /* PBA ECC Register */ #define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ #define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) #define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ #define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ #define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ #define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ #define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */ #define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */ #define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ #define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ #define E1000_TCTL 0x00400 /* Tx Control - RW */ #define E1000_TCTL_EXT 0x00404 /* Extended Tx Control - RW */ #define E1000_TIPG 0x00410 /* Tx Inter-packet gap -RW */ #define E1000_TBT 0x00448 /* Tx Burst Timer - RW */ #define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ #define E1000_LEDCTL 0x00E00 /* LED Control - RW */ #define E1000_LEDMUX 0x08130 /* LED MUX Control */ #define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ #define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ #define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ #define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */ #define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ #define E1000_PBS 0x01008 /* Packet Buffer Size */ #define E1000_PBECCSTS 0x0100C /* Packet Buffer ECC Status - RW */ +#define E1000_IOSFPC 0x00F28 /* TX corrupted data */ #define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_EEMNGCTL_I210 0x01010 /* i210 MNG EEprom Mode Control */ #define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ #define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */ #define E1000_FLASHT 0x01028 /* FLASH Timer Register */ #define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ #define E1000_FLSWCTL 0x01030 /* FLASH control register */ #define E1000_FLSWDATA 0x01034 /* FLASH data register */ #define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */ #define E1000_FLOP 0x0103C /* FLASH Opcode Register */ #define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ #define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */ #define E1000_I2CBB_EN 0x00000100 /* I2C - Bit Bang Enable */ #define E1000_I2C_CLK_OUT 0x00000200 /* I2C- Clock */ #define E1000_I2C_DATA_OUT 0x00000400 /* I2C- Data Out */ #define E1000_I2C_DATA_OE_N 0x00000800 /* I2C- Data Output Enable */ #define E1000_I2C_DATA_IN 0x00001000 /* I2C- Data In */ #define E1000_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */ #define E1000_I2C_CLK_IN 0x00004000 /* I2C- Clock In */ #define E1000_I2C_CLK_STRETCH_DIS 0x00008000 /* I2C- Dis Clk Stretching */ #define E1000_WDSTP 0x01040 /* Watchdog Setup - RW */ #define E1000_SWDSTS 0x01044 /* SW Device Status - RW */ #define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ #define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */ #define E1000_VPDDIAG 0x01060 /* VPD Diagnostic - RO */ #define E1000_ICR_V2 0x01500 /* Intr Cause - new location - RC */ #define E1000_ICS_V2 0x01504 /* Intr Cause Set - new location - WO */ #define E1000_IMS_V2 0x01508 /* Intr Mask Set/Read - new location - RW */ #define E1000_IMC_V2 0x0150C /* Intr Mask Clear - new location - WO */ #define E1000_IAM_V2 0x01510 /* Intr Ack Auto Mask - new location - RW */ #define E1000_ERT 0x02008 /* Early Rx Threshold - 
RW */ #define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ #define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ #define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ #define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */ #define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */ #define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */ #define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */ #define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */ #define E1000_PBRTH 0x02458 /* PB Rx Arbitration Threshold - RW */ #define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ /* Split and Replication Rx Control - RW */ #define E1000_RDPUMB 0x025CC /* DMA Rx Descriptor uC Mailbox - RW */ #define E1000_RDPUAD 0x025D0 /* DMA Rx Descriptor uC Addr Command - RW */ #define E1000_RDPUWD 0x025D4 /* DMA Rx Descriptor uC Data Write - RW */ #define E1000_RDPURD 0x025D8 /* DMA Rx Descriptor uC Data Read - RW */ #define E1000_RDPUCTL 0x025DC /* DMA Rx Descriptor uC Control - RW */ #define E1000_PBDIAG 0x02458 /* Packet Buffer Diagnostic - RW */ #define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ #define E1000_IRPBS 0x02404 /* Same as RXPBS, renamed for newer Si - RW */ #define E1000_PBRWAC 0x024E8 /* Rx packet buffer wrap around counter - RO */ #define E1000_RDTR 0x02820 /* Rx Delay Timer - RW */ #define E1000_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */ #define E1000_EMIADD 0x10 /* Extended Memory Indirect Address */ #define E1000_EMIDATA 0x11 /* Extended Memory Indirect Data */ #define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */ #define E1000_I210_FLMNGCTL 0x12038 #define E1000_I210_FLMNGDATA 0x1203C #define E1000_I210_FLMNGCNT 0x12040 #define E1000_I210_FLSWCTL 0x12048 #define E1000_I210_FLSWDATA 0x1204C #define E1000_I210_FLSWCNT 0x12050 #define E1000_I210_FLA 0x1201C #define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n)) #define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */ /* QAV Tx mode control register */ #define E1000_I210_TQAVCTRL 0x3570 /* QAV Tx mode control register bitfields masks */ /* QAV enable */ #define E1000_TQAVCTRL_MODE (1 << 0) /* Fetching arbitration type */ #define E1000_TQAVCTRL_FETCH_ARB (1 << 4) /* Fetching timer enable */ #define E1000_TQAVCTRL_FETCH_TIMER_ENABLE (1 << 5) /* Launch arbitration type */ #define E1000_TQAVCTRL_LAUNCH_ARB (1 << 8) /* Launch timer enable */ #define E1000_TQAVCTRL_LAUNCH_TIMER_ENABLE (1 << 9) /* SP waits for SR enable */ #define E1000_TQAVCTRL_SP_WAIT_SR (1 << 10) /* Fetching timer correction */ #define E1000_TQAVCTRL_FETCH_TIMER_DELTA_OFFSET 16 #define E1000_TQAVCTRL_FETCH_TIMER_DELTA \ (0xFFFF << E1000_TQAVCTRL_FETCH_TIMER_DELTA_OFFSET) /* High credit registers where _n can be 0 or 1. */ #define E1000_I210_TQAVHC(_n) (0x300C + 0x40 * (_n)) /* Queues fetch arbitration priority control register */ #define E1000_I210_TQAVARBCTRL 0x3574 /* Queues priority masks where _n and _p can be 0-3. */ #define E1000_TQAVARBCTRL_QUEUE_PRI(_n, _p) ((_p) << (2 * (_n))) /* QAV Tx mode control registers where _n can be 0 or 1. */ #define E1000_I210_TQAVCC(_n) (0x3004 + 0x40 * (_n)) /* QAV Tx mode control register bitfields masks */ #define E1000_TQAVCC_IDLE_SLOPE 0xFFFF /* Idle slope */ #define E1000_TQAVCC_KEEP_CREDITS (1 << 30) /* Keep credits opt enable */ #define E1000_TQAVCC_QUEUE_MODE (1 << 31) /* SP vs. 
SR Tx mode */ /* Good transmitted packets counter registers */ #define E1000_PQGPTC(_n) (0x010014 + (0x100 * (_n))) /* Queues packet buffer size masks where _n can be 0-3 and _s 0-63 [kB] */ #define E1000_I210_TXPBS_SIZE(_n, _s) ((_s) << (6 * (_n))) #define E1000_MMDAC 13 /* MMD Access Control */ #define E1000_MMDAAD 14 /* MMD Access Address/Data */ /* Convenience macros * * Note: "_n" is the queue number of the register to be written to. * * Example usage: * E1000_RDBAL_REG(current_rx_queue) */ #define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \ (0x0C000 + ((_n) * 0x40))) #define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \ (0x0C004 + ((_n) * 0x40))) #define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \ (0x0C008 + ((_n) * 0x40))) #define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \ (0x0C00C + ((_n) * 0x40))) #define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \ (0x0C010 + ((_n) * 0x40))) #define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \ (0x0C014 + ((_n) * 0x40))) #define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n) #define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \ (0x0C018 + ((_n) * 0x40))) #define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \ (0x0C028 + ((_n) * 0x40))) #define E1000_RQDPC(_n) ((_n) < 4 ? (0x02830 + ((_n) * 0x100)) : \ (0x0C030 + ((_n) * 0x40))) #define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \ (0x0E000 + ((_n) * 0x40))) #define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \ (0x0E004 + ((_n) * 0x40))) #define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \ (0x0E008 + ((_n) * 0x40))) #define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \ (0x0E010 + ((_n) * 0x40))) #define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \ (0x0E014 + ((_n) * 0x40))) #define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n) #define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \ (0x0E018 + ((_n) * 0x40))) #define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \ (0x0E028 + ((_n) * 0x40))) #define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) : \ (0x0E038 + ((_n) * 0x40))) #define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) : \ (0x0E03C + ((_n) * 0x40))) #define E1000_TARC(_n) (0x03840 + ((_n) * 0x100)) #define E1000_RSRPD 0x02C00 /* Rx Small Packet Detect - RW */ #define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ #define E1000_TXDMAC 0x03000 /* Tx DMA Control - RW */ #define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ #define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4)) #define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ (0x054E0 + ((_i - 16) * 8))) #define E1000_RAH(_i) (((_i) <= 15) ? 
(0x05404 + ((_i) * 8)) : \
			(0x054E4 + ((_i - 16) * 8)))
#define E1000_SHRAL(_i) (0x05438 + ((_i) * 8))
#define E1000_SHRAH(_i) (0x0543C + ((_i) * 8))
#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8))
#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8))
#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8))
#define E1000_PBSLAC 0x03100 /* Pkt Buffer Slave Access Control */
#define E1000_PBSLAD(_n) (0x03110 + (0x4 * (_n))) /* Pkt Buffer DWORD */
#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */
/* Same as TXPBS, renamed for newer Si - RW */
#define E1000_ITPBS 0x03404
#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
#define E1000_TDPUMB 0x0357C /* DMA Tx Desc uC Mail Box - RW */
#define E1000_TDPUAD 0x03580 /* DMA Tx Desc uC Addr Command - RW */
#define E1000_TDPUWD 0x03584 /* DMA Tx Desc uC Data Write - RW */
#define E1000_TDPURD 0x03588 /* DMA Tx Desc uC Data Read - RW */
#define E1000_TDPUCTL 0x0358C /* DMA Tx Desc uC Control - RW */
#define E1000_DTXCTL 0x03590 /* DMA Tx Control - RW */
#define E1000_DTXTCPFLGL 0x0359C /* DMA Tx Control flag low - RW */
#define E1000_DTXTCPFLGH 0x035A0 /* DMA Tx Control flag high - RW */
/* DMA Tx Max Total Allow Size Reqs - RW */
#define E1000_DTXMXSZRQ 0x03540
#define E1000_TIDV 0x03820 /* Tx Interrupt Delay Value - RW */
#define E1000_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */
#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */
#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */
#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */
#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */
#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */
#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */
#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */
#define E1000_COLC 0x04028 /* Collision Count - R/clr */
#define E1000_DC 0x04030 /* Defer Count - R/clr */
#define E1000_TNCRS 0x04034 /* Tx-No CRS - R/clr */
#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */
#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */
#define E1000_XONRXC 0x04048 /* XON Rx Count - R/clr */
#define E1000_XONTXC 0x0404C /* XON Tx Count - R/clr */
#define E1000_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */
#define E1000_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */
#define E1000_FCRUC 0x04058 /* Flow Control Rx Unsupported Count - R/clr */
#define E1000_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */
#define E1000_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */
#define E1000_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */
#define E1000_PRC511 0x04068 /* Packets Rx (256-511 bytes) - R/clr */
#define E1000_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */
#define E1000_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */
#define E1000_GPRC 0x04074 /* Good Packets Rx Count - R/clr */
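/* The statistics registers in this block are read-to-clear ("R/clr"), so a
 * driver must accumulate them into software totals as it polls. A minimal
 * sketch (the adapter/stats fields are illustrative, not defined in this
 * header):
 *
 *	adapter->stats.crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
 *	adapter->stats.gprc += E1000_READ_REG(hw, E1000_GPRC);
 */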
#define E1000_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */ #define E1000_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */ #define E1000_GPTC 0x04080 /* Good Packets Tx Count - R/clr */ #define E1000_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */ #define E1000_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */ #define E1000_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */ #define E1000_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */ #define E1000_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */ #define E1000_RUC 0x040A4 /* Rx Undersize Count - R/clr */ #define E1000_RFC 0x040A8 /* Rx Fragment Count - R/clr */ #define E1000_ROC 0x040AC /* Rx Oversize Count - R/clr */ #define E1000_RJC 0x040B0 /* Rx Jabber Count - R/clr */ #define E1000_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */ #define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ #define E1000_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */ #define E1000_TORL 0x040C0 /* Total Octets Rx Low - R/clr */ #define E1000_TORH 0x040C4 /* Total Octets Rx High - R/clr */ #define E1000_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */ #define E1000_TOTH 0x040CC /* Total Octets Tx High - R/clr */ #define E1000_TPR 0x040D0 /* Total Packets Rx - R/clr */ #define E1000_TPT 0x040D4 /* Total Packets Tx - R/clr */ #define E1000_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */ #define E1000_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */ #define E1000_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */ #define E1000_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */ #define E1000_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */ #define E1000_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */ #define E1000_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */ #define E1000_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */ #define E1000_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */ #define E1000_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */ #define E1000_IAC 0x04100 /* Interrupt Assertion Count */ #define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Pkt Timer Expire Count */ #define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Abs Timer Expire Count */ #define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */ #define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */ #define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ #define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */ #define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Desc Min Thresh Count */ #define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ #define E1000_CRC_OFFSET 0x05F50 /* CRC Offset register */ #define E1000_VFGPRC 0x00F10 #define E1000_VFGORC 0x00F18 #define E1000_VFMPRC 0x00F3C #define E1000_VFGPTC 0x00F14 #define E1000_VFGOTC 0x00F34 #define E1000_VFGOTLBC 0x00F50 #define E1000_VFGPTLBC 0x00F44 #define E1000_VFGORLBC 0x00F48 #define E1000_VFGPRLBC 0x00F40 /* Virtualization statistical counters */ #define E1000_PFVFGPRC(_n) (0x010010 + (0x100 * (_n))) #define E1000_PFVFGPTC(_n) (0x010014 + (0x100 * (_n))) #define E1000_PFVFGORC(_n) (0x010018 + (0x100 * (_n))) #define E1000_PFVFGOTC(_n) (0x010034 + (0x100 * (_n))) #define E1000_PFVFMPRC(_n) (0x010038 + (0x100 * (_n))) #define E1000_PFVFGPRLBC(_n) (0x010040 + (0x100 * (_n))) #define E1000_PFVFGPTLBC(_n) (0x010044 + (0x100 * (_n))) #define E1000_PFVFGORLBC(_n) (0x010048 + (0x100 * (_n))) #define 
E1000_PFVFGOTLBC(_n) (0x010050 + (0x100 * (_n))) /* LinkSec */ #define E1000_LSECTXUT 0x04300 /* Tx Untagged Pkt Cnt */ #define E1000_LSECTXPKTE 0x04304 /* Encrypted Tx Pkts Cnt */ #define E1000_LSECTXPKTP 0x04308 /* Protected Tx Pkt Cnt */ #define E1000_LSECTXOCTE 0x0430C /* Encrypted Tx Octets Cnt */ #define E1000_LSECTXOCTP 0x04310 /* Protected Tx Octets Cnt */ #define E1000_LSECRXUT 0x04314 /* Untagged non-Strict Rx Pkt Cnt */ #define E1000_LSECRXOCTD 0x0431C /* Rx Octets Decrypted Count */ #define E1000_LSECRXOCTV 0x04320 /* Rx Octets Validated */ #define E1000_LSECRXBAD 0x04324 /* Rx Bad Tag */ #define E1000_LSECRXNOSCI 0x04328 /* Rx Packet No SCI Count */ #define E1000_LSECRXUNSCI 0x0432C /* Rx Packet Unknown SCI Count */ #define E1000_LSECRXUNCH 0x04330 /* Rx Unchecked Packets Count */ #define E1000_LSECRXDELAY 0x04340 /* Rx Delayed Packet Count */ #define E1000_LSECRXLATE 0x04350 /* Rx Late Packets Count */ #define E1000_LSECRXOK(_n) (0x04360 + (0x04 * (_n))) /* Rx Pkt OK Cnt */ #define E1000_LSECRXINV(_n) (0x04380 + (0x04 * (_n))) /* Rx Invalid Cnt */ #define E1000_LSECRXNV(_n) (0x043A0 + (0x04 * (_n))) /* Rx Not Valid Cnt */ #define E1000_LSECRXUNSA 0x043C0 /* Rx Unused SA Count */ #define E1000_LSECRXNUSA 0x043D0 /* Rx Not Using SA Count */ #define E1000_LSECTXCAP 0x0B000 /* Tx Capabilities Register - RO */ #define E1000_LSECRXCAP 0x0B300 /* Rx Capabilities Register - RO */ #define E1000_LSECTXCTRL 0x0B004 /* Tx Control - RW */ #define E1000_LSECRXCTRL 0x0B304 /* Rx Control - RW */ #define E1000_LSECTXSCL 0x0B008 /* Tx SCI Low - RW */ #define E1000_LSECTXSCH 0x0B00C /* Tx SCI High - RW */ #define E1000_LSECTXSA 0x0B010 /* Tx SA0 - RW */ #define E1000_LSECTXPN0 0x0B018 /* Tx SA PN 0 - RW */ #define E1000_LSECTXPN1 0x0B01C /* Tx SA PN 1 - RW */ #define E1000_LSECRXSCL 0x0B3D0 /* Rx SCI Low - RW */ #define E1000_LSECRXSCH 0x0B3E0 /* Rx SCI High - RW */ /* LinkSec Tx 128-bit Key 0 - WO */ #define E1000_LSECTXKEY0(_n) (0x0B020 + (0x04 * (_n))) /* LinkSec Tx 128-bit Key 1 - WO */ #define E1000_LSECTXKEY1(_n) (0x0B030 + (0x04 * (_n))) #define E1000_LSECRXSA(_n) (0x0B310 + (0x04 * (_n))) /* Rx SAs - RW */ #define E1000_LSECRXPN(_n) (0x0B330 + (0x04 * (_n))) /* Rx SAs - RW */ /* LinkSec Rx Keys - where _n is the SA no. and _m the 4 dwords of the 128 bit * key - RW. 
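* For example, dword 2 of SA 1's key decodes to
* 0x0B350 + (0x10 * 1) + (0x04 * 2) = 0x0B368.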
*/ #define E1000_LSECRXKEY(_n, _m) (0x0B350 + (0x10 * (_n)) + (0x04 * (_m))) #define E1000_SSVPC 0x041A0 /* Switch Security Violation Pkt Cnt */ #define E1000_IPSCTRL 0xB430 /* IpSec Control Register */ #define E1000_IPSRXCMD 0x0B408 /* IPSec Rx Command Register - RW */ #define E1000_IPSRXIDX 0x0B400 /* IPSec Rx Index - RW */ /* IPSec Rx IPv4/v6 Address - RW */ #define E1000_IPSRXIPADDR(_n) (0x0B420 + (0x04 * (_n))) /* IPSec Rx 128-bit Key - RW */ #define E1000_IPSRXKEY(_n) (0x0B410 + (0x04 * (_n))) #define E1000_IPSRXSALT 0x0B404 /* IPSec Rx Salt - RW */ #define E1000_IPSRXSPI 0x0B40C /* IPSec Rx SPI - RW */ /* IPSec Tx 128-bit Key - RW */ #define E1000_IPSTXKEY(_n) (0x0B460 + (0x04 * (_n))) #define E1000_IPSTXSALT 0x0B454 /* IPSec Tx Salt - RW */ #define E1000_IPSTXIDX 0x0B450 /* IPSec Tx SA IDX - RW */ #define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */ #define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */ #define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */ #define E1000_CBTMPC 0x0402C /* Circuit Breaker Tx Packet Count */ #define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */ #define E1000_CBRDPC 0x04044 /* Circuit Breaker Rx Dropped Count */ #define E1000_CBRMPC 0x040FC /* Circuit Breaker Rx Packet Count */ #define E1000_RPTHC 0x04104 /* Rx Packets To Host */ #define E1000_HGPTC 0x04118 /* Host Good Packets Tx Count */ #define E1000_HTCBDPC 0x04124 /* Host Tx Circuit Breaker Dropped Count */ #define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */ #define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */ #define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ #define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ #define E1000_LENERRS 0x04138 /* Length Errors Count */ #define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */ #define E1000_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */ #define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */ #define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */ #define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */ #define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Pg - RW */ #define E1000_RXCSUM 0x05000 /* Rx Checksum Control - RW */ #define E1000_RLPML 0x05004 /* Rx Long Packet Max Length */ #define E1000_RFCTL 0x05008 /* Receive Filter Control*/ #define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ #define E1000_RA 0x05400 /* Receive Address - RW Array */ #define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */ #define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ #define E1000_VT_CTL 0x0581C /* VMDq Control - RW */ #define E1000_CIAA 0x05B88 /* Config Indirect Access Address - RW */ #define E1000_CIAD 0x05B8C /* Config Indirect Access Data - RW */ #define E1000_VFQA0 0x0B000 /* VLAN Filter Queue Array 0 - RW Array */ #define E1000_VFQA1 0x0B200 /* VLAN Filter Queue Array 1 - RW Array */ #define E1000_WUC 0x05800 /* Wakeup Control - RW */ #define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ #define E1000_WUS 0x05810 /* Wakeup Status - RO */ #define E1000_MANC 0x05820 /* Management Control - RW */ #define E1000_IPAV 0x05838 /* IP Address Valid - RW */ #define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */ #define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */ #define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ #define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */ #define E1000_PBACL 0x05B68 /* MSIx PBA Clear - Read/Write 
1's to clear */ #define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ #define E1000_HOST_IF 0x08800 /* Host Interface */ #define E1000_HIBBA 0x8F40 /* Host Interface Buffer Base Address */ /* Flexible Host Filter Table */ #define E1000_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Ext Flexible Host Filter Table */ #define E1000_FHFT_EXT(_n) (0x09A00 + ((_n) * 0x100)) #define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */ #define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ /* Management Decision Filters */ #define E1000_MDEF(_n) (0x05890 + (4 * (_n))) #define E1000_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */ #define E1000_CCMCTL 0x05B48 /* CCM Control Register */ #define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */ #define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */ #define E1000_GCR 0x05B00 /* PCI-Ex Control */ #define E1000_GCR2 0x05B64 /* PCI-Ex Control #2 */ #define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ #define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ #define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ #define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ #define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ #define E1000_SWSM 0x05B50 /* SW Semaphore */ #define E1000_FWSM 0x05B54 /* FW Semaphore */ /* Driver-only SW semaphore (not used by BOOT agents) */ #define E1000_SWSM2 0x05B58 #define E1000_DCA_ID 0x05B70 /* DCA Requester ID Information - RO */ #define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */ #define E1000_UFUSE 0x05B78 /* UFUSE - RO */ #define E1000_FFLT_DBG 0x05F04 /* Debug Register */ #define E1000_HICR 0x08F00 /* Host Interface Control */ #define E1000_FWSTS 0x08F0C /* FW Status */ /* RSS registers */ #define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */ #define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ #define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ #define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate INTR Ext*/ #define E1000_IMIRVP 0x05AC0 /* Immediate INT Rx VLAN Priority -RW */ #define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) /* MSI-X Alloc Reg -RW */ #define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) /* Redirection Table - RW */ #define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW */ #define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */ #define E1000_RSSIR 0x05868 /* RSS Interrupt Request */ /* VT Registers */ #define E1000_SWPBS 0x03004 /* Switch Packet Buffer Size - RW */ #define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */ #define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */ #define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */ #define E1000_VFRE 0x00C8C /* VF Receive Enables */ #define E1000_VFTE 0x00C90 /* VF Transmit Enables */ #define E1000_QDE 0x02408 /* Queue Drop Enable - RW */ #define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */ #define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */ #define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */ #define E1000_UTA 0x0A000 /* Unicast Table Array - RW */ #define E1000_IOVCTL 0x05BBC /* IOV Control Register */ #define E1000_VMRCTL 0X05D80 /* Virtual Mirror Rule Control */ #define E1000_VMRVLAN 0x05D90 /* Virtual Mirror Rule VLAN */ #define E1000_VMRVM 0x05DA0 /* Virtual Mirror Rule VM */ #define E1000_MDFB 0x03558 /* Malicious Driver free block */ #define E1000_LVMMC 0x03548 /* Last VM Misbehavior cause */ #define E1000_TXSWC 0x05ACC /* Tx Switch Control */ #define E1000_SCCRL 0x05DB0 /* 
Storm Control Control */ #define E1000_BSCTRH 0x05DB8 /* Broadcast Storm Control Threshold */ #define E1000_MSCTRH 0x05DBC /* Multicast Storm Control Threshold */ /* These act per VF so an array friendly macro is used */ #define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n))) #define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) #define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) #define E1000_VFVMBMEM(_n) (0x00800 + (_n)) #define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n))) /* VLAN Virtual Machine Filter - RW */ #define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) #define E1000_VMVIR(_n) (0x03700 + (4 * (_n))) #define E1000_DVMOLR(_n) (0x0C038 + (0x40 * (_n))) /* DMA VM offload */ #define E1000_VTCTRL(_n) (0x10000 + (0x100 * (_n))) /* VT Control */ #define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ #define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ #define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ #define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */ #define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */ #define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */ #define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */ #define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ #define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ #define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ #define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ #define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ #define E1000_TIMADJL 0x0B60C /* Time sync time adjustment offset Low - RW */ #define E1000_TIMADJH 0x0B610 /* Time sync time adjustment offset High - RW */ #define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ +#define E1000_SYSSTMPL 0x0B648 /* HH Timesync system stamp low register */ +#define E1000_SYSSTMPH 0x0B64C /* HH Timesync system stamp hi register */ +#define E1000_PLTSTMPL 0x0B640 /* HH Timesync platform stamp low register */ +#define E1000_PLTSTMPH 0x0B644 /* HH Timesync platform stamp hi register */ #define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */ #define E1000_TSICR 0x0B66C /* Interrupt Cause Register */ #define E1000_TSIM 0x0B674 /* Interrupt Mask Register */ #define E1000_RXMTRL 0x0B634 /* Time sync Rx EtherType and Msg Type - RW */ #define E1000_RXUDP 0x0B638 /* Time Sync Rx UDP Port - RW */ /* Filtering Registers */ #define E1000_SAQF(_n) (0x05980 + (4 * (_n))) /* Source Address Queue Fltr */ #define E1000_DAQF(_n) (0x059A0 + (4 * (_n))) /* Dest Address Queue Fltr */ #define E1000_SPQF(_n) (0x059C0 + (4 * (_n))) /* Source Port Queue Fltr */ #define E1000_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */ #define E1000_TTQF(_n) (0x059E0 + (4 * (_n))) /* 2-tuple Queue Fltr */ #define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */ #define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ #define E1000_RTTDCS 0x3600 /* Reedtown Tx Desc plane control and status */ #define E1000_RTTPCS 0x3474 /* Reedtown Tx Packet Plane control and status */ #define E1000_RTRPCS 0x2474 /* Rx packet plane control and status */ #define E1000_RTRUP2TC 0x05AC4 /* Rx User Priority to Traffic Class */ #define E1000_RTTUP2TC 0x0418 /* Transmit User Priority to Traffic Class */ /* Tx Desc plane TC Rate-scheduler config */ #define E1000_RTTDTCRC(_n) (0x3610 + ((_n) * 4)) /* Tx Packet plane TC Rate-Scheduler Config */ #define E1000_RTTPTCRC(_n) (0x3480 + ((_n) * 4)) /* Rx Packet plane TC Rate-Scheduler Config */ #define 
E1000_RTRPTCRC(_n) (0x2480 + ((_n) * 4)) /* Tx Desc Plane TC Rate-Scheduler Status */ #define E1000_RTTDTCRS(_n) (0x3630 + ((_n) * 4)) /* Tx Desc Plane TC Rate-Scheduler MMW */ #define E1000_RTTDTCRM(_n) (0x3650 + ((_n) * 4)) /* Tx Packet plane TC Rate-Scheduler Status */ #define E1000_RTTPTCRS(_n) (0x34A0 + ((_n) * 4)) /* Tx Packet plane TC Rate-scheduler MMW */ #define E1000_RTTPTCRM(_n) (0x34C0 + ((_n) * 4)) /* Rx Packet plane TC Rate-Scheduler Status */ #define E1000_RTRPTCRS(_n) (0x24A0 + ((_n) * 4)) /* Rx Packet plane TC Rate-Scheduler MMW */ #define E1000_RTRPTCRM(_n) (0x24C0 + ((_n) * 4)) /* Tx Desc plane VM Rate-Scheduler MMW*/ #define E1000_RTTDVMRM(_n) (0x3670 + ((_n) * 4)) /* Tx BCN Rate-Scheduler MMW */ #define E1000_RTTBCNRM(_n) (0x3690 + ((_n) * 4)) #define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select */ #define E1000_RTTDVMRC 0x3608 /* Tx Desc Plane VM Rate-Scheduler Config */ #define E1000_RTTDVMRS 0x360C /* Tx Desc Plane VM Rate-Scheduler Status */ #define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config */ #define E1000_RTTBCNRS 0x36B4 /* Tx BCN Rate-Scheduler Status */ #define E1000_RTTBCNCR 0xB200 /* Tx BCN Control Register */ #define E1000_RTTBCNTG 0x35A4 /* Tx BCN Tagging */ #define E1000_RTTBCNCP 0xB208 /* Tx BCN Congestion point */ #define E1000_RTRBCNCR 0xB20C /* Rx BCN Control Register */ #define E1000_RTTBCNRD 0x36B8 /* Tx BCN Rate Drift */ #define E1000_PFCTOP 0x1080 /* Priority Flow Control Type and Opcode */ #define E1000_RTTBCNIDX 0xB204 /* Tx BCN Congestion Point */ #define E1000_RTTBCNACH 0x0B214 /* Tx BCN Control High */ #define E1000_RTTBCNACL 0x0B210 /* Tx BCN Control Low */ /* DMA Coalescing registers */ #define E1000_DMACR 0x02508 /* Control Register */ #define E1000_DMCTXTH 0x03550 /* Transmit Threshold */ #define E1000_DMCTLX 0x02514 /* Time to Lx Request */ #define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */ #define E1000_DMCCNT 0x05DD4 /* Current Rx Count */ #define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */ #define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ /* PCIe Parity Status Register */ #define E1000_PCIEERRSTS 0x05BA8 #define E1000_PROXYS 0x5F64 /* Proxying Status */ #define E1000_PROXYFC 0x5F60 /* Proxying Filter Control */ /* Thermal sensor configuration and status registers */ #define E1000_THMJT 0x08100 /* Junction Temperature */ #define E1000_THLOWTC 0x08104 /* Low Threshold Control */ #define E1000_THMIDTC 0x08108 /* Mid Threshold Control */ #define E1000_THHIGHTC 0x0810C /* High Threshold Control */ #define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ /* Energy Efficient Ethernet "EEE" registers */ #define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */ #define E1000_LTRC 0x01A0 /* Latency Tolerance Reporting Control */ #define E1000_EEER 0x0E30 /* Energy Efficient Ethernet "EEE"*/ #define E1000_EEE_SU 0x0E34 /* EEE Setup */ #define E1000_TLPIC 0x4148 /* EEE Tx LPI Count - TLPIC */ #define E1000_RLPIC 0x414C /* EEE Rx LPI Count - RLPIC */ /* OS2BMC Registers */ #define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */ #define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */ #define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */ #define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */ #define E1000_DOBFFCTL 0x3F24 /* DMA OBFF Control Register */ #endif Index: stable/10/sys/dev/e1000/if_em.c =================================================================== --- stable/10/sys/dev/e1000/if_em.c (revision 296054) +++ 
stable/10/sys/dev/e1000/if_em.c (revision 296055) @@ -1,6116 +1,6266 @@ /****************************************************************************** Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #include "opt_em.h" #include "opt_ddb.h" #include "opt_inet.h" #include "opt_inet6.h" #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #ifdef DDB #include #include #endif #if __FreeBSD_version >= 800000 #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "e1000_api.h" #include "e1000_82571.h" #include "if_em.h" /********************************************************************* - * Set this to one to display debug statistics - *********************************************************************/ -int em_display_debug_stats = 0; - -/********************************************************************* * Driver version: *********************************************************************/ -char em_driver_version[] = "7.4.2"; +char em_driver_version[] = "7.6.1-k"; /********************************************************************* * PCI Device ID Table * * Used by probe to select devices to load on * Last field stores an index into e1000_strings * Last entry must be all 0s * * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } *********************************************************************/ static em_vendor_info_t em_vendor_info_array[] = { /* Intel(R) PRO/1000 Network Connection */ { 0x8086, E1000_DEV_ID_82571EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82571EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82571EB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82571EB_SERDES_DUAL, PCI_ANY_ID, 
PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82571EB_SERDES_QUAD, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER_LP, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82571EB_QUAD_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82571PT_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82572EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82572EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82572EI_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82572EI, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82573E, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82573E_IAMT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82573L, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82583V, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_SPT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_SPT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_DPT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_DPT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH8_IGP_M_AMT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH8_IGP_AMT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH8_IGP_C, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH8_IFE, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH8_IFE_GT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH8_IFE_G, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH8_IGP_M, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH8_82567V_3, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH9_IGP_M_AMT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH9_IGP_AMT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH9_IGP_C, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH9_IGP_M, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH9_IGP_M_V, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH9_IFE, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH9_IFE_GT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH9_IFE_G, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH9_BM, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82574L, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82574LA, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH10_R_BM_LM, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH10_R_BM_LF, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH10_R_BM_V, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH10_D_BM_LM, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH10_D_BM_LF, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH10_D_BM_V, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH_M_HV_LM, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH_M_HV_LC, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH_D_HV_DM, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH_D_HV_DC, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH2_LV_LM, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH2_LV_V, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH_LPT_I217_LM, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH_LPT_I217_V, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH_LPTLP_I218_LM, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH_LPTLP_I218_V, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH_I218_LM2, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH_I218_V2, PCI_ANY_ID, PCI_ANY_ID, 
0}, { 0x8086, E1000_DEV_ID_PCH_I218_LM3, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_PCH_I218_V3, PCI_ANY_ID, PCI_ANY_ID, 0}, + { 0x8086, E1000_DEV_ID_PCH_SPT_I219_LM, PCI_ANY_ID, PCI_ANY_ID, 0}, + { 0x8086, E1000_DEV_ID_PCH_SPT_I219_V, PCI_ANY_ID, PCI_ANY_ID, 0}, + { 0x8086, E1000_DEV_ID_PCH_SPT_I219_LM2, + PCI_ANY_ID, PCI_ANY_ID, 0}, + { 0x8086, E1000_DEV_ID_PCH_SPT_I219_V2, PCI_ANY_ID, PCI_ANY_ID, 0}, + { 0x8086, E1000_DEV_ID_PCH_LBG_I219_LM3, + PCI_ANY_ID, PCI_ANY_ID, 0}, /* required last entry */ { 0, 0, 0, 0, 0} }; /********************************************************************* * Table of branding strings for all supported NICs. *********************************************************************/ static char *em_strings[] = { "Intel(R) PRO/1000 Network Connection" }; /********************************************************************* * Function prototypes *********************************************************************/ static int em_probe(device_t); static int em_attach(device_t); static int em_detach(device_t); static int em_shutdown(device_t); static int em_suspend(device_t); static int em_resume(device_t); #ifdef EM_MULTIQUEUE static int em_mq_start(struct ifnet *, struct mbuf *); static int em_mq_start_locked(struct ifnet *, struct tx_ring *); static void em_qflush(struct ifnet *); #else static void em_start(struct ifnet *); static void em_start_locked(struct ifnet *, struct tx_ring *); #endif static int em_ioctl(struct ifnet *, u_long, caddr_t); static void em_init(void *); static void em_init_locked(struct adapter *); static void em_stop(void *); static void em_media_status(struct ifnet *, struct ifmediareq *); static int em_media_change(struct ifnet *); static void em_identify_hardware(struct adapter *); static int em_allocate_pci_resources(struct adapter *); static int em_allocate_legacy(struct adapter *); static int em_allocate_msix(struct adapter *); static int em_allocate_queues(struct adapter *); static int em_setup_msix(struct adapter *); static void em_free_pci_resources(struct adapter *); static void em_local_timer(void *); static void em_reset(struct adapter *); static int em_setup_interface(device_t, struct adapter *); +static void em_flush_desc_rings(struct adapter *); static void em_setup_transmit_structures(struct adapter *); static void em_initialize_transmit_unit(struct adapter *); static int em_allocate_transmit_buffers(struct tx_ring *); static void em_free_transmit_structures(struct adapter *); static void em_free_transmit_buffers(struct tx_ring *); static int em_setup_receive_structures(struct adapter *); static int em_allocate_receive_buffers(struct rx_ring *); static void em_initialize_receive_unit(struct adapter *); static void em_free_receive_structures(struct adapter *); static void em_free_receive_buffers(struct rx_ring *); static void em_enable_intr(struct adapter *); static void em_disable_intr(struct adapter *); static void em_update_stats_counters(struct adapter *); static void em_add_hw_stats(struct adapter *adapter); static void em_txeof(struct tx_ring *); static bool em_rxeof(struct rx_ring *, int, int *); #ifndef __NO_STRICT_ALIGNMENT static int em_fixup_rx(struct rx_ring *); #endif static void em_setup_rxdesc(union e1000_rx_desc_extended *, const struct em_rxbuffer *rxbuf); static void em_receive_checksum(uint32_t status, struct mbuf *); static void em_transmit_checksum_setup(struct tx_ring *, struct mbuf *, int, struct ip *, u32 *, u32 *); static void em_tso_setup(struct tx_ring *, struct mbuf *, int, struct ip *, 
struct tcphdr *, u32 *, u32 *); static void em_set_promisc(struct adapter *); static void em_disable_promisc(struct adapter *); static void em_set_multi(struct adapter *); static void em_update_link_status(struct adapter *); static void em_refresh_mbufs(struct rx_ring *, int); static void em_register_vlan(void *, struct ifnet *, u16); static void em_unregister_vlan(void *, struct ifnet *, u16); static void em_setup_vlan_hw_support(struct adapter *); static int em_xmit(struct tx_ring *, struct mbuf **); static int em_dma_malloc(struct adapter *, bus_size_t, struct em_dma_alloc *, int); static void em_dma_free(struct adapter *, struct em_dma_alloc *); static int em_sysctl_nvm_info(SYSCTL_HANDLER_ARGS); static void em_print_nvm_info(struct adapter *); static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS); static void em_print_debug_info(struct adapter *); static int em_is_valid_ether_addr(u8 *); static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS); static void em_add_int_delay_sysctl(struct adapter *, const char *, const char *, struct em_int_delay_info *, int, int); /* Management and WOL Support */ static void em_init_manageability(struct adapter *); static void em_release_manageability(struct adapter *); static void em_get_hw_control(struct adapter *); static void em_release_hw_control(struct adapter *); static void em_get_wakeup(device_t); static void em_enable_wakeup(device_t); static int em_enable_phy_wakeup(struct adapter *); static void em_led_func(void *, int); static void em_disable_aspm(struct adapter *); static int em_irq_fast(void *); /* MSIX handlers */ static void em_msix_tx(void *); static void em_msix_rx(void *); static void em_msix_link(void *); static void em_handle_tx(void *context, int pending); static void em_handle_rx(void *context, int pending); static void em_handle_link(void *context, int pending); #ifdef EM_MULTIQUEUE static void em_enable_vectors_82574(struct adapter *); #endif static void em_set_sysctl_value(struct adapter *, const char *, const char *, int *, int); static int em_set_flowcntl(SYSCTL_HANDLER_ARGS); static int em_sysctl_eee(SYSCTL_HANDLER_ARGS); static __inline void em_rx_discard(struct rx_ring *, int); #ifdef DEVICE_POLLING static poll_handler_t em_poll; #endif /* POLLING */ /********************************************************************* * FreeBSD Device Interface Entry Points *********************************************************************/ static device_method_t em_methods[] = { /* Device interface */ DEVMETHOD(device_probe, em_probe), DEVMETHOD(device_attach, em_attach), DEVMETHOD(device_detach, em_detach), DEVMETHOD(device_shutdown, em_shutdown), DEVMETHOD(device_suspend, em_suspend), DEVMETHOD(device_resume, em_resume), DEVMETHOD_END }; static driver_t em_driver = { "em", em_methods, sizeof(struct adapter), }; devclass_t em_devclass; DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0); MODULE_DEPEND(em, pci, 1, 1, 1); MODULE_DEPEND(em, ether, 1, 1, 1); /********************************************************************* * Tunable default values. 
*********************************************************************/ #define EM_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000) #define EM_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024) #define M_TSO_LEN 66 #define MAX_INTS_PER_SEC 8000 #define DEFAULT_ITR (1000000000/(MAX_INTS_PER_SEC * 256)) /* Allow common code without TSO */ #ifndef CSUM_TSO #define CSUM_TSO 0 #endif #define TSO_WORKAROUND 4 static SYSCTL_NODE(_hw, OID_AUTO, em, CTLFLAG_RD, 0, "EM driver parameters"); static int em_disable_crc_stripping = 0; SYSCTL_INT(_hw_em, OID_AUTO, disable_crc_stripping, CTLFLAG_RDTUN, &em_disable_crc_stripping, 0, "Disable CRC Stripping"); static int em_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV); static int em_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR); TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt); TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt); SYSCTL_INT(_hw_em, OID_AUTO, tx_int_delay, CTLFLAG_RDTUN, &em_tx_int_delay_dflt, 0, "Default transmit interrupt delay in usecs"); SYSCTL_INT(_hw_em, OID_AUTO, rx_int_delay, CTLFLAG_RDTUN, &em_rx_int_delay_dflt, 0, "Default receive interrupt delay in usecs"); static int em_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV); static int em_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV); TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt); TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt); SYSCTL_INT(_hw_em, OID_AUTO, tx_abs_int_delay, CTLFLAG_RDTUN, &em_tx_abs_int_delay_dflt, 0, "Default transmit interrupt delay limit in usecs"); SYSCTL_INT(_hw_em, OID_AUTO, rx_abs_int_delay, CTLFLAG_RDTUN, &em_rx_abs_int_delay_dflt, 0, "Default receive interrupt delay limit in usecs"); static int em_rxd = EM_DEFAULT_RXD; static int em_txd = EM_DEFAULT_TXD; TUNABLE_INT("hw.em.rxd", &em_rxd); TUNABLE_INT("hw.em.txd", &em_txd); SYSCTL_INT(_hw_em, OID_AUTO, rxd, CTLFLAG_RDTUN, &em_rxd, 0, "Number of receive descriptors per queue"); SYSCTL_INT(_hw_em, OID_AUTO, txd, CTLFLAG_RDTUN, &em_txd, 0, "Number of transmit descriptors per queue"); static int em_smart_pwr_down = FALSE; TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down); SYSCTL_INT(_hw_em, OID_AUTO, smart_pwr_down, CTLFLAG_RDTUN, &em_smart_pwr_down, 0, "Set to true to leave smart power down enabled on newer adapters"); /* Controls whether promiscuous also shows bad packets */ static int em_debug_sbp = FALSE; TUNABLE_INT("hw.em.sbp", &em_debug_sbp); SYSCTL_INT(_hw_em, OID_AUTO, sbp, CTLFLAG_RDTUN, &em_debug_sbp, 0, "Show bad packets in promiscuous mode"); static int em_enable_msix = TRUE; TUNABLE_INT("hw.em.enable_msix", &em_enable_msix); SYSCTL_INT(_hw_em, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &em_enable_msix, 0, "Enable MSI-X interrupts"); #ifdef EM_MULTIQUEUE static int em_num_queues = 1; TUNABLE_INT("hw.em.num_queues", &em_num_queues); SYSCTL_INT(_hw_em, OID_AUTO, num_queues, CTLFLAG_RDTUN, &em_num_queues, 0, "82574 only: Number of queues to configure, 0 indicates autoconfigure"); #endif /* ** Global variable to store last used CPU when binding queues ** to CPUs in em_allocate_msix. Starts at CPU_FIRST and increments when a ** queue is bound to a cpu.
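The delay tunables above are stored in hardware "ticks" of 1.024 us each; EM_TICKS_TO_USECS and EM_USECS_TO_TICKS convert between the two units with round-to-nearest integer arithmetic, and DEFAULT_ITR is the same kind of integer expression. A minimal standalone sketch of the arithmetic (macro bodies copied from above; main() and the sample values are only illustrative):

#include <stdio.h>

/* Mirrors the driver macros: one register tick is 1024 ns. */
#define EM_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
#define EM_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)

int
main(void)
{
	/* A 64-tick register value reads back as 66 usecs... */
	printf("64 ticks -> %d usecs\n", EM_TICKS_TO_USECS(64));
	/* ...and 66 usecs rounds back down to 64 ticks. */
	printf("66 usecs -> %d ticks\n", EM_USECS_TO_TICKS(66));
	/* DEFAULT_ITR: 1000000000 / (8000 * 256) truncates to 488,
	 * the E1000_ITR value for roughly 8000 interrupts/sec. */
	printf("DEFAULT_ITR = %d\n", 1000000000 / (8000 * 256));
	return (0);
}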
*/ static int em_last_bind_cpu = -1; /* How many packets rxeof tries to clean at a time */ static int em_rx_process_limit = 100; TUNABLE_INT("hw.em.rx_process_limit", &em_rx_process_limit); SYSCTL_INT(_hw_em, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN, &em_rx_process_limit, 0, "Maximum number of received packets to process " "at a time, -1 means unlimited"); /* Energy efficient ethernet - default to OFF */ static int eee_setting = 1; TUNABLE_INT("hw.em.eee_setting", &eee_setting); SYSCTL_INT(_hw_em, OID_AUTO, eee_setting, CTLFLAG_RDTUN, &eee_setting, 0, "Disable Energy Efficient Ethernet"); /* Global used in WOL setup with multiport cards */ static int global_quad_port_a = 0; #ifdef DEV_NETMAP /* see ixgbe.c for details */ #include <dev/netmap/if_em_netmap.h> #endif /* DEV_NETMAP */ /********************************************************************* * Device identification routine * * em_probe determines if the driver should be loaded on * an adapter based on the PCI vendor/device id of the adapter. * * return BUS_PROBE_DEFAULT on success, positive on failure *********************************************************************/ static int em_probe(device_t dev) { char adapter_name[60]; uint16_t pci_vendor_id = 0; uint16_t pci_device_id = 0; uint16_t pci_subvendor_id = 0; uint16_t pci_subdevice_id = 0; em_vendor_info_t *ent; INIT_DEBUGOUT("em_probe: begin"); pci_vendor_id = pci_get_vendor(dev); if (pci_vendor_id != EM_VENDOR_ID) return (ENXIO); pci_device_id = pci_get_device(dev); pci_subvendor_id = pci_get_subvendor(dev); pci_subdevice_id = pci_get_subdevice(dev); ent = em_vendor_info_array; while (ent->vendor_id != 0) { if ((pci_vendor_id == ent->vendor_id) && (pci_device_id == ent->device_id) && ((pci_subvendor_id == ent->subvendor_id) || (ent->subvendor_id == PCI_ANY_ID)) && ((pci_subdevice_id == ent->subdevice_id) || (ent->subdevice_id == PCI_ANY_ID))) { sprintf(adapter_name, "%s %s", em_strings[ent->index], em_driver_version); device_set_desc_copy(dev, adapter_name); return (BUS_PROBE_DEFAULT); } ent++; } return (ENXIO); } /********************************************************************* * Device initialization routine * * The attach entry point is called when the driver is being loaded. * This routine identifies the type of hardware, allocates all resources * and initializes the hardware.
* * return 0 on success, positive on failure *********************************************************************/ static int em_attach(device_t dev) { struct adapter *adapter; struct e1000_hw *hw; int error = 0; INIT_DEBUGOUT("em_attach: begin"); if (resource_disabled("em", device_get_unit(dev))) { device_printf(dev, "Disabled by device hint\n"); return (ENXIO); } adapter = device_get_softc(dev); adapter->dev = adapter->osdep.dev = dev; hw = &adapter->hw; EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev)); /* SYSCTL stuff */ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, em_sysctl_nvm_info, "I", "NVM Information"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, em_sysctl_debug_info, "I", "Debug Information"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "fc", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, em_set_flowcntl, "I", "Flow Control"); callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0); /* Determine hardware and mac info */ em_identify_hardware(adapter); /* Setup PCI resources */ if (em_allocate_pci_resources(adapter)) { device_printf(dev, "Allocation of PCI resources failed\n"); error = ENXIO; goto err_pci; } /* ** For ICH8 and family we need to ** map the flash memory, and this ** must happen after the MAC is ** identified */ if ((hw->mac.type == e1000_ich8lan) || (hw->mac.type == e1000_ich9lan) || (hw->mac.type == e1000_ich10lan) || (hw->mac.type == e1000_pchlan) || (hw->mac.type == e1000_pch2lan) || (hw->mac.type == e1000_pch_lpt)) { int rid = EM_BAR_TYPE_FLASH; adapter->flash = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (adapter->flash == NULL) { device_printf(dev, "Mapping of Flash failed\n"); error = ENXIO; goto err_pci; } /* This is used in the shared code */ hw->flash_address = (u8 *)adapter->flash; adapter->osdep.flash_bus_space_tag = rman_get_bustag(adapter->flash); adapter->osdep.flash_bus_space_handle = rman_get_bushandle(adapter->flash); } + /* + ** In the new SPT device, flash is not a + ** separate BAR, rather it is also in BAR0, + ** so use the same tag and an offset handle for the + ** FLASH read/write macros in the shared code. + */ + else if (hw->mac.type == e1000_pch_spt) { + adapter->osdep.flash_bus_space_tag = + adapter->osdep.mem_bus_space_tag; + adapter->osdep.flash_bus_space_handle = + adapter->osdep.mem_bus_space_handle + + E1000_FLASH_BASE_ADDR; + } /* Do Shared Code initialization */ - if (e1000_setup_init_funcs(hw, TRUE)) { - device_printf(dev, "Setup of Shared code failed\n"); + error = e1000_setup_init_funcs(hw, TRUE); + if (error) { + device_printf(dev, "Setup of Shared code failed, error %d\n", + error); error = ENXIO; goto err_pci; } /* * Setup MSI/X or MSI if PCI Express */ adapter->msix = em_setup_msix(adapter); e1000_get_bus_info(hw); /* Set up some sysctls for the tunable interrupt delays */ em_add_int_delay_sysctl(adapter, "rx_int_delay", "receive interrupt delay in usecs", &adapter->rx_int_delay, E1000_REGISTER(hw, E1000_RDTR), em_rx_int_delay_dflt); em_add_int_delay_sysctl(adapter, "tx_int_delay", "transmit interrupt delay in usecs", &adapter->tx_int_delay, E1000_REGISTER(hw, E1000_TIDV), em_tx_int_delay_dflt); em_add_int_delay_sysctl(adapter, "rx_abs_int_delay", "receive interrupt delay limit in usecs", &adapter->rx_abs_int_delay, E1000_REGISTER(hw, E1000_RADV), em_rx_abs_int_delay_dflt); em_add_int_delay_sysctl(adapter, "tx_abs_int_delay", "transmit interrupt delay limit in usecs", &adapter->tx_abs_int_delay, E1000_REGISTER(hw, E1000_TADV), em_tx_abs_int_delay_dflt); em_add_int_delay_sysctl(adapter, "itr", "interrupt delay limit in usecs/4", &adapter->tx_itr, E1000_REGISTER(hw, E1000_ITR), DEFAULT_ITR); /* Sysctl for limiting the amount of work done in the taskqueue */ em_set_sysctl_value(adapter, "rx_processing_limit", "max number of rx packets to process", &adapter->rx_process_limit, em_rx_process_limit); /* * Validate number of transmit and receive descriptors. It * must not exceed hardware maximum, and must be a multiple * of EM_DBA_ALIGN. */ if (((em_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 || (em_txd > EM_MAX_TXD) || (em_txd < EM_MIN_TXD)) { device_printf(dev, "Using %d TX descriptors instead of %d!\n", EM_DEFAULT_TXD, em_txd); adapter->num_tx_desc = EM_DEFAULT_TXD; } else adapter->num_tx_desc = em_txd; if (((em_rxd * sizeof(union e1000_rx_desc_extended)) % EM_DBA_ALIGN) != 0 || (em_rxd > EM_MAX_RXD) || (em_rxd < EM_MIN_RXD)) { device_printf(dev, "Using %d RX descriptors instead of %d!\n", EM_DEFAULT_RXD, em_rxd); adapter->num_rx_desc = EM_DEFAULT_RXD; } else adapter->num_rx_desc = em_rxd; hw->mac.autoneg = DO_AUTO_NEG; hw->phy.autoneg_wait_to_complete = FALSE; hw->phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; /* Copper options */ if (hw->phy.media_type == e1000_media_type_copper) { hw->phy.mdix = AUTO_ALL_MODES; hw->phy.disable_polarity_correction = FALSE; hw->phy.ms_type = EM_MASTER_SLAVE; } /* * Set the frame limits assuming * standard ethernet sized frames. */ adapter->hw.mac.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE; /* * This controls when hardware reports transmit completion * status. */ hw->mac.report_tx_early = 1; /* ** Get queue/ring memory */ if (em_allocate_queues(adapter)) { error = ENOMEM; goto err_pci; } /* Allocate multicast array memory.
*/ adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT); if (adapter->mta == NULL) { device_printf(dev, "Can not allocate multicast setup array\n"); error = ENOMEM; goto err_late; } /* Check SOL/IDER usage */ if (e1000_check_reset_block(hw)) device_printf(dev, "PHY reset is blocked" " due to SOL/IDER session.\n"); /* Sysctl for setting Energy Efficient Ethernet */ hw->dev_spec.ich8lan.eee_disable = eee_setting; SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "eee_control", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, em_sysctl_eee, "I", "Disable Energy Efficient Ethernet"); /* ** Start from a known state, this is ** important in reading the nvm and ** mac from that. */ e1000_reset_hw(hw); /* Make sure we have a good EEPROM before we read from it */ if (e1000_validate_nvm_checksum(hw) < 0) { /* ** Some PCI-E parts fail the first check due to ** the link being in sleep state, call it again, ** if it fails a second time its a real issue. */ if (e1000_validate_nvm_checksum(hw) < 0) { device_printf(dev, "The EEPROM Checksum Is Not Valid\n"); error = EIO; goto err_late; } } /* Copy the permanent MAC address out of the EEPROM */ if (e1000_read_mac_addr(hw) < 0) { device_printf(dev, "EEPROM read error while reading MAC" " address\n"); error = EIO; goto err_late; } if (!em_is_valid_ether_addr(hw->mac.addr)) { device_printf(dev, "Invalid MAC address\n"); error = EIO; goto err_late; } /* Disable ULP support */ e1000_disable_ulp_lpt_lp(hw, TRUE); /* ** Do interrupt configuration */ if (adapter->msix > 1) /* Do MSIX */ error = em_allocate_msix(adapter); else /* MSI or Legacy */ error = em_allocate_legacy(adapter); if (error) goto err_late; /* * Get Wake-on-Lan and Management info for later use */ em_get_wakeup(dev); /* Setup OS specific network interface */ if (em_setup_interface(dev, adapter) != 0) goto err_late; em_reset(adapter); /* Initialize statistics */ em_update_stats_counters(adapter); hw->mac.get_link_status = 1; em_update_link_status(adapter); /* Register for VLAN events */ adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, em_register_vlan, adapter, EVENTHANDLER_PRI_FIRST); adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, em_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST); em_add_hw_stats(adapter); /* Non-AMT based hardware can now take control from firmware */ if (adapter->has_manage && !adapter->has_amt) em_get_hw_control(adapter); /* Tell the stack that the interface is not active */ adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING; adapter->ifp->if_drv_flags |= IFF_DRV_OACTIVE; adapter->led_dev = led_create(em_led_func, adapter, device_get_nameunit(dev)); #ifdef DEV_NETMAP em_netmap_attach(adapter); #endif /* DEV_NETMAP */ INIT_DEBUGOUT("em_attach: end"); return (0); err_late: em_free_transmit_structures(adapter); em_free_receive_structures(adapter); em_release_hw_control(adapter); if (adapter->ifp != NULL) if_free(adapter->ifp); err_pci: em_free_pci_resources(adapter); free(adapter->mta, M_DEVBUF); EM_CORE_LOCK_DESTROY(adapter); return (error); } /********************************************************************* * Device removal routine * * The detach entry point is called when the driver is being removed. * This routine stops the adapter and deallocates all the resources * that were allocated for driver operation. 
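A note on the new pch_spt handling earlier in em_attach: rather than mapping a separate flash BAR, the code reuses BAR0's bus-space tag and offsets the handle by E1000_FLASH_BASE_ADDR, so the shared code's FLASH read/write macros land in the flash window that SPT exposes inside BAR0. A rough user-space analogue of the idea, assuming a bus-space handle behaves like a base address (true for x86 memory space); the buffer, offset constant and names below are stand-ins, not the driver's:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins: a 64KB "BAR0" and a flash window offset. */
#define FAKE_FLASH_BASE_ADDR 0xE000

static uint8_t bar0[0x10000]; /* pretend this is the mapped register BAR */

int
main(void)
{
	/* "Handles" here are plain base addresses. */
	uint8_t *mem_handle = bar0;
	uint8_t *flash_handle = mem_handle + FAKE_FLASH_BASE_ADDR;

	/* A flash access at offset 0 now lands inside BAR0's window. */
	bar0[FAKE_FLASH_BASE_ADDR] = 0x5A;
	printf("flash reg 0 = 0x%02x\n", flash_handle[0]);
	return (0);
}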
* * return 0 on success, positive on failure *********************************************************************/ static int em_detach(device_t dev) { struct adapter *adapter = device_get_softc(dev); struct ifnet *ifp = adapter->ifp; INIT_DEBUGOUT("em_detach: begin"); /* Make sure VLANS are not using driver */ if (adapter->ifp->if_vlantrunk != NULL) { device_printf(dev,"Vlan in use, detach first\n"); return (EBUSY); } #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif if (adapter->led_dev != NULL) led_destroy(adapter->led_dev); EM_CORE_LOCK(adapter); adapter->in_detach = 1; em_stop(adapter); EM_CORE_UNLOCK(adapter); EM_CORE_LOCK_DESTROY(adapter); e1000_phy_hw_reset(&adapter->hw); em_release_manageability(adapter); em_release_hw_control(adapter); /* Unregister VLAN events */ if (adapter->vlan_attach != NULL) EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach); if (adapter->vlan_detach != NULL) EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach); ether_ifdetach(adapter->ifp); callout_drain(&adapter->timer); #ifdef DEV_NETMAP netmap_detach(ifp); #endif /* DEV_NETMAP */ em_free_pci_resources(adapter); bus_generic_detach(dev); if_free(ifp); em_free_transmit_structures(adapter); em_free_receive_structures(adapter); em_release_hw_control(adapter); free(adapter->mta, M_DEVBUF); return (0); } /********************************************************************* * * Shutdown entry point * **********************************************************************/ static int em_shutdown(device_t dev) { return em_suspend(dev); } /* * Suspend/resume device methods. */ static int em_suspend(device_t dev) { struct adapter *adapter = device_get_softc(dev); EM_CORE_LOCK(adapter); em_release_manageability(adapter); em_release_hw_control(adapter); em_enable_wakeup(dev); EM_CORE_UNLOCK(adapter); return bus_generic_suspend(dev); } static int em_resume(device_t dev) { struct adapter *adapter = device_get_softc(dev); struct tx_ring *txr = adapter->tx_rings; struct ifnet *ifp = adapter->ifp; EM_CORE_LOCK(adapter); if (adapter->hw.mac.type == e1000_pch2lan) e1000_resume_workarounds_pchlan(&adapter->hw); em_init_locked(adapter); em_init_manageability(adapter); if ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING) && adapter->link_active) { for (int i = 0; i < adapter->num_queues; i++, txr++) { EM_TX_LOCK(txr); #ifdef EM_MULTIQUEUE if (!drbr_empty(ifp, txr->br)) em_mq_start_locked(ifp, txr); #else if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) em_start_locked(ifp, txr); #endif EM_TX_UNLOCK(txr); } } EM_CORE_UNLOCK(adapter); return bus_generic_resume(dev); } #ifndef EM_MULTIQUEUE static void em_start_locked(struct ifnet *ifp, struct tx_ring *txr) { struct adapter *adapter = ifp->if_softc; struct mbuf *m_head; EM_TX_LOCK_ASSERT(txr); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; if (!adapter->link_active) return; while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { /* Call cleanup if number of TX descriptors low */ if (txr->tx_avail <= EM_TX_CLEANUP_THRESHOLD) em_txeof(txr); if (txr->tx_avail < EM_MAX_SCATTER) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* * Encapsulation can modify our pointer, and or make it * NULL on failure. In that event, we can't requeue. 
*/ if (em_xmit(txr, &m_head)) { if (m_head == NULL) break; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); break; } /* Mark the queue as having work */ if (txr->busy == EM_TX_IDLE) txr->busy = EM_TX_BUSY; /* Send a copy of the frame to the BPF listener */ ETHER_BPF_MTAP(ifp, m_head); } return; } static void em_start(struct ifnet *ifp) { struct adapter *adapter = ifp->if_softc; struct tx_ring *txr = adapter->tx_rings; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { EM_TX_LOCK(txr); em_start_locked(ifp, txr); EM_TX_UNLOCK(txr); } return; } #else /* EM_MULTIQUEUE */ /********************************************************************* * Multiqueue Transmit routines * * em_mq_start is called by the stack to initiate a transmit. * however, if busy the driver can queue the request rather * than do an immediate send. It is this that is an advantage * in this driver, rather than also having multiple tx queues. **********************************************************************/ /* ** Multiqueue capable stack interface */ static int em_mq_start(struct ifnet *ifp, struct mbuf *m) { struct adapter *adapter = ifp->if_softc; struct tx_ring *txr = adapter->tx_rings; unsigned int i, error; if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) i = m->m_pkthdr.flowid % adapter->num_queues; else i = curcpu % adapter->num_queues; txr = &adapter->tx_rings[i]; error = drbr_enqueue(ifp, txr->br, m); if (error) return (error); if (EM_TX_TRYLOCK(txr)) { em_mq_start_locked(ifp, txr); EM_TX_UNLOCK(txr); } else taskqueue_enqueue(txr->tq, &txr->tx_task); return (0); } static int em_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr) { struct adapter *adapter = txr->adapter; struct mbuf *next; int err = 0, enq = 0; if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING || adapter->link_active == 0) { return (ENETDOWN); } /* Process the queue */ while ((next = drbr_peek(ifp, txr->br)) != NULL) { if ((err = em_xmit(txr, &next)) != 0) { if (next == NULL) { /* It was freed, move forward */ drbr_advance(ifp, txr->br); } else { /* * Still have one left, it may not be * the same since the transmit function * may have changed it. */ drbr_putback(ifp, txr->br, next); } break; } drbr_advance(ifp, txr->br); enq++; ifp->if_obytes += next->m_pkthdr.len; if (next->m_flags & M_MCAST) ifp->if_omcasts++; ETHER_BPF_MTAP(ifp, next); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) break; } /* Mark the queue as having work */ if ((enq > 0) && (txr->busy == EM_TX_IDLE)) txr->busy = EM_TX_BUSY; if (txr->tx_avail < EM_MAX_SCATTER) em_txeof(txr); if (txr->tx_avail < EM_MAX_SCATTER) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; } return (err); } /* ** Flush all ring buffers */ static void em_qflush(struct ifnet *ifp) { struct adapter *adapter = ifp->if_softc; struct tx_ring *txr = adapter->tx_rings; struct mbuf *m; for (int i = 0; i < adapter->num_queues; i++, txr++) { EM_TX_LOCK(txr); while ((m = buf_ring_dequeue_sc(txr->br)) != NULL) m_freem(m); EM_TX_UNLOCK(txr); } if_qflush(ifp); } #endif /* EM_MULTIQUEUE */ /********************************************************************* * Ioctl entry point * * em_ioctl is called when the user wants to configure the * interface. 
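The multiqueue transmit entry above (em_mq_start) follows a lock-avoidance shape worth calling out: the frame is always enqueued to the ring's buf_ring first, then the queue is drained immediately only if the TX lock is free, otherwise the drain is deferred to the ring's taskqueue so the caller never blocks. A condensed pthread sketch of that shape; every name and body below is an illustrative stand-in (for drbr_enqueue, em_mq_start_locked and taskqueue_enqueue respectively):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t txq_lock = PTHREAD_MUTEX_INITIALIZER;

static void enqueue(int pkt)    { printf("queued %d\n", pkt); }
static void drain(void)         { printf("draining queue\n"); }
static void defer_to_task(void) { printf("deferred to worker\n"); }

static void
mq_start(int pkt)
{
	enqueue(pkt);                   /* always enqueue first */
	if (pthread_mutex_trylock(&txq_lock) == 0) {
		drain();                /* we own the ring, drain now */
		pthread_mutex_unlock(&txq_lock);
	} else
		defer_to_task();        /* busy: a worker drains later */
}

int
main(void)
{
	mq_start(1);
	return (0);
}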
* * return 0 on success, positive on failure **********************************************************************/ static int em_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct adapter *adapter = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; #if defined(INET) || defined(INET6) struct ifaddr *ifa = (struct ifaddr *)data; #endif bool avoid_reset = FALSE; int error = 0; if (adapter->in_detach) return (error); switch (command) { case SIOCSIFADDR: #ifdef INET if (ifa->ifa_addr->sa_family == AF_INET) avoid_reset = TRUE; #endif #ifdef INET6 if (ifa->ifa_addr->sa_family == AF_INET6) avoid_reset = TRUE; #endif /* ** Calling init results in link renegotiation, ** so we avoid doing it when possible. */ if (avoid_reset) { ifp->if_flags |= IFF_UP; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) em_init(adapter); #ifdef INET if (!(ifp->if_flags & IFF_NOARP)) arp_ifinit(ifp, ifa); #endif } else error = ether_ioctl(ifp, command, data); break; case SIOCSIFMTU: { int max_frame_size; IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)"); EM_CORE_LOCK(adapter); switch (adapter->hw.mac.type) { case e1000_82571: case e1000_82572: case e1000_ich9lan: case e1000_ich10lan: case e1000_pch2lan: case e1000_pch_lpt: + case e1000_pch_spt: case e1000_82574: case e1000_82583: case e1000_80003es2lan: /* 9K Jumbo Frame size */ max_frame_size = 9234; break; case e1000_pchlan: max_frame_size = 4096; break; /* Adapters that do not support jumbo frames */ case e1000_ich8lan: max_frame_size = ETHER_MAX_LEN; break; default: max_frame_size = MAX_JUMBO_FRAME_SIZE; } if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN) { EM_CORE_UNLOCK(adapter); error = EINVAL; break; } ifp->if_mtu = ifr->ifr_mtu; adapter->hw.mac.max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; em_init_locked(adapter); EM_CORE_UNLOCK(adapter); break; } case SIOCSIFFLAGS: IOCTL_DEBUGOUT("ioctl rcv'd:\ SIOCSIFFLAGS (Set Interface Flags)"); EM_CORE_LOCK(adapter); if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) { if ((ifp->if_flags ^ adapter->if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) { em_disable_promisc(adapter); em_set_promisc(adapter); } } else em_init_locked(adapter); } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) em_stop(adapter); adapter->if_flags = ifp->if_flags; EM_CORE_UNLOCK(adapter); break; case SIOCADDMULTI: case SIOCDELMULTI: IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI"); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { EM_CORE_LOCK(adapter); em_disable_intr(adapter); em_set_multi(adapter); #ifdef DEVICE_POLLING if (!(ifp->if_capenable & IFCAP_POLLING)) #endif em_enable_intr(adapter); EM_CORE_UNLOCK(adapter); } break; case SIOCSIFMEDIA: /* Check SOL/IDER usage */ EM_CORE_LOCK(adapter); if (e1000_check_reset_block(&adapter->hw)) { EM_CORE_UNLOCK(adapter); device_printf(adapter->dev, "Media change is" " blocked due to SOL/IDER session.\n"); break; } EM_CORE_UNLOCK(adapter); /* falls thru */ case SIOCGIFMEDIA: IOCTL_DEBUGOUT("ioctl rcv'd: \ SIOCxIFMEDIA (Get/Set Interface Media)"); error = ifmedia_ioctl(ifp, ifr, &adapter->media, command); break; case SIOCSIFCAP: { int mask, reinit; IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)"); reinit = 0; mask = ifr->ifr_reqcap ^ ifp->if_capenable; #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { if (ifr->ifr_reqcap & IFCAP_POLLING) { error = ether_poll_register(em_poll, ifp); if (error) return (error); EM_CORE_LOCK(adapter); em_disable_intr(adapter); ifp->if_capenable |= IFCAP_POLLING; EM_CORE_UNLOCK(adapter); } else { error 
= ether_poll_deregister(ifp); /* Enable interrupt even in error case */ EM_CORE_LOCK(adapter); em_enable_intr(adapter); ifp->if_capenable &= ~IFCAP_POLLING; EM_CORE_UNLOCK(adapter); } } #endif if (mask & IFCAP_HWCSUM) { ifp->if_capenable ^= IFCAP_HWCSUM; reinit = 1; } if (mask & IFCAP_TSO4) { ifp->if_capenable ^= IFCAP_TSO4; reinit = 1; } if (mask & IFCAP_VLAN_HWTAGGING) { ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; reinit = 1; } if (mask & IFCAP_VLAN_HWFILTER) { ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; reinit = 1; } if (mask & IFCAP_VLAN_HWTSO) { ifp->if_capenable ^= IFCAP_VLAN_HWTSO; reinit = 1; } if ((mask & IFCAP_WOL) && (ifp->if_capabilities & IFCAP_WOL) != 0) { if (mask & IFCAP_WOL_MCAST) ifp->if_capenable ^= IFCAP_WOL_MCAST; if (mask & IFCAP_WOL_MAGIC) ifp->if_capenable ^= IFCAP_WOL_MAGIC; } if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) em_init(adapter); VLAN_CAPABILITIES(ifp); break; } default: error = ether_ioctl(ifp, command, data); break; } return (error); } /********************************************************************* * Init entry point * * This routine is used in two ways. It is used by the stack as * init entry point in network interface structure. It is also used * by the driver as a hw/sw initialization routine to get to a * consistent state. * * return 0 on success, positive on failure **********************************************************************/ static void em_init_locked(struct adapter *adapter) { struct ifnet *ifp = adapter->ifp; device_t dev = adapter->dev; INIT_DEBUGOUT("em_init: begin"); EM_CORE_LOCK_ASSERT(adapter); em_disable_intr(adapter); callout_stop(&adapter->timer); /* Get the latest mac address, User can use a LAA */ bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr, ETHER_ADDR_LEN); /* Put the address into the Receive Address Array */ e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); /* * With the 82571 adapter, RAR[0] may be overwritten * when the other port is reset, we make a duplicate * in RAR[14] for that eventuality, this assures * the interface continues to function. */ if (adapter->hw.mac.type == e1000_82571) { e1000_set_laa_state_82571(&adapter->hw, TRUE); e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, E1000_RAR_ENTRIES - 1); } /* Initialize the hardware */ em_reset(adapter); em_update_link_status(adapter); /* Setup VLAN support, basic and offload if available */ E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN); /* Set hardware offload abilities */ ifp->if_hwassist = 0; if (ifp->if_capenable & IFCAP_TXCSUM) ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP); /* ** There have proven to be problems with TSO when not ** at full gigabit speed, so disable the assist automatically ** when at lower speeds. 
-jfv */ if (ifp->if_capenable & IFCAP_TSO4) { if (adapter->link_speed == SPEED_1000) ifp->if_hwassist |= CSUM_TSO; } /* Configure for OS presence */ em_init_manageability(adapter); /* Prepare transmit descriptors and buffers */ em_setup_transmit_structures(adapter); em_initialize_transmit_unit(adapter); /* Setup Multicast table */ em_set_multi(adapter); /* ** Figure out the desired mbuf ** pool for doing jumbos */ if (adapter->hw.mac.max_frame_size <= 2048) adapter->rx_mbuf_sz = MCLBYTES; else if (adapter->hw.mac.max_frame_size <= 4096) adapter->rx_mbuf_sz = MJUMPAGESIZE; else adapter->rx_mbuf_sz = MJUM9BYTES; /* Prepare receive descriptors and buffers */ if (em_setup_receive_structures(adapter)) { device_printf(dev, "Could not setup receive structures\n"); em_stop(adapter); return; } em_initialize_receive_unit(adapter); /* Use real VLAN Filter support? */ if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) /* Use real VLAN Filter support */ em_setup_vlan_hw_support(adapter); else { u32 ctrl; ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL); ctrl |= E1000_CTRL_VME; E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl); } } /* Don't lose promiscuous settings */ em_set_promisc(adapter); /* Set the interface as ACTIVE */ ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; callout_reset(&adapter->timer, hz, em_local_timer, adapter); e1000_clear_hw_cntrs_base_generic(&adapter->hw); /* MSI/X configuration for 82574 */ if (adapter->hw.mac.type == e1000_82574) { int tmp; tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT); tmp |= E1000_CTRL_EXT_PBA_CLR; E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp); /* Set the IVAR - interrupt vector routing. */ E1000_WRITE_REG(&adapter->hw, E1000_IVAR, adapter->ivars); } #ifdef DEVICE_POLLING /* * Only enable interrupts if we are not polling, make sure * they are off otherwise. 
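The cluster sizing in em_init_locked above picks the smallest mbuf pool that still holds the configured maximum frame. Assuming the standard FreeBSD cluster sizes of 2048 (MCLBYTES), 4096 (MJUMPAGESIZE) and 9216 (MJUM9BYTES) bytes, the mapping works out as in this small sketch:

#include <stdio.h>

/* Assumed standard FreeBSD cluster sizes. */
#define SZ_MCLBYTES	2048
#define SZ_MJUMPAGESIZE	4096
#define SZ_MJUM9BYTES	9216

static int
rx_mbuf_sz(int max_frame)
{
	if (max_frame <= 2048)
		return (SZ_MCLBYTES);
	else if (max_frame <= 4096)
		return (SZ_MJUMPAGESIZE);
	return (SZ_MJUM9BYTES);
}

int
main(void)
{
	/* 1500-byte MTU -> 1518-byte frames -> 2KB clusters,
	 * 9000-byte jumbo MTU -> 9018-byte frames -> 9KB clusters. */
	printf("1518 -> %d\n", rx_mbuf_sz(1518));
	printf("4000 -> %d\n", rx_mbuf_sz(4000));
	printf("9018 -> %d\n", rx_mbuf_sz(9018));
	return (0);
}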
*/ if (ifp->if_capenable & IFCAP_POLLING) em_disable_intr(adapter); else #endif /* DEVICE_POLLING */ em_enable_intr(adapter); /* AMT based hardware can now take control from firmware */ if (adapter->has_manage && adapter->has_amt) em_get_hw_control(adapter); } static void em_init(void *arg) { struct adapter *adapter = arg; EM_CORE_LOCK(adapter); em_init_locked(adapter); EM_CORE_UNLOCK(adapter); } #ifdef DEVICE_POLLING /********************************************************************* * * Legacy polling routine: note this only works with single queue * *********************************************************************/ static int em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct adapter *adapter = ifp->if_softc; struct tx_ring *txr = adapter->tx_rings; struct rx_ring *rxr = adapter->rx_rings; u32 reg_icr; int rx_done; EM_CORE_LOCK(adapter); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { EM_CORE_UNLOCK(adapter); return (0); } if (cmd == POLL_AND_CHECK_STATUS) { reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR); if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { callout_stop(&adapter->timer); adapter->hw.mac.get_link_status = 1; em_update_link_status(adapter); callout_reset(&adapter->timer, hz, em_local_timer, adapter); } } EM_CORE_UNLOCK(adapter); em_rxeof(rxr, count, &rx_done); EM_TX_LOCK(txr); em_txeof(txr); #ifdef EM_MULTIQUEUE if (!drbr_empty(ifp, txr->br)) em_mq_start_locked(ifp, txr); #else if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) em_start_locked(ifp, txr); #endif EM_TX_UNLOCK(txr); return (rx_done); } #endif /* DEVICE_POLLING */ /********************************************************************* * * Fast Legacy/MSI Combined Interrupt Service routine * *********************************************************************/ static int em_irq_fast(void *arg) { struct adapter *adapter = arg; struct ifnet *ifp; u32 reg_icr; ifp = adapter->ifp; reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR); /* Hot eject? */ if (reg_icr == 0xffffffff) return FILTER_STRAY; /* Definitely not our interrupt. */ if (reg_icr == 0x0) return FILTER_STRAY; /* * Starting with the 82571 chip, bit 31 should be used to * determine whether the interrupt belongs to us. 
*/ if (adapter->hw.mac.type >= e1000_82571 && (reg_icr & E1000_ICR_INT_ASSERTED) == 0) return FILTER_STRAY; em_disable_intr(adapter); taskqueue_enqueue(adapter->tq, &adapter->que_task); /* Link status change */ if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { adapter->hw.mac.get_link_status = 1; taskqueue_enqueue(taskqueue_fast, &adapter->link_task); } if (reg_icr & E1000_ICR_RXO) adapter->rx_overruns++; return FILTER_HANDLED; } /* Combined RX/TX handler, used by Legacy and MSI */ static void em_handle_que(void *context, int pending) { struct adapter *adapter = context; struct ifnet *ifp = adapter->ifp; struct tx_ring *txr = adapter->tx_rings; struct rx_ring *rxr = adapter->rx_rings; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { bool more = em_rxeof(rxr, adapter->rx_process_limit, NULL); EM_TX_LOCK(txr); em_txeof(txr); #ifdef EM_MULTIQUEUE if (!drbr_empty(ifp, txr->br)) em_mq_start_locked(ifp, txr); #else if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) em_start_locked(ifp, txr); #endif EM_TX_UNLOCK(txr); if (more) { taskqueue_enqueue(adapter->tq, &adapter->que_task); return; } } em_enable_intr(adapter); return; } /********************************************************************* * * MSIX Interrupt Service Routines * **********************************************************************/ static void em_msix_tx(void *arg) { struct tx_ring *txr = arg; struct adapter *adapter = txr->adapter; struct ifnet *ifp = adapter->ifp; ++txr->tx_irq; EM_TX_LOCK(txr); em_txeof(txr); #ifdef EM_MULTIQUEUE if (!drbr_empty(ifp, txr->br)) em_mq_start_locked(ifp, txr); #else if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) em_start_locked(ifp, txr); #endif /* Reenable this interrupt */ E1000_WRITE_REG(&adapter->hw, E1000_IMS, txr->ims); EM_TX_UNLOCK(txr); return; } /********************************************************************* * * MSIX RX Interrupt Service routine * **********************************************************************/ static void em_msix_rx(void *arg) { struct rx_ring *rxr = arg; struct adapter *adapter = rxr->adapter; bool more; ++rxr->rx_irq; if (!(adapter->ifp->if_drv_flags & IFF_DRV_RUNNING)) return; more = em_rxeof(rxr, adapter->rx_process_limit, NULL); if (more) taskqueue_enqueue(rxr->tq, &rxr->rx_task); else { /* Reenable this interrupt */ E1000_WRITE_REG(&adapter->hw, E1000_IMS, rxr->ims); } return; } /********************************************************************* * * MSIX Link Fast Interrupt Service routine * **********************************************************************/ static void em_msix_link(void *arg) { struct adapter *adapter = arg; u32 reg_icr; ++adapter->link_irq; reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR); if (reg_icr & E1000_ICR_RXO) adapter->rx_overruns++; if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { adapter->hw.mac.get_link_status = 1; em_handle_link(adapter, 0); } else E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_LINK | E1000_IMS_LSC); /* ** Because we must read the ICR for this interrupt ** it may clear other causes using autoclear, for ** this reason we simply create a soft interrupt ** for all these vectors. 
*/ if (reg_icr) { E1000_WRITE_REG(&adapter->hw, E1000_ICS, adapter->ims); } return; } static void em_handle_rx(void *context, int pending) { struct rx_ring *rxr = context; struct adapter *adapter = rxr->adapter; bool more; more = em_rxeof(rxr, adapter->rx_process_limit, NULL); if (more) taskqueue_enqueue(rxr->tq, &rxr->rx_task); else { /* Reenable this interrupt */ E1000_WRITE_REG(&adapter->hw, E1000_IMS, rxr->ims); } } static void em_handle_tx(void *context, int pending) { struct tx_ring *txr = context; struct adapter *adapter = txr->adapter; struct ifnet *ifp = adapter->ifp; EM_TX_LOCK(txr); em_txeof(txr); #ifdef EM_MULTIQUEUE if (!drbr_empty(ifp, txr->br)) em_mq_start_locked(ifp, txr); #else if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) em_start_locked(ifp, txr); #endif E1000_WRITE_REG(&adapter->hw, E1000_IMS, txr->ims); EM_TX_UNLOCK(txr); } static void em_handle_link(void *context, int pending) { struct adapter *adapter = context; struct tx_ring *txr = adapter->tx_rings; struct ifnet *ifp = adapter->ifp; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) return; EM_CORE_LOCK(adapter); callout_stop(&adapter->timer); em_update_link_status(adapter); callout_reset(&adapter->timer, hz, em_local_timer, adapter); E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_LINK | E1000_IMS_LSC); if (adapter->link_active) { for (int i = 0; i < adapter->num_queues; i++, txr++) { EM_TX_LOCK(txr); #ifdef EM_MULTIQUEUE if (!drbr_empty(ifp, txr->br)) em_mq_start_locked(ifp, txr); #else if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) em_start_locked(ifp, txr); #endif EM_TX_UNLOCK(txr); } } EM_CORE_UNLOCK(adapter); } /********************************************************************* * * Media Ioctl callback * * This routine is called whenever the user queries the status of * the interface using ifconfig. * **********************************************************************/ static void em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) { struct adapter *adapter = ifp->if_softc; u_char fiber_type = IFM_1000_SX; INIT_DEBUGOUT("em_media_status: begin"); EM_CORE_LOCK(adapter); em_update_link_status(adapter); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (!adapter->link_active) { EM_CORE_UNLOCK(adapter); return; } ifmr->ifm_status |= IFM_ACTIVE; if ((adapter->hw.phy.media_type == e1000_media_type_fiber) || (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) { ifmr->ifm_active |= fiber_type | IFM_FDX; } else { switch (adapter->link_speed) { case 10: ifmr->ifm_active |= IFM_10_T; break; case 100: ifmr->ifm_active |= IFM_100_TX; break; case 1000: ifmr->ifm_active |= IFM_1000_T; break; } if (adapter->link_duplex == FULL_DUPLEX) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; } EM_CORE_UNLOCK(adapter); } /********************************************************************* * * Media Ioctl callback * * This routine is called when the user changes speed/duplex using * the media/mediaopt option with ifconfig.
* **********************************************************************/ static int em_media_change(struct ifnet *ifp) { struct adapter *adapter = ifp->if_softc; struct ifmedia *ifm = &adapter->media; INIT_DEBUGOUT("em_media_change: begin"); if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); EM_CORE_LOCK(adapter); switch (IFM_SUBTYPE(ifm->ifm_media)) { case IFM_AUTO: adapter->hw.mac.autoneg = DO_AUTO_NEG; adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; break; case IFM_1000_LX: case IFM_1000_SX: case IFM_1000_T: adapter->hw.mac.autoneg = DO_AUTO_NEG; adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; break; case IFM_100_TX: adapter->hw.mac.autoneg = FALSE; adapter->hw.phy.autoneg_advertised = 0; if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL; else adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF; break; case IFM_10_T: adapter->hw.mac.autoneg = FALSE; adapter->hw.phy.autoneg_advertised = 0; if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL; else adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF; break; default: device_printf(adapter->dev, "Unsupported media type\n"); } em_init_locked(adapter); EM_CORE_UNLOCK(adapter); return (0); } /********************************************************************* * * This routine maps the mbufs to tx descriptors. * * return 0 on success, positive on failure **********************************************************************/ static int em_xmit(struct tx_ring *txr, struct mbuf **m_headp) { struct adapter *adapter = txr->adapter; bus_dma_segment_t segs[EM_MAX_SCATTER]; bus_dmamap_t map; struct em_txbuffer *tx_buffer, *tx_buffer_mapped; struct e1000_tx_desc *ctxd = NULL; struct mbuf *m_head; struct ether_header *eh; struct ip *ip = NULL; struct tcphdr *tp = NULL; u32 txd_upper = 0, txd_lower = 0; int ip_off, poff; int nsegs, i, j, first, last = 0; int error; bool do_tso, tso_desc, remap = TRUE; m_head = *m_headp; do_tso = (m_head->m_pkthdr.csum_flags & CSUM_TSO); tso_desc = FALSE; ip_off = poff = 0; /* * Intel recommends entire IP/TCP header length reside in a single * buffer. If multiple descriptors are used to describe the IP and * TCP header, each descriptor should describe one or more * complete headers; descriptors referencing only parts of headers * are not supported. If all layer headers are not coalesced into * a single buffer, each buffer should not cross a 4KB boundary, * or be larger than the maximum read request size. * The controller also requires modifying the IP/TCP header to make * TSO work, so we first get a writable mbuf chain then coalesce the * ethernet/IP/TCP header into a single buffer to meet the requirement * of the controller. This also simplifies IP/TCP/UDP checksum * offloading, which also has similar restrictions. */ if (do_tso || m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) { if (do_tso || (m_head->m_next != NULL && m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)) { if (M_WRITABLE(*m_headp) == 0) { m_head = m_dup(*m_headp, M_NOWAIT); m_freem(*m_headp); if (m_head == NULL) { *m_headp = NULL; return (ENOBUFS); } *m_headp = m_head; } } /* * XXX * Assume IPv4, we don't have TSO/checksum offload support * for IPv6 yet.
*/ ip_off = sizeof(struct ether_header); if (m_head->m_len < ip_off) { m_head = m_pullup(m_head, ip_off); if (m_head == NULL) { *m_headp = NULL; return (ENOBUFS); } } eh = mtod(m_head, struct ether_header *); if (eh->ether_type == htons(ETHERTYPE_VLAN)) { ip_off = sizeof(struct ether_vlan_header); if (m_head->m_len < ip_off) { m_head = m_pullup(m_head, ip_off); if (m_head == NULL) { *m_headp = NULL; return (ENOBUFS); } } } if (m_head->m_len < ip_off + sizeof(struct ip)) { m_head = m_pullup(m_head, ip_off + sizeof(struct ip)); if (m_head == NULL) { *m_headp = NULL; return (ENOBUFS); } } ip = (struct ip *)(mtod(m_head, char *) + ip_off); poff = ip_off + (ip->ip_hl << 2); if (do_tso || (m_head->m_pkthdr.csum_flags & CSUM_TCP)) { if (m_head->m_len < poff + sizeof(struct tcphdr)) { m_head = m_pullup(m_head, poff + sizeof(struct tcphdr)); if (m_head == NULL) { *m_headp = NULL; return (ENOBUFS); } } tp = (struct tcphdr *)(mtod(m_head, char *) + poff); /* * TSO workaround: * pull 4 more bytes of data into it. */ if (m_head->m_len < poff + (tp->th_off << 2)) { m_head = m_pullup(m_head, poff + (tp->th_off << 2) + TSO_WORKAROUND); if (m_head == NULL) { *m_headp = NULL; return (ENOBUFS); } } ip = (struct ip *)(mtod(m_head, char *) + ip_off); tp = (struct tcphdr *)(mtod(m_head, char *) + poff); if (do_tso) { ip->ip_len = htons(m_head->m_pkthdr.tso_segsz + (ip->ip_hl << 2) + (tp->th_off << 2)); ip->ip_sum = 0; /* * The pseudo TCP checksum does not include the TCP * payload length, so the driver must recompute * the checksum here as the hardware expects to see it. * This is in adherence to Microsoft's Large * Send specification. */ tp->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htons(IPPROTO_TCP)); } } else if (m_head->m_pkthdr.csum_flags & CSUM_UDP) { if (m_head->m_len < poff + sizeof(struct udphdr)) { m_head = m_pullup(m_head, poff + sizeof(struct udphdr)); if (m_head == NULL) { *m_headp = NULL; return (ENOBUFS); } } ip = (struct ip *)(mtod(m_head, char *) + ip_off); } *m_headp = m_head; } /* * Map the packet for DMA * * Capture the first descriptor index, * this descriptor will have the index * of the EOP which is the only one that * now gets a DONE bit writeback. */ first = txr->next_avail_desc; tx_buffer = &txr->tx_buffers[first]; tx_buffer_mapped = tx_buffer; map = tx_buffer->map; retry: error = bus_dmamap_load_mbuf_sg(txr->txtag, map, *m_headp, segs, &nsegs, BUS_DMA_NOWAIT); /* * There are two types of errors we can (try) to handle: * - EFBIG means the mbuf chain was too long and bus_dma ran * out of segments. Defragment the mbuf chain and try again. * - ENOMEM means bus_dma could not obtain enough bounce buffers * at this point in time. Defer sending and try again later. * All other errors, in particular EINVAL, are fatal and prevent the * mbuf chain from ever going through. Drop it and report error. */ if (error == EFBIG && remap) { struct mbuf *m; m = m_collapse(*m_headp, M_NOWAIT, EM_MAX_SCATTER); if (m == NULL) { adapter->mbuf_defrag_failed++; m_freem(*m_headp); *m_headp = NULL; return (ENOBUFS); } *m_headp = m; /* Try it again, but only once */ remap = FALSE; goto retry; } else if (error != 0) { adapter->no_tx_dma_setup++; m_freem(*m_headp); *m_headp = NULL; return (error); } /* * TSO Hardware workaround, if this packet is not * TSO, and is only a single descriptor long, and * it follows a TSO burst, then we need to add a * sentinel descriptor to prevent premature writeback.
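On the TSO path above, th_sum is seeded with a pseudo-header checksum that deliberately leaves out the payload length, because the TSO engine folds the real per-segment length in as it cuts segments. A self-contained sketch of that seed, a simplified stand-in for what in_pseudo() accumulates (byte-order handling is glossed over for brevity):

#include <stdint.h>
#include <stdio.h>

/* Ones-complement sum of the TCP pseudo-header fields *without*
 * the length word; hardware adds the per-segment length later. */
static uint16_t
pseudo_hdr_seed(uint32_t src, uint32_t dst, uint8_t proto)
{
	uint64_t sum;

	sum = (src >> 16) + (src & 0xffff) +
	    (dst >> 16) + (dst & 0xffff) + proto;
	while (sum > 0xffff)                 /* fold carries */
		sum = (sum >> 16) + (sum & 0xffff);
	return ((uint16_t)sum);
}

int
main(void)
{
	/* 192.0.2.1 -> 192.0.2.2, protocol 6 (TCP), host order. */
	printf("seed = 0x%04x\n", pseudo_hdr_seed(0xc0000201, 0xc0000202, 6));
	return (0);
}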
*/ if ((!do_tso) && (txr->tx_tso == TRUE)) { if (nsegs == 1) tso_desc = TRUE; txr->tx_tso = FALSE; } if (nsegs > (txr->tx_avail - EM_MAX_SCATTER)) { txr->no_desc_avail++; bus_dmamap_unload(txr->txtag, map); return (ENOBUFS); } m_head = *m_headp; /* Do hardware assists */ if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { em_tso_setup(txr, m_head, ip_off, ip, tp, &txd_upper, &txd_lower); /* we need to make a final sentinel transmit desc */ tso_desc = TRUE; } else if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) em_transmit_checksum_setup(txr, m_head, ip_off, ip, &txd_upper, &txd_lower); if (m_head->m_flags & M_VLANTAG) { /* Set the vlan id. */ txd_upper |= (htole16(m_head->m_pkthdr.ether_vtag) << 16); /* Tell hardware to add tag */ txd_lower |= htole32(E1000_TXD_CMD_VLE); } i = txr->next_avail_desc; /* Set up our transmit descriptors */ for (j = 0; j < nsegs; j++) { bus_size_t seg_len; bus_addr_t seg_addr; tx_buffer = &txr->tx_buffers[i]; ctxd = &txr->tx_base[i]; seg_addr = segs[j].ds_addr; seg_len = segs[j].ds_len; /* ** TSO Workaround: ** If this is the last descriptor, we want to ** split it so we have a small final sentinel */ if (tso_desc && (j == (nsegs - 1)) && (seg_len > 8)) { seg_len -= TSO_WORKAROUND; ctxd->buffer_addr = htole64(seg_addr); ctxd->lower.data = htole32( adapter->txd_cmd | txd_lower | seg_len); ctxd->upper.data = htole32(txd_upper); if (++i == adapter->num_tx_desc) i = 0; /* Now make the sentinel */ txr->tx_avail--; ctxd = &txr->tx_base[i]; tx_buffer = &txr->tx_buffers[i]; ctxd->buffer_addr = htole64(seg_addr + seg_len); ctxd->lower.data = htole32( adapter->txd_cmd | txd_lower | TSO_WORKAROUND); ctxd->upper.data = htole32(txd_upper); last = i; if (++i == adapter->num_tx_desc) i = 0; } else { ctxd->buffer_addr = htole64(seg_addr); ctxd->lower.data = htole32( adapter->txd_cmd | txd_lower | seg_len); ctxd->upper.data = htole32(txd_upper); last = i; if (++i == adapter->num_tx_desc) i = 0; } tx_buffer->m_head = NULL; tx_buffer->next_eop = -1; } txr->next_avail_desc = i; txr->tx_avail -= nsegs; tx_buffer->m_head = m_head; /* ** Here we swap the map so the last descriptor, ** which gets the completion interrupt has the ** real map, and the first descriptor gets the ** unused map from this descriptor. */ tx_buffer_mapped->map = tx_buffer->map; tx_buffer->map = map; bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE); /* * Last Descriptor of Packet * needs End Of Packet (EOP) * and Report Status (RS) */ ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS); /* * Keep track in the first buffer which * descriptor will be written back */ tx_buffer = &txr->tx_buffers[first]; tx_buffer->next_eop = last; /* * Advance the Transmit Descriptor Tail (TDT), this tells the E1000 * that this frame is available to transmit. 
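The sentinel logic above trims TSO_WORKAROUND (4) bytes off a TSO frame's final segment and emits them as their own last descriptor; since the EOP/RS bits are set on that tiny trailing descriptor, the done-bit writeback cannot complete before the bulk of the data has been fetched. A worked example with an illustrative segment length and bus address:

#include <stdio.h>

#define TSO_WORKAROUND 4

int
main(void)
{
	/* A final 1448-byte segment at bus address 0x10000 becomes
	 * two descriptors: the bulk, then a 4-byte sentinel that
	 * carries the EOP|RS flags. */
	unsigned long addr = 0x10000;
	int seg_len = 1448;

	printf("desc0: addr 0x%lx len %d\n",
	    addr, seg_len - TSO_WORKAROUND);
	printf("desc1: addr 0x%lx len %d (sentinel, EOP|RS)\n",
	    addr + seg_len - TSO_WORKAROUND, TSO_WORKAROUND);
	return (0);
}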
*/ bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), i); return (0); } static void em_set_promisc(struct adapter *adapter) { struct ifnet *ifp = adapter->ifp; u32 reg_rctl; reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); if (ifp->if_flags & IFF_PROMISC) { reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); /* Turn this on if you want to see bad packets */ if (em_debug_sbp) reg_rctl |= E1000_RCTL_SBP; E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); } else if (ifp->if_flags & IFF_ALLMULTI) { reg_rctl |= E1000_RCTL_MPE; reg_rctl &= ~E1000_RCTL_UPE; E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); } } static void em_disable_promisc(struct adapter *adapter) { struct ifnet *ifp = adapter->ifp; u32 reg_rctl; int mcnt = 0; reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); reg_rctl &= (~E1000_RCTL_UPE); if (ifp->if_flags & IFF_ALLMULTI) mcnt = MAX_NUM_MULTICAST_ADDRESSES; else { struct ifmultiaddr *ifma; #if __FreeBSD_version < 800000 IF_ADDR_LOCK(ifp); #else if_maddr_rlock(ifp); #endif TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) break; mcnt++; } #if __FreeBSD_version < 800000 IF_ADDR_UNLOCK(ifp); #else if_maddr_runlock(ifp); #endif } /* Don't disable if in MAX groups */ if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) reg_rctl &= (~E1000_RCTL_MPE); reg_rctl &= (~E1000_RCTL_SBP); E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); } /********************************************************************* * Multicast Update * * This routine is called whenever multicast address list is updated. * **********************************************************************/ static void em_set_multi(struct adapter *adapter) { struct ifnet *ifp = adapter->ifp; struct ifmultiaddr *ifma; u32 reg_rctl = 0; u8 *mta; /* Multicast array memory */ int mcnt = 0; IOCTL_DEBUGOUT("em_set_multi: begin"); mta = adapter->mta; bzero(mta, sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES); if (adapter->hw.mac.type == e1000_82542 && adapter->hw.revision_id == E1000_REVISION_2) { reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) e1000_pci_clear_mwi(&adapter->hw); reg_rctl |= E1000_RCTL_RST; E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); msec_delay(5); } #if __FreeBSD_version < 800000 IF_ADDR_LOCK(ifp); #else if_maddr_rlock(ifp); #endif TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) break; bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN); mcnt++; } #if __FreeBSD_version < 800000 IF_ADDR_UNLOCK(ifp); #else if_maddr_runlock(ifp); #endif if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) { reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); reg_rctl |= E1000_RCTL_MPE; E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); } else e1000_update_mc_addr_list(&adapter->hw, mta, mcnt); if (adapter->hw.mac.type == e1000_82542 && adapter->hw.revision_id == E1000_REVISION_2) { reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); reg_rctl &= ~E1000_RCTL_RST; E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); msec_delay(5); if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) e1000_pci_set_mwi(&adapter->hw); } } /********************************************************************* * Timer routine * * This routine checks for 
link status and updates statistics. * **********************************************************************/ static void em_local_timer(void *arg) { struct adapter *adapter = arg; struct ifnet *ifp = adapter->ifp; struct tx_ring *txr = adapter->tx_rings; struct rx_ring *rxr = adapter->rx_rings; u32 trigger = 0; EM_CORE_LOCK_ASSERT(adapter); em_update_link_status(adapter); em_update_stats_counters(adapter); /* Reset LAA into RAR[0] on 82571 */ if ((adapter->hw.mac.type == e1000_82571) && e1000_get_laa_state_82571(&adapter->hw)) e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); /* Mask to use in the irq trigger */ if (adapter->msix_mem) { for (int i = 0; i < adapter->num_queues; i++, rxr++) trigger |= rxr->ims; rxr = adapter->rx_rings; } else trigger = E1000_ICS_RXDMT0; /* ** Check on the state of the TX queue(s), this ** can be done without the lock because its RO ** and the HUNG state will be static if set. */ for (int i = 0; i < adapter->num_queues; i++, txr++) { if (txr->busy == EM_TX_HUNG) goto hung; if (txr->busy >= EM_TX_MAXTRIES) txr->busy = EM_TX_HUNG; /* Schedule a TX tasklet if needed */ if (txr->tx_avail <= EM_MAX_SCATTER) taskqueue_enqueue(txr->tq, &txr->tx_task); } callout_reset(&adapter->timer, hz, em_local_timer, adapter); #ifndef DEVICE_POLLING /* Trigger an RX interrupt to guarantee mbuf refresh */ E1000_WRITE_REG(&adapter->hw, E1000_ICS, trigger); #endif return; hung: /* Looks like we're hung */ device_printf(adapter->dev, "Watchdog timeout Queue[%d]-- resetting\n", txr->me); em_print_debug_info(adapter); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; adapter->watchdog_events++; em_init_locked(adapter); } static void em_update_link_status(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct ifnet *ifp = adapter->ifp; device_t dev = adapter->dev; struct tx_ring *txr = adapter->tx_rings; u32 link_check = 0; /* Get the cached link value or read phy for real */ switch (hw->phy.media_type) { case e1000_media_type_copper: if (hw->mac.get_link_status) { + if (hw->mac.type == e1000_pch_spt) + msec_delay(50); /* Do the work to read phy */ e1000_check_for_link(hw); link_check = !hw->mac.get_link_status; if (link_check) /* ESB2 fix */ e1000_cfg_on_link_up(hw); } else link_check = TRUE; break; case e1000_media_type_fiber: e1000_check_for_link(hw); link_check = (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU); break; case e1000_media_type_internal_serdes: e1000_check_for_link(hw); link_check = adapter->hw.mac.serdes_has_link; break; default: case e1000_media_type_unknown: break; } /* Now check for a transition */ if (link_check && (adapter->link_active == 0)) { e1000_get_speed_and_duplex(hw, &adapter->link_speed, &adapter->link_duplex); /* Check if we must disable SPEED_MODE bit on PCI-E */ if ((adapter->link_speed != SPEED_1000) && ((hw->mac.type == e1000_82571) || (hw->mac.type == e1000_82572))) { int tarc0; tarc0 = E1000_READ_REG(hw, E1000_TARC(0)); tarc0 &= ~TARC_SPEED_MODE_BIT; E1000_WRITE_REG(hw, E1000_TARC(0), tarc0); } if (bootverbose) device_printf(dev, "Link is up %d Mbps %s\n", adapter->link_speed, ((adapter->link_duplex == FULL_DUPLEX) ? 
"Full Duplex" : "Half Duplex")); adapter->link_active = 1; adapter->smartspeed = 0; ifp->if_baudrate = adapter->link_speed * 1000000; if_link_state_change(ifp, LINK_STATE_UP); } else if (!link_check && (adapter->link_active == 1)) { ifp->if_baudrate = adapter->link_speed = 0; adapter->link_duplex = 0; if (bootverbose) device_printf(dev, "Link is Down\n"); adapter->link_active = 0; /* Link down, disable hang detection */ for (int i = 0; i < adapter->num_queues; i++, txr++) txr->busy = EM_TX_IDLE; if_link_state_change(ifp, LINK_STATE_DOWN); } } /********************************************************************* * * This routine disables all traffic on the adapter by issuing a * global reset on the MAC and deallocates TX/RX buffers. * * This routine should always be called with BOTH the CORE * and TX locks. **********************************************************************/ static void em_stop(void *arg) { struct adapter *adapter = arg; struct ifnet *ifp = adapter->ifp; struct tx_ring *txr = adapter->tx_rings; EM_CORE_LOCK_ASSERT(adapter); INIT_DEBUGOUT("em_stop: begin"); em_disable_intr(adapter); callout_stop(&adapter->timer); /* Tell the stack that the interface is no longer active */ ifp->if_drv_flags &= ~IFF_DRV_RUNNING; ifp->if_drv_flags |= IFF_DRV_OACTIVE; /* Disarm Hang Detection. */ for (int i = 0; i < adapter->num_queues; i++, txr++) { EM_TX_LOCK(txr); txr->busy = EM_TX_IDLE; EM_TX_UNLOCK(txr); } + /* I219 needs some special flushing to avoid hangs */ + if (adapter->hw.mac.type == e1000_pch_spt) + em_flush_desc_rings(adapter); + e1000_reset_hw(&adapter->hw); E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0); e1000_led_off(&adapter->hw); e1000_cleanup_led(&adapter->hw); } /********************************************************************* * * Determine hardware revision. 
* **********************************************************************/ static void em_identify_hardware(struct adapter *adapter) { device_t dev = adapter->dev; /* Make sure our PCI config space has the necessary stuff set */ pci_enable_busmaster(dev); adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); /* Save off the information about this board */ adapter->hw.vendor_id = pci_get_vendor(dev); adapter->hw.device_id = pci_get_device(dev); adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1); adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2); adapter->hw.subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); /* Do Shared Code Init and Setup */ if (e1000_set_mac_type(&adapter->hw)) { device_printf(dev, "Setup init failure\n"); return; } } static int em_allocate_pci_resources(struct adapter *adapter) { device_t dev = adapter->dev; int rid; rid = PCIR_BAR(0); adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (adapter->memory == NULL) { device_printf(dev, "Unable to allocate bus resource: memory\n"); return (ENXIO); } adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->memory); adapter->osdep.mem_bus_space_handle = rman_get_bushandle(adapter->memory); adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle; adapter->hw.back = &adapter->osdep; return (0); } /********************************************************************* * * Setup the Legacy or MSI Interrupt handler * **********************************************************************/ int em_allocate_legacy(struct adapter *adapter) { device_t dev = adapter->dev; struct tx_ring *txr = adapter->tx_rings; int error, rid = 0; /* Manually turn off all interrupts */ E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff); if (adapter->msix == 1) /* using MSI */ rid = 1; /* We allocate a single interrupt resource */ adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (adapter->res == NULL) { device_printf(dev, "Unable to allocate bus resource: " "interrupt\n"); return (ENXIO); } /* * Allocate a fast interrupt and the associated * deferred processing contexts. */ TASK_INIT(&adapter->que_task, 0, em_handle_que, adapter); adapter->tq = taskqueue_create_fast("em_taskq", M_NOWAIT, taskqueue_thread_enqueue, &adapter->tq); taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s que", device_get_nameunit(adapter->dev)); /* Use a TX-only tasklet for local timer */ TASK_INIT(&txr->tx_task, 0, em_handle_tx, txr); txr->tq = taskqueue_create_fast("em_txq", M_NOWAIT, taskqueue_thread_enqueue, &txr->tq); taskqueue_start_threads(&txr->tq, 1, PI_NET, "%s txq", device_get_nameunit(adapter->dev)); TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter); if ((error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET, em_irq_fast, NULL, adapter, &adapter->tag)) != 0) { device_printf(dev, "Failed to register fast interrupt " "handler: %d\n", error); taskqueue_free(adapter->tq); adapter->tq = NULL; return (error); } return (0); } /********************************************************************* * * Setup the MSIX Interrupt handlers * This is not really Multiqueue, rather * it's just separate interrupt vectors * for TX, RX, and Link.
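* (With EM_MULTIQUEUE enabled, em_setup_msix() below asks for five
* vectors -- two RX, two TX, and one link; otherwise it uses three.)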
* **********************************************************************/ int em_allocate_msix(struct adapter *adapter) { device_t dev = adapter->dev; struct tx_ring *txr = adapter->tx_rings; struct rx_ring *rxr = adapter->rx_rings; int error, rid, vector = 0; int cpu_id = 0; /* Make sure all interrupts are disabled */ E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff); /* First set up ring resources */ for (int i = 0; i < adapter->num_queues; i++, rxr++, vector++) { /* RX ring */ rid = vector + 1; rxr->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (rxr->res == NULL) { device_printf(dev, "Unable to allocate bus resource: " "RX MSIX Interrupt %d\n", i); return (ENXIO); } if ((error = bus_setup_intr(dev, rxr->res, INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_rx, rxr, &rxr->tag)) != 0) { device_printf(dev, "Failed to register RX handler"); return (error); } #if __FreeBSD_version >= 800504 bus_describe_intr(dev, rxr->res, rxr->tag, "rx%d", i); #endif rxr->msix = vector; if (em_last_bind_cpu < 0) em_last_bind_cpu = CPU_FIRST(); cpu_id = em_last_bind_cpu; bus_bind_intr(dev, rxr->res, cpu_id); TASK_INIT(&rxr->rx_task, 0, em_handle_rx, rxr); rxr->tq = taskqueue_create_fast("em_rxq", M_NOWAIT, taskqueue_thread_enqueue, &rxr->tq); taskqueue_start_threads(&rxr->tq, 1, PI_NET, "%s rxq (cpuid %d)", device_get_nameunit(adapter->dev), cpu_id); /* ** Set the bit to enable interrupt ** in E1000_IMS -- bits 20 and 21 ** are for RX0 and RX1, note this has ** NOTHING to do with the MSIX vector */ rxr->ims = 1 << (20 + i); adapter->ims |= rxr->ims; adapter->ivars |= (8 | rxr->msix) << (i * 4); em_last_bind_cpu = CPU_NEXT(em_last_bind_cpu); } for (int i = 0; i < adapter->num_queues; i++, txr++, vector++) { /* TX ring */ rid = vector + 1; txr->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (txr->res == NULL) { device_printf(dev, "Unable to allocate bus resource: " "TX MSIX Interrupt %d\n", i); return (ENXIO); } if ((error = bus_setup_intr(dev, txr->res, INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_tx, txr, &txr->tag)) != 0) { device_printf(dev, "Failed to register TX handler"); return (error); } #if __FreeBSD_version >= 800504 bus_describe_intr(dev, txr->res, txr->tag, "tx%d", i); #endif txr->msix = vector; if (em_last_bind_cpu < 0) em_last_bind_cpu = CPU_FIRST(); cpu_id = em_last_bind_cpu; bus_bind_intr(dev, txr->res, cpu_id); TASK_INIT(&txr->tx_task, 0, em_handle_tx, txr); txr->tq = taskqueue_create_fast("em_txq", M_NOWAIT, taskqueue_thread_enqueue, &txr->tq); taskqueue_start_threads(&txr->tq, 1, PI_NET, "%s txq (cpuid %d)", device_get_nameunit(adapter->dev), cpu_id); /* ** Set the bit to enable interrupt ** in E1000_IMS -- bits 22 and 23 ** are for TX0 and TX1, note this has ** NOTHING to do with the MSIX vector */ txr->ims = 1 << (22 + i); adapter->ims |= txr->ims; adapter->ivars |= (8 | txr->msix) << (8 + (i * 4)); em_last_bind_cpu = CPU_NEXT(em_last_bind_cpu); } /* Link interrupt */ rid = vector + 1; adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (!adapter->res) { device_printf(dev,"Unable to allocate " "bus resource: Link interrupt [%d]\n", rid); return (ENXIO); } /* Set the link handler function */ error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_link, adapter, &adapter->tag); if (error) { adapter->res = NULL; device_printf(dev, "Failed to register LINK handler"); return (error); } #if __FreeBSD_version >= 800504 bus_describe_intr(dev, adapter->res, adapter->tag, "link"); #endif 
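	/*
	** A note on the ivars packing above and below (as read from this
	** code, not from the 82574 datasheet): each interrupt cause gets
	** a 4-bit field in E1000_IVAR, where the low 3 bits select the
	** MSIX vector and the (8 | ...) sets the field's enable bit.  RX
	** queues occupy the low nibbles, TX queues start at bit 8, and
	** the link cause sits at bits 16-19.  With one queue, for example:
	**	rx0  -> ivars |= (8 | 0) << 0
	**	tx0  -> ivars |= (8 | 1) << 8
	**	link -> ivars |= (8 | 2) << 16
	*/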
adapter->linkvec = vector; adapter->ivars |= (8 | vector) << 16; adapter->ivars |= 0x80000000; return (0); } static void em_free_pci_resources(struct adapter *adapter) { device_t dev = adapter->dev; struct tx_ring *txr; struct rx_ring *rxr; int rid; /* ** Release all the queue interrupt resources: */ for (int i = 0; i < adapter->num_queues; i++) { txr = &adapter->tx_rings[i]; /* an early abort? */ if (txr == NULL) break; rid = txr->msix +1; if (txr->tag != NULL) { bus_teardown_intr(dev, txr->res, txr->tag); txr->tag = NULL; } if (txr->res != NULL) bus_release_resource(dev, SYS_RES_IRQ, rid, txr->res); rxr = &adapter->rx_rings[i]; /* an early abort? */ if (rxr == NULL) break; rid = rxr->msix +1; if (rxr->tag != NULL) { bus_teardown_intr(dev, rxr->res, rxr->tag); rxr->tag = NULL; } if (rxr->res != NULL) bus_release_resource(dev, SYS_RES_IRQ, rid, rxr->res); } if (adapter->linkvec) /* we are doing MSIX */ rid = adapter->linkvec + 1; else (adapter->msix != 0) ? (rid = 1):(rid = 0); if (adapter->tag != NULL) { bus_teardown_intr(dev, adapter->res, adapter->tag); adapter->tag = NULL; } if (adapter->res != NULL) bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res); if (adapter->msix) pci_release_msi(dev); if (adapter->msix_mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(EM_MSIX_BAR), adapter->msix_mem); if (adapter->memory != NULL) bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), adapter->memory); if (adapter->flash != NULL) bus_release_resource(dev, SYS_RES_MEMORY, EM_FLASH, adapter->flash); } /* * Setup MSI or MSI/X */ static int em_setup_msix(struct adapter *adapter) { device_t dev = adapter->dev; int val; /* Nearly always going to use one queue */ adapter->num_queues = 1; /* ** Try using MSI-X for Hartwell adapters */ if ((adapter->hw.mac.type == e1000_82574) && (em_enable_msix == TRUE)) { #ifdef EM_MULTIQUEUE adapter->num_queues = (em_num_queues == 1) ? 1 : 2; if (adapter->num_queues > 1) em_enable_vectors_82574(adapter); #endif /* Map the MSIX BAR */ int rid = PCIR_BAR(EM_MSIX_BAR); adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (adapter->msix_mem == NULL) { /* May not be enabled */ device_printf(adapter->dev, "Unable to map MSIX table \n"); goto msi; } val = pci_msix_count(dev); #ifdef EM_MULTIQUEUE /* We need 5 vectors in the multiqueue case */ if (adapter->num_queues > 1 ) { if (val >= 5) val = 5; else { adapter->num_queues = 1; device_printf(adapter->dev, "Insufficient MSIX vectors for >1 queue, " "using single queue...\n"); goto msix_one; } } else { msix_one: #endif if (val >= 3) val = 3; else { device_printf(adapter->dev, "Insufficient MSIX vectors, using MSI\n"); goto msi; } #ifdef EM_MULTIQUEUE } #endif if ((pci_alloc_msix(dev, &val) == 0)) { device_printf(adapter->dev, "Using MSIX interrupts " "with %d vectors\n", val); return (val); } /* ** If MSIX alloc failed or provided us with ** less than needed, free and fall through to MSI */ pci_release_msi(dev); } msi: if (adapter->msix_mem != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(EM_MSIX_BAR), adapter->msix_mem); adapter->msix_mem = NULL; } val = 1; if (pci_alloc_msi(dev, &val) == 0) { device_printf(adapter->dev, "Using an MSI interrupt\n"); return (val); } /* Should only happen due to manual configuration */ device_printf(adapter->dev,"No MSI/MSIX using a Legacy IRQ\n"); return (0); } +/* +** The 3 following flush routines are used as a workaround in the +** I219 client parts and only for them. 
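+**
+** They are invoked from em_stop() and em_reset() just before the
+** e1000_reset_hw() call whenever hw->mac.type == e1000_pch_spt.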
+** +** em_flush_tx_ring - remove all descriptors from the tx_ring +** +** We want to clear all pending descriptors from the TX ring; +** zeroing happens when the HW reads the regs. We assign the ring itself as +** the data of the next descriptor. We don't care about the data, since we +** are about to reset the HW. +*/ +static void +em_flush_tx_ring(struct adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct tx_ring *txr = adapter->tx_rings; + struct e1000_tx_desc *txd; + u32 tctl, txd_lower = E1000_TXD_CMD_IFCS; + u16 size = 512; + + tctl = E1000_READ_REG(hw, E1000_TCTL); + E1000_WRITE_REG(hw, E1000_TCTL, tctl | E1000_TCTL_EN); + + txd = &txr->tx_base[txr->next_avail_desc++]; + if (txr->next_avail_desc == adapter->num_tx_desc) + txr->next_avail_desc = 0; + + /* Just use the ring as a dummy buffer addr */ + txd->buffer_addr = txr->txdma.dma_paddr; + txd->lower.data = htole32(txd_lower | size); + txd->upper.data = 0; + + /* flush descriptors to memory before notifying the HW */ + wmb(); + + E1000_WRITE_REG(hw, E1000_TDT(0), txr->next_avail_desc); + mb(); + usec_delay(250); +} + +/* +** em_flush_rx_ring - remove all descriptors from the rx_ring +** +** Mark all descriptors in the RX ring as consumed and disable the rx ring +*/ +static void +em_flush_rx_ring(struct adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl, rxdctl; + + rctl = E1000_READ_REG(hw, E1000_RCTL); + E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); + E1000_WRITE_FLUSH(hw); + usec_delay(150); + + rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0)); + /* zero the lower 14 bits (prefetch and host thresholds) */ + rxdctl &= 0xffffc000; + /* + * update thresholds: prefetch threshold to 31, host threshold to 1 + * and make sure the granularity is "descriptors" and not "cache lines" + */ + rxdctl |= (0x1F | (1 << 8) | E1000_RXDCTL_THRESH_UNIT_DESC); + E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl); + + /* momentarily enable the RX ring for the changes to take effect */ + E1000_WRITE_REG(hw, E1000_RCTL, rctl | E1000_RCTL_EN); + E1000_WRITE_FLUSH(hw); + usec_delay(150); + E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); +} + +/* +** em_flush_desc_rings - remove all descriptors from the descriptor rings +** +** On I219, the descriptor rings must be emptied before resetting the HW +** or before changing the device state to D3 during runtime (runtime PM). +** +** Failure to do this will cause the HW to enter a unit hang state which +** can only be released by a PCI reset of the device +** +*/ +static void +em_flush_desc_rings(struct adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + device_t dev = adapter->dev; + u16 hang_state; + u32 fext_nvm11, tdlen; + + /* First, disable MULR fix in FEXTNVM11 */ + fext_nvm11 = E1000_READ_REG(hw, E1000_FEXTNVM11); + fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX; + E1000_WRITE_REG(hw, E1000_FEXTNVM11, fext_nvm11); + + /* do nothing if we're not in a faulty state, or if the queue is empty */ + tdlen = E1000_READ_REG(hw, E1000_TDLEN(0)); + hang_state = pci_read_config(dev, PCICFG_DESC_RING_STATUS, 2); + if (!(hang_state & FLUSH_DESC_REQUIRED) || !tdlen) + return; + em_flush_tx_ring(adapter); + + /* recheck, maybe the fault is caused by the rx ring */ + hang_state = pci_read_config(dev, PCICFG_DESC_RING_STATUS, 2); + if (hang_state & FLUSH_DESC_REQUIRED) + em_flush_rx_ring(adapter); +} + + /********************************************************************* * * Initialize the hardware to a configuration * as specified by the adapter structure.
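* (em_reset() computes the packet buffer (PBA) split and the flow
* control watermarks, flushes the I219 descriptor rings when needed,
* then issues the global reset and re-initializes the hardware.)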
* **********************************************************************/ static void em_reset(struct adapter *adapter) { device_t dev = adapter->dev; struct ifnet *ifp = adapter->ifp; struct e1000_hw *hw = &adapter->hw; u16 rx_buffer_size; u32 pba; INIT_DEBUGOUT("em_reset: begin"); /* Set up smart power down as default off on newer adapters. */ if (!em_smart_pwr_down && (hw->mac.type == e1000_82571 || hw->mac.type == e1000_82572)) { u16 phy_tmp = 0; /* Speed up time to link by disabling smart power down. */ e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_tmp); phy_tmp &= ~IGP02E1000_PM_SPD; e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_tmp); } /* * Packet Buffer Allocation (PBA) * Writing PBA sets the receive portion of the buffer; * the remainder is used for the transmit buffer. */ switch (hw->mac.type) { /* Total Packet Buffer on these is 48K */ case e1000_82571: case e1000_82572: case e1000_80003es2lan: pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */ break; case e1000_82573: /* 82573: Total Packet Buffer is 32K */ pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */ break; case e1000_82574: case e1000_82583: pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */ break; case e1000_ich8lan: pba = E1000_PBA_8K; break; case e1000_ich9lan: case e1000_ich10lan: /* Boost Receive side for jumbo frames */ if (adapter->hw.mac.max_frame_size > 4096) pba = E1000_PBA_14K; else pba = E1000_PBA_10K; break; case e1000_pchlan: case e1000_pch2lan: case e1000_pch_lpt: + case e1000_pch_spt: pba = E1000_PBA_26K; break; default: if (adapter->hw.mac.max_frame_size > 8192) pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */ else pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */ } E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba); /* * These parameters control the automatic generation (Tx) and * response (Rx) to Ethernet PAUSE frames. * - High water mark should allow for at least two frames to be * received after sending an XOFF. * - Low water mark works best when it is very near the high water mark. * This allows the receiver to restart by sending XON when it has * drained a bit. Here we use an arbitrary value of 1500 which will * restart after one full frame is pulled from the buffer. There * could be several smaller frames in the buffer and if so they will * not trigger the XON until their total number reduces the buffer * by 1500. * (With the default 48K PBA and a 1518-byte max frame, for example, * this works out to high_water = 49152 - 2048 = 47104 and * low_water = 45604.) * - The pause time is fairly large at 1000 x 512ns = 512 usec. */ rx_buffer_size = ((E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10 ); hw->fc.high_water = rx_buffer_size - roundup2(adapter->hw.mac.max_frame_size, 1024); hw->fc.low_water = hw->fc.high_water - 1500; if (adapter->fc) /* locally set flow control value?
*/ hw->fc.requested_mode = adapter->fc; else hw->fc.requested_mode = e1000_fc_full; if (hw->mac.type == e1000_80003es2lan) hw->fc.pause_time = 0xFFFF; else hw->fc.pause_time = EM_FC_PAUSE_TIME; hw->fc.send_xon = TRUE; /* Device specific overrides/settings */ switch (hw->mac.type) { case e1000_pchlan: /* Workaround: no TX flow ctrl for PCH */ hw->fc.requested_mode = e1000_fc_rx_pause; hw->fc.pause_time = 0xFFFF; /* override */ if (ifp->if_mtu > ETHERMTU) { hw->fc.high_water = 0x3500; hw->fc.low_water = 0x1500; } else { hw->fc.high_water = 0x5000; hw->fc.low_water = 0x3000; } hw->fc.refresh_time = 0x1000; break; case e1000_pch2lan: case e1000_pch_lpt: + case e1000_pch_spt: hw->fc.high_water = 0x5C20; hw->fc.low_water = 0x5048; hw->fc.pause_time = 0x0650; hw->fc.refresh_time = 0x0400; /* Jumbos need adjusted PBA */ if (ifp->if_mtu > ETHERMTU) E1000_WRITE_REG(hw, E1000_PBA, 12); else E1000_WRITE_REG(hw, E1000_PBA, 26); break; case e1000_ich9lan: case e1000_ich10lan: if (ifp->if_mtu > ETHERMTU) { hw->fc.high_water = 0x2800; hw->fc.low_water = hw->fc.high_water - 8; break; } /* else fall thru */ default: if (hw->mac.type == e1000_80003es2lan) hw->fc.pause_time = 0xFFFF; break; } + /* I219 needs some special flushing to avoid hangs */ + if (hw->mac.type == e1000_pch_spt) + em_flush_desc_rings(adapter); + /* Issue a global reset */ e1000_reset_hw(hw); E1000_WRITE_REG(hw, E1000_WUC, 0); em_disable_aspm(adapter); /* and a re-init */ if (e1000_init_hw(hw) < 0) { device_printf(dev, "Hardware Initialization Failed\n"); return; } E1000_WRITE_REG(hw, E1000_VET, ETHERTYPE_VLAN); e1000_get_phy_info(hw); e1000_check_for_link(hw); return; } /********************************************************************* * * Setup networking device structure and register an interface. * **********************************************************************/ static int em_setup_interface(device_t dev, struct adapter *adapter) { struct ifnet *ifp; INIT_DEBUGOUT("em_setup_interface: begin"); ifp = adapter->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "cannot allocate ifnet structure\n"); return (-1); } if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_init = em_init; ifp->if_softc = adapter; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = em_ioctl; /* TSO parameters */ ifp->if_hw_tsomax = IP_MAXPACKET; ifp->if_hw_tsomaxsegcount = EM_MAX_SCATTER; ifp->if_hw_tsomaxsegsize = EM_TSO_SEG_SIZE; #ifdef EM_MULTIQUEUE /* Multiqueue stack interface */ ifp->if_transmit = em_mq_start; ifp->if_qflush = em_qflush; #else ifp->if_start = em_start; IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1); ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1; IFQ_SET_READY(&ifp->if_snd); #endif ether_ifattach(ifp, adapter->hw.mac.addr); ifp->if_capabilities = ifp->if_capenable = 0; ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM; ifp->if_capabilities |= IFCAP_TSO4; /* * Tell the upper layer(s) we * support full VLAN capability */ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU; ifp->if_capenable = ifp->if_capabilities; /* ** Don't turn this on by default: if vlans are ** created on another pseudo device (e.g. lagg) ** then vlan events are not passed through, breaking ** operation, but with HW FILTER off it works. If ** using vlans directly on the em driver you can ** enable this and get full hardware tag filtering.
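** (IFCAP_VLAN_HWFILTER can be toggled from userland at runtime,
** e.g. with ifconfig's vlanhwfilter option.)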
*/ ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif /* Enable only WOL MAGIC by default */ if (adapter->wol) { ifp->if_capabilities |= IFCAP_WOL; ifp->if_capenable |= IFCAP_WOL_MAGIC; } /* * Specify the media types supported by this adapter and register * callbacks to update media and link information */ ifmedia_init(&adapter->media, IFM_IMASK, em_media_change, em_media_status); if ((adapter->hw.phy.media_type == e1000_media_type_fiber) || (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) { u_char fiber_type = IFM_1000_SX; /* default type */ ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL); } else { ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL); if (adapter->hw.phy.type != e1000_phy_ife) { ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL); } } ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); return (0); } /* * Manage DMA'able memory. */ static void em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { if (error) return; *(bus_addr_t *) arg = segs[0].ds_addr; } static int em_dma_malloc(struct adapter *adapter, bus_size_t size, struct em_dma_alloc *dma, int mapflags) { int error; error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */ EM_DBA_ALIGN, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ size, /* maxsize */ 1, /* nsegments */ size, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &dma->dma_tag); if (error) { device_printf(adapter->dev, "%s: bus_dma_tag_create failed: %d\n", __func__, error); goto fail_0; } error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map); if (error) { device_printf(adapter->dev, "%s: bus_dmamem_alloc(%ju) failed: %d\n", __func__, (uintmax_t)size, error); /* nothing was allocated, so skip bus_dmamem_free() */ goto fail_1; } dma->dma_paddr = 0; error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size, em_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT); if (error || dma->dma_paddr == 0) { device_printf(adapter->dev, "%s: bus_dmamap_load failed: %d\n", __func__, error); goto fail_3; } return (0); fail_3: bus_dmamap_unload(dma->dma_tag, dma->dma_map); fail_2: bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); fail_1: bus_dma_tag_destroy(dma->dma_tag); fail_0: dma->dma_tag = NULL; return (error); } static void em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma) { if (dma->dma_tag == NULL) return; if (dma->dma_paddr != 0) { bus_dmamap_sync(dma->dma_tag, dma->dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(dma->dma_tag, dma->dma_map); dma->dma_paddr = 0; } if (dma->dma_vaddr != NULL) { bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); dma->dma_vaddr = NULL; } bus_dma_tag_destroy(dma->dma_tag); dma->dma_tag = NULL; } /********************************************************************* * * Allocate memory for the transmit and receive rings, and then * the descriptors associated with each, called only once at attach.
* **********************************************************************/ static int em_allocate_queues(struct adapter *adapter) { device_t dev = adapter->dev; struct tx_ring *txr = NULL; struct rx_ring *rxr = NULL; int rsize, tsize, error = E1000_SUCCESS; int txconf = 0, rxconf = 0; /* Allocate the TX ring struct memory */ if (!(adapter->tx_rings = (struct tx_ring *) malloc(sizeof(struct tx_ring) * adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate TX ring memory\n"); error = ENOMEM; goto fail; } /* Now allocate the RX */ if (!(adapter->rx_rings = (struct rx_ring *) malloc(sizeof(struct rx_ring) * adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate RX ring memory\n"); error = ENOMEM; goto rx_fail; } tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc), EM_DBA_ALIGN); /* * Now set up the TX queues, txconf is needed to handle the * possibility that things fail midcourse and we need to * undo memory gracefully */ for (int i = 0; i < adapter->num_queues; i++, txconf++) { /* Set up some basics */ txr = &adapter->tx_rings[i]; txr->adapter = adapter; txr->me = i; /* Initialize the TX lock */ snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)", device_get_nameunit(dev), txr->me); mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF); if (em_dma_malloc(adapter, tsize, &txr->txdma, BUS_DMA_NOWAIT)) { device_printf(dev, "Unable to allocate TX Descriptor memory\n"); error = ENOMEM; goto err_tx_desc; } txr->tx_base = (struct e1000_tx_desc *)txr->txdma.dma_vaddr; bzero((void *)txr->tx_base, tsize); if (em_allocate_transmit_buffers(txr)) { device_printf(dev, "Critical Failure setting up transmit buffers\n"); error = ENOMEM; goto err_tx_desc; } #if __FreeBSD_version >= 800000 /* Allocate a buf ring */ txr->br = buf_ring_alloc(4096, M_DEVBUF, M_WAITOK, &txr->tx_mtx); #endif } /* * Next the RX queues... */ rsize = roundup2(adapter->num_rx_desc * sizeof(union e1000_rx_desc_extended), EM_DBA_ALIGN); for (int i = 0; i < adapter->num_queues; i++, rxconf++) { rxr = &adapter->rx_rings[i]; rxr->adapter = adapter; rxr->me = i; /* Initialize the RX lock */ snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)", device_get_nameunit(dev), rxr->me); mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF); if (em_dma_malloc(adapter, rsize, &rxr->rxdma, BUS_DMA_NOWAIT)) { device_printf(dev, "Unable to allocate RX Descriptor memory\n"); error = ENOMEM; goto err_rx_desc; } rxr->rx_base = (union e1000_rx_desc_extended *)rxr->rxdma.dma_vaddr; bzero((void *)rxr->rx_base, rsize); /* Allocate receive buffers for the ring */ if (em_allocate_receive_buffers(rxr)) { device_printf(dev, "Critical Failure setting up receive buffers\n"); error = ENOMEM; goto err_rx_desc; } } return (0); err_rx_desc: for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--) em_dma_free(adapter, &rxr->rxdma); err_tx_desc: for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--) em_dma_free(adapter, &txr->txdma); free(adapter->rx_rings, M_DEVBUF); rx_fail: #if __FreeBSD_version >= 800000 if (txr != NULL && txr->br != NULL) buf_ring_free(txr->br, M_DEVBUF); #endif free(adapter->tx_rings, M_DEVBUF); fail: return (error); } /********************************************************************* * * Allocate memory for tx_buffer structures. The tx_buffer stores all * the information needed to transmit a packet on the wire. This is * called only once at attach, setup is done every reset.
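* (Each tx_buffer carries the head of an mbuf chain, its busdma map,
* and next_eop, the index of the packet's last descriptor, which
* em_txeof() uses to walk completed packets.)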
* **********************************************************************/ static int em_allocate_transmit_buffers(struct tx_ring *txr) { struct adapter *adapter = txr->adapter; device_t dev = adapter->dev; struct em_txbuffer *txbuf; int error, i; /* * Setup DMA descriptor areas. */ if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ EM_TSO_SIZE, /* maxsize */ EM_MAX_SCATTER, /* nsegments */ PAGE_SIZE, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &txr->txtag))) { device_printf(dev,"Unable to allocate TX DMA tag\n"); goto fail; } if (!(txr->tx_buffers = (struct em_txbuffer *) malloc(sizeof(struct em_txbuffer) * adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate tx_buffer memory\n"); error = ENOMEM; goto fail; } /* Create the descriptor buffer dma maps */ txbuf = txr->tx_buffers; for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) { error = bus_dmamap_create(txr->txtag, 0, &txbuf->map); if (error != 0) { device_printf(dev, "Unable to create TX DMA map\n"); goto fail; } } return 0; fail: /* We free all, it handles case where we are in the middle */ em_free_transmit_structures(adapter); return (error); } /********************************************************************* * * Initialize a transmit ring. * **********************************************************************/ static void em_setup_transmit_ring(struct tx_ring *txr) { struct adapter *adapter = txr->adapter; struct em_txbuffer *txbuf; int i; #ifdef DEV_NETMAP struct netmap_adapter *na = NA(adapter->ifp); struct netmap_slot *slot; #endif /* DEV_NETMAP */ /* Clear the old descriptor contents */ EM_TX_LOCK(txr); #ifdef DEV_NETMAP slot = netmap_reset(na, NR_TX, txr->me, 0); #endif /* DEV_NETMAP */ bzero((void *)txr->tx_base, (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc); /* Reset indices */ txr->next_avail_desc = 0; txr->next_to_clean = 0; /* Free any existing tx buffers. */ txbuf = txr->tx_buffers; for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) { if (txbuf->m_head != NULL) { bus_dmamap_sync(txr->txtag, txbuf->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txr->txtag, txbuf->map); m_freem(txbuf->m_head); txbuf->m_head = NULL; } #ifdef DEV_NETMAP if (slot) { int si = netmap_idx_n2k(&na->tx_rings[txr->me], i); uint64_t paddr; void *addr; addr = PNMB(na, slot + si, &paddr); txr->tx_base[i].buffer_addr = htole64(paddr); /* reload the map for netmap mode */ netmap_load_map(na, txr->txtag, txbuf->map, addr); } #endif /* DEV_NETMAP */ /* clear the watch index */ txbuf->next_eop = -1; } /* Set number of descriptors available */ txr->tx_avail = adapter->num_tx_desc; txr->busy = EM_TX_IDLE; /* Clear checksum offload context. */ txr->last_hw_offload = 0; txr->last_hw_ipcss = 0; txr->last_hw_ipcso = 0; txr->last_hw_tucss = 0; txr->last_hw_tucso = 0; bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); EM_TX_UNLOCK(txr); } /********************************************************************* * * Initialize all transmit rings. 
* **********************************************************************/ static void em_setup_transmit_structures(struct adapter *adapter) { struct tx_ring *txr = adapter->tx_rings; for (int i = 0; i < adapter->num_queues; i++, txr++) em_setup_transmit_ring(txr); return; } /********************************************************************* * * Enable transmit unit. * **********************************************************************/ static void em_initialize_transmit_unit(struct adapter *adapter) { struct tx_ring *txr = adapter->tx_rings; struct e1000_hw *hw = &adapter->hw; u32 tctl, txdctl = 0, tarc, tipg = 0; INIT_DEBUGOUT("em_initialize_transmit_unit: begin"); for (int i = 0; i < adapter->num_queues; i++, txr++) { u64 bus_addr = txr->txdma.dma_paddr; /* Base and Len of TX Ring */ E1000_WRITE_REG(hw, E1000_TDLEN(i), adapter->num_tx_desc * sizeof(struct e1000_tx_desc)); E1000_WRITE_REG(hw, E1000_TDBAH(i), (u32)(bus_addr >> 32)); E1000_WRITE_REG(hw, E1000_TDBAL(i), (u32)bus_addr); /* Init the HEAD/TAIL indices */ E1000_WRITE_REG(hw, E1000_TDT(i), 0); E1000_WRITE_REG(hw, E1000_TDH(i), 0); HW_DEBUGOUT2("Base = %x, Length = %x\n", E1000_READ_REG(&adapter->hw, E1000_TDBAL(i)), E1000_READ_REG(&adapter->hw, E1000_TDLEN(i))); txr->busy = EM_TX_IDLE; txdctl = 0; /* clear txdctl */ txdctl |= 0x1f; /* PTHRESH */ txdctl |= 1 << 8; /* HTHRESH */ txdctl |= 1 << 16;/* WTHRESH */ txdctl |= 1 << 22; /* Reserved bit 22 must always be 1 */ txdctl |= E1000_TXDCTL_GRAN; txdctl |= 1 << 25; /* LWTHRESH */ E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl); } /* Set the default values for the Tx Inter Packet Gap timer */ switch (adapter->hw.mac.type) { case e1000_80003es2lan: tipg = DEFAULT_82543_TIPG_IPGR1; tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; break; default: if ((adapter->hw.phy.media_type == e1000_media_type_fiber) || (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) tipg = DEFAULT_82543_TIPG_IPGT_FIBER; else tipg = DEFAULT_82543_TIPG_IPGT_COPPER; tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; } E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg); E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value); if(adapter->hw.mac.type >= e1000_82540) E1000_WRITE_REG(&adapter->hw, E1000_TADV, adapter->tx_abs_int_delay.value); if ((adapter->hw.mac.type == e1000_82571) || (adapter->hw.mac.type == e1000_82572)) { tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0)); tarc |= TARC_SPEED_MODE_BIT; E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc); } else if (adapter->hw.mac.type == e1000_80003es2lan) { /* errata: program both queues to unweighted RR */ tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0)); tarc |= 1; E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc); tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1)); tarc |= 1; E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc); } else if (adapter->hw.mac.type == e1000_82574) { tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0)); tarc |= TARC_ERRATA_BIT; if ( adapter->num_queues > 1) { tarc |= (TARC_COMPENSATION_MODE | TARC_MQ_FIX); E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc); E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc); } else E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc); } adapter->txd_cmd = E1000_TXD_CMD_IFCS; if (adapter->tx_int_delay.value > 0) adapter->txd_cmd |= E1000_TXD_CMD_IDE; /* Program the Transmit Control Register */ tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL); tctl &= ~E1000_TCTL_CT; tctl |= (E1000_TCTL_PSP | 
E1000_TCTL_RTLC | E1000_TCTL_EN | (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT)); if (adapter->hw.mac.type >= e1000_82571) tctl |= E1000_TCTL_MULR; /* This write will effectively turn on the transmit unit. */ E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl); + if (hw->mac.type == e1000_pch_spt) { + u32 reg; + reg = E1000_READ_REG(hw, E1000_IOSFPC); + reg |= E1000_RCTL_RDMTS_HEX; + E1000_WRITE_REG(hw, E1000_IOSFPC, reg); + reg = E1000_READ_REG(hw, E1000_TARC(0)); + reg |= E1000_TARC0_CB_MULTIQ_3_REQ; + E1000_WRITE_REG(hw, E1000_TARC(0), reg); + } } /********************************************************************* * * Free all transmit rings. * **********************************************************************/ static void em_free_transmit_structures(struct adapter *adapter) { struct tx_ring *txr = adapter->tx_rings; for (int i = 0; i < adapter->num_queues; i++, txr++) { EM_TX_LOCK(txr); em_free_transmit_buffers(txr); em_dma_free(adapter, &txr->txdma); EM_TX_UNLOCK(txr); EM_TX_LOCK_DESTROY(txr); } free(adapter->tx_rings, M_DEVBUF); } /********************************************************************* * * Free transmit ring related data structures. * **********************************************************************/ static void em_free_transmit_buffers(struct tx_ring *txr) { struct adapter *adapter = txr->adapter; struct em_txbuffer *txbuf; INIT_DEBUGOUT("free_transmit_ring: begin"); if (txr->tx_buffers == NULL) return; for (int i = 0; i < adapter->num_tx_desc; i++) { txbuf = &txr->tx_buffers[i]; if (txbuf->m_head != NULL) { bus_dmamap_sync(txr->txtag, txbuf->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txr->txtag, txbuf->map); m_freem(txbuf->m_head); txbuf->m_head = NULL; if (txbuf->map != NULL) { bus_dmamap_destroy(txr->txtag, txbuf->map); txbuf->map = NULL; } } else if (txbuf->map != NULL) { bus_dmamap_unload(txr->txtag, txbuf->map); bus_dmamap_destroy(txr->txtag, txbuf->map); txbuf->map = NULL; } } #if __FreeBSD_version >= 800000 if (txr->br != NULL) buf_ring_free(txr->br, M_DEVBUF); #endif if (txr->tx_buffers != NULL) { free(txr->tx_buffers, M_DEVBUF); txr->tx_buffers = NULL; } if (txr->txtag != NULL) { bus_dma_tag_destroy(txr->txtag); txr->txtag = NULL; } return; } /********************************************************************* * The offload context is protocol specific (TCP/UDP) and thus * only needs to be set when the protocol changes. A context * change can be a performance detriment, and it might be * better to avoid one. The reason arises in the way * in which the controller supports pipelined requests from the * Tx data DMA. Up to four requests can be pipelined, and they may * belong to the same packet or to multiple packets. However all * requests for one packet are issued before a request is issued * for a subsequent packet, and if a request for the next packet * requires a context change, that request will be stalled * until the previous request completes. This means setting up * a new context effectively disables pipelined Tx data DMA, which * in turn greatly slows down performance when sending small-sized * frames.
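* (That trade-off is what the caching below implements: on single-queue
* adapters em_transmit_checksum_setup() remembers the last ipcss/ipcso/
* tucss/tucso values and skips emitting a redundant context descriptor
* when nothing has changed.)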
**********************************************************************/ static void em_transmit_checksum_setup(struct tx_ring *txr, struct mbuf *mp, int ip_off, struct ip *ip, u32 *txd_upper, u32 *txd_lower) { struct adapter *adapter = txr->adapter; struct e1000_context_desc *TXD = NULL; struct em_txbuffer *tx_buffer; int cur, hdr_len; u32 cmd = 0; u16 offload = 0; u8 ipcso, ipcss, tucso, tucss; ipcss = ipcso = tucss = tucso = 0; hdr_len = ip_off + (ip->ip_hl << 2); cur = txr->next_avail_desc; /* Setup of IP header checksum. */ if (mp->m_pkthdr.csum_flags & CSUM_IP) { *txd_upper |= E1000_TXD_POPTS_IXSM << 8; offload |= CSUM_IP; ipcss = ip_off; ipcso = ip_off + offsetof(struct ip, ip_sum); /* * Start offset for header checksum calculation. * End offset for header checksum calculation. * Offset of place to put the checksum. */ TXD = (struct e1000_context_desc *)&txr->tx_base[cur]; TXD->lower_setup.ip_fields.ipcss = ipcss; TXD->lower_setup.ip_fields.ipcse = htole16(hdr_len); TXD->lower_setup.ip_fields.ipcso = ipcso; cmd |= E1000_TXD_CMD_IP; } if (mp->m_pkthdr.csum_flags & CSUM_TCP) { *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; *txd_upper |= E1000_TXD_POPTS_TXSM << 8; offload |= CSUM_TCP; tucss = hdr_len; tucso = hdr_len + offsetof(struct tcphdr, th_sum); /* * The 82574L can only remember the *last* context used * regardless of the queue that it was used for. We cannot reuse * contexts on this hardware platform and must generate a new * context every time. 82574L hardware spec, section 7.2.6, * second note. */ if (adapter->num_queues < 2) { /* * Setting up a new checksum offload context for every * frame takes a lot of processing time for the hardware. * This also reduces performance a lot for small-sized * frames, so avoid it if the driver can use a previously * configured checksum offload context. */ if (txr->last_hw_offload == offload) { if (offload & CSUM_IP) { if (txr->last_hw_ipcss == ipcss && txr->last_hw_ipcso == ipcso && txr->last_hw_tucss == tucss && txr->last_hw_tucso == tucso) return; } else { if (txr->last_hw_tucss == tucss && txr->last_hw_tucso == tucso) return; } } txr->last_hw_offload = offload; txr->last_hw_tucss = tucss; txr->last_hw_tucso = tucso; } /* * Start offset for payload checksum calculation. * End offset for payload checksum calculation. * Offset of place to put the checksum. */ TXD = (struct e1000_context_desc *)&txr->tx_base[cur]; TXD->upper_setup.tcp_fields.tucss = hdr_len; TXD->upper_setup.tcp_fields.tucse = htole16(0); TXD->upper_setup.tcp_fields.tucso = tucso; cmd |= E1000_TXD_CMD_TCP; } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) { *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; *txd_upper |= E1000_TXD_POPTS_TXSM << 8; tucss = hdr_len; tucso = hdr_len + offsetof(struct udphdr, uh_sum); /* * The 82574L can only remember the *last* context used * regardless of the queue that it was used for. We cannot reuse * contexts on this hardware platform and must generate a new * context every time. 82574L hardware spec, section 7.2.6, * second note. */ if (adapter->num_queues < 2) { /* * Setting up a new checksum offload context for every * frame takes a lot of processing time for the hardware. * This also reduces performance a lot for small-sized * frames, so avoid it if the driver can use a previously * configured checksum offload context.
*/ if (txr->last_hw_offload == offload) { if (offload & CSUM_IP) { if (txr->last_hw_ipcss == ipcss && txr->last_hw_ipcso == ipcso && txr->last_hw_tucss == tucss && txr->last_hw_tucso == tucso) return; } else { if (txr->last_hw_tucss == tucss && txr->last_hw_tucso == tucso) return; } } txr->last_hw_offload = offload; txr->last_hw_tucss = tucss; txr->last_hw_tucso = tucso; } /* * Start offset for header checksum calculation. * End offset for header checksum calculation. * Offset of place to put the checksum. */ TXD = (struct e1000_context_desc *)&txr->tx_base[cur]; TXD->upper_setup.tcp_fields.tucss = tucss; TXD->upper_setup.tcp_fields.tucse = htole16(0); TXD->upper_setup.tcp_fields.tucso = tucso; } if (offload & CSUM_IP) { txr->last_hw_ipcss = ipcss; txr->last_hw_ipcso = ipcso; } TXD->tcp_seg_setup.data = htole32(0); TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | cmd); tx_buffer = &txr->tx_buffers[cur]; tx_buffer->m_head = NULL; tx_buffer->next_eop = -1; if (++cur == adapter->num_tx_desc) cur = 0; txr->tx_avail--; txr->next_avail_desc = cur; } /********************************************************************** * * Setup work for hardware segmentation offload (TSO) * **********************************************************************/ static void em_tso_setup(struct tx_ring *txr, struct mbuf *mp, int ip_off, struct ip *ip, struct tcphdr *tp, u32 *txd_upper, u32 *txd_lower) { struct adapter *adapter = txr->adapter; struct e1000_context_desc *TXD; struct em_txbuffer *tx_buffer; int cur, hdr_len; /* * In theory we can use the same TSO context if and only if * the frame is the same type (IP/TCP) and has the same MSS. However * checking whether a frame has the same IP/TCP structure is a * hard thing, so just ignore that and always re-establish a * new TSO context. */ hdr_len = ip_off + (ip->ip_hl << 2) + (tp->th_off << 2); *txd_lower = (E1000_TXD_CMD_DEXT | /* Extended descr type */ E1000_TXD_DTYP_D | /* Data descr type */ E1000_TXD_CMD_TSE); /* Do TSE on this packet */ /* IP and/or TCP header checksum calculation and insertion. */ *txd_upper = (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8; cur = txr->next_avail_desc; tx_buffer = &txr->tx_buffers[cur]; TXD = (struct e1000_context_desc *) &txr->tx_base[cur]; /* * Start offset for header checksum calculation. * End offset for header checksum calculation. * Offset of place to put the checksum. */ TXD->lower_setup.ip_fields.ipcss = ip_off; TXD->lower_setup.ip_fields.ipcse = htole16(ip_off + (ip->ip_hl << 2) - 1); TXD->lower_setup.ip_fields.ipcso = ip_off + offsetof(struct ip, ip_sum); /* * Start offset for payload checksum calculation. * End offset for payload checksum calculation. * Offset of place to put the checksum. */ TXD->upper_setup.tcp_fields.tucss = ip_off + (ip->ip_hl << 2); TXD->upper_setup.tcp_fields.tucse = 0; TXD->upper_setup.tcp_fields.tucso = ip_off + (ip->ip_hl << 2) + offsetof(struct tcphdr, th_sum); /* * Payload size per packet w/o any headers. * Length of all headers up to payload.
*/ TXD->tcp_seg_setup.fields.mss = htole16(mp->m_pkthdr.tso_segsz); TXD->tcp_seg_setup.fields.hdr_len = hdr_len; TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT | /* Extended descr */ E1000_TXD_CMD_TSE | /* TSE context */ E1000_TXD_CMD_IP | /* Do IP csum */ E1000_TXD_CMD_TCP | /* Do TCP checksum */ (mp->m_pkthdr.len - (hdr_len))); /* Total len */ tx_buffer->m_head = NULL; tx_buffer->next_eop = -1; if (++cur == adapter->num_tx_desc) cur = 0; txr->tx_avail--; txr->next_avail_desc = cur; txr->tx_tso = TRUE; } /********************************************************************** * * Examine each tx_buffer in the used queue. If the hardware is done * processing the packet then free associated resources. The * tx_buffer is put back on the free queue. * **********************************************************************/ static void em_txeof(struct tx_ring *txr) { struct adapter *adapter = txr->adapter; int first, last, done, processed; struct em_txbuffer *tx_buffer; struct e1000_tx_desc *tx_desc, *eop_desc; struct ifnet *ifp = adapter->ifp; EM_TX_LOCK_ASSERT(txr); #ifdef DEV_NETMAP if (netmap_tx_irq(ifp, txr->me)) return; #endif /* DEV_NETMAP */ /* No work, make sure hang detection is disabled */ if (txr->tx_avail == adapter->num_tx_desc) { txr->busy = EM_TX_IDLE; return; } processed = 0; first = txr->next_to_clean; tx_desc = &txr->tx_base[first]; tx_buffer = &txr->tx_buffers[first]; last = tx_buffer->next_eop; eop_desc = &txr->tx_base[last]; /* * What this does is get the index of the * first descriptor AFTER the EOP of the * first packet, that way we can do the * simple comparison on the inner while loop. */ if (++last == adapter->num_tx_desc) last = 0; done = last; bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_POSTREAD); while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) { /* We clean the range of the packet */ while (first != done) { tx_desc->upper.data = 0; tx_desc->lower.data = 0; tx_desc->buffer_addr = 0; ++txr->tx_avail; ++processed; if (tx_buffer->m_head) { bus_dmamap_sync(txr->txtag, tx_buffer->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txr->txtag, tx_buffer->map); m_freem(tx_buffer->m_head); tx_buffer->m_head = NULL; } tx_buffer->next_eop = -1; if (++first == adapter->num_tx_desc) first = 0; tx_buffer = &txr->tx_buffers[first]; tx_desc = &txr->tx_base[first]; } ++ifp->if_opackets; /* See if we can continue to the next packet */ last = tx_buffer->next_eop; if (last != -1) { eop_desc = &txr->tx_base[last]; /* Get new done point */ if (++last == adapter->num_tx_desc) last = 0; done = last; } else break; } bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); txr->next_to_clean = first; /* ** Hang detection: we know there's work outstanding ** or the entry return would have been taken, so no ** descriptor processed here indicates a potential hang. ** The local timer will examine this and do a reset if needed. */ if (processed == 0) { if (txr->busy != EM_TX_HUNG) ++txr->busy; } else /* At least one descriptor was cleaned */ txr->busy = EM_TX_BUSY; /* note this clears HUNG */ /* * If we have a minimum free, clear IFF_DRV_OACTIVE * to tell the stack that it is OK to send packets. * Notice that all writes of OACTIVE happen under the * TX lock which, with a single queue, guarantees * sanity. 
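* (EM_MAX_SCATTER is the most descriptors one packet can consume, so
* requiring at least that many free slots should mean the next
* transmit attempt cannot fail for lack of ring space.)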
*/ if (txr->tx_avail >= EM_MAX_SCATTER) { ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } /* Disable hang detection if all clean */ if (txr->tx_avail == adapter->num_tx_desc) txr->busy = EM_TX_IDLE; } /********************************************************************* * * Refresh RX descriptor mbufs from system mbuf buffer pool. * **********************************************************************/ static void em_refresh_mbufs(struct rx_ring *rxr, int limit) { struct adapter *adapter = rxr->adapter; struct mbuf *m; bus_dma_segment_t segs; struct em_rxbuffer *rxbuf; int i, j, error, nsegs; bool cleaned = FALSE; i = j = rxr->next_to_refresh; /* ** Get one descriptor beyond ** our work mark to control ** the loop. */ if (++j == adapter->num_rx_desc) j = 0; while (j != limit) { rxbuf = &rxr->rx_buffers[i]; if (rxbuf->m_head == NULL) { m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz); /* ** If we have a temporary resource shortage ** that causes a failure, just abort refresh ** for now; we will return to this point when ** reinvoked from em_rxeof. */ if (m == NULL) goto update; } else m = rxbuf->m_head; m->m_len = m->m_pkthdr.len = adapter->rx_mbuf_sz; m->m_flags |= M_PKTHDR; m->m_data = m->m_ext.ext_buf; /* Use bus_dma machinery to setup the memory mapping */ error = bus_dmamap_load_mbuf_sg(rxr->rxtag, rxbuf->map, m, &segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { printf("Refresh mbufs: hdr dmamap load" " failure - %d\n", error); m_free(m); rxbuf->m_head = NULL; goto update; } rxbuf->m_head = m; rxbuf->paddr = segs.ds_addr; bus_dmamap_sync(rxr->rxtag, rxbuf->map, BUS_DMASYNC_PREREAD); em_setup_rxdesc(&rxr->rx_base[i], rxbuf); cleaned = TRUE; i = j; /* Next is precalculated for us */ rxr->next_to_refresh = i; /* Calculate next controlling index */ if (++j == adapter->num_rx_desc) j = 0; } update: /* ** Update the tail pointer only if we ** refreshed, and only as far as we did. */ if (cleaned) E1000_WRITE_REG(&adapter->hw, E1000_RDT(rxr->me), rxr->next_to_refresh); return; } /********************************************************************* * * Allocate memory for rx_buffer structures. Since we use one * rx_buffer per received packet, the maximum number of rx_buffers * that we'll need is equal to the number of receive descriptors * that we've allocated.
* **********************************************************************/ static int em_allocate_receive_buffers(struct rx_ring *rxr) { struct adapter *adapter = rxr->adapter; device_t dev = adapter->dev; struct em_rxbuffer *rxbuf; int error; rxr->rx_buffers = malloc(sizeof(struct em_rxbuffer) * adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO); if (rxr->rx_buffers == NULL) { device_printf(dev, "Unable to allocate rx_buffer memory\n"); return (ENOMEM); } error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MJUM9BYTES, /* maxsize */ 1, /* nsegments */ MJUM9BYTES, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &rxr->rxtag); if (error) { device_printf(dev, "%s: bus_dma_tag_create failed %d\n", __func__, error); goto fail; } rxbuf = rxr->rx_buffers; for (int i = 0; i < adapter->num_rx_desc; i++, rxbuf++) { rxbuf = &rxr->rx_buffers[i]; error = bus_dmamap_create(rxr->rxtag, 0, &rxbuf->map); if (error) { device_printf(dev, "%s: bus_dmamap_create failed: %d\n", __func__, error); goto fail; } } return (0); fail: em_free_receive_structures(adapter); return (error); } /********************************************************************* * * Initialize a receive ring and its buffers. * **********************************************************************/ static int em_setup_receive_ring(struct rx_ring *rxr) { struct adapter *adapter = rxr->adapter; struct em_rxbuffer *rxbuf; bus_dma_segment_t seg[1]; int rsize, nsegs, error = 0; #ifdef DEV_NETMAP struct netmap_adapter *na = NA(adapter->ifp); struct netmap_slot *slot; #endif /* Clear the ring contents */ EM_RX_LOCK(rxr); rsize = roundup2(adapter->num_rx_desc * sizeof(union e1000_rx_desc_extended), EM_DBA_ALIGN); bzero((void *)rxr->rx_base, rsize); #ifdef DEV_NETMAP slot = netmap_reset(na, NR_RX, 0, 0); #endif /* ** Free current RX buffer structs and their mbufs */ for (int i = 0; i < adapter->num_rx_desc; i++) { rxbuf = &rxr->rx_buffers[i]; if (rxbuf->m_head != NULL) { bus_dmamap_sync(rxr->rxtag, rxbuf->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rxr->rxtag, rxbuf->map); m_freem(rxbuf->m_head); rxbuf->m_head = NULL; /* mark as freed */ } } /* Now replenish the mbufs */ for (int j = 0; j != adapter->num_rx_desc; ++j) { rxbuf = &rxr->rx_buffers[j]; #ifdef DEV_NETMAP if (slot) { int si = netmap_idx_n2k(&na->rx_rings[rxr->me], j); uint64_t paddr; void *addr; addr = PNMB(na, slot + si, &paddr); netmap_load_map(na, rxr->rxtag, rxbuf->map, addr); em_setup_rxdesc(&rxr->rx_base[j], rxbuf); continue; } #endif /* DEV_NETMAP */ rxbuf->m_head = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz); if (rxbuf->m_head == NULL) { error = ENOBUFS; goto fail; } rxbuf->m_head->m_len = adapter->rx_mbuf_sz; rxbuf->m_head->m_flags &= ~M_HASFCS; /* we strip it */ rxbuf->m_head->m_pkthdr.len = adapter->rx_mbuf_sz; /* Get the memory mapping */ error = bus_dmamap_load_mbuf_sg(rxr->rxtag, rxbuf->map, rxbuf->m_head, seg, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { m_freem(rxbuf->m_head); rxbuf->m_head = NULL; goto fail; } bus_dmamap_sync(rxr->rxtag, rxbuf->map, BUS_DMASYNC_PREREAD); rxbuf->paddr = seg[0].ds_addr; em_setup_rxdesc(&rxr->rx_base[j], rxbuf); } rxr->next_to_check = 0; rxr->next_to_refresh = 0; bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); fail: EM_RX_UNLOCK(rxr); return (error); } 
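/*
** em_setup_rxdesc() is used above but defined elsewhere in this file.
** A minimal sketch consistent with its callers (illustrative only; the
** real routine is authoritative) would set the buffer's DMA address and
** clear the writeback status so the slot is not mistaken for completed:
**
**	static __inline void
**	em_setup_rxdesc(union e1000_rx_desc_extended *rxd,
**	    const struct em_rxbuffer *rxbuf)
**	{
**		rxd->read.buffer_addr = htole64(rxbuf->paddr);
**		rxd->wb.upper.status_error = 0;
**	}
*/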
/********************************************************************* * * Initialize all receive rings. * **********************************************************************/ static int em_setup_receive_structures(struct adapter *adapter) { struct rx_ring *rxr = adapter->rx_rings; int q; for (q = 0; q < adapter->num_queues; q++, rxr++) if (em_setup_receive_ring(rxr)) goto fail; return (0); fail: /* * Free RX buffers allocated so far; we will only handle * the rings that completed, as the failing case will have * cleaned up for itself. 'q' failed, so it's the terminus. */ for (int i = 0; i < q; ++i) { rxr = &adapter->rx_rings[i]; for (int n = 0; n < adapter->num_rx_desc; n++) { struct em_rxbuffer *rxbuf; rxbuf = &rxr->rx_buffers[n]; if (rxbuf->m_head != NULL) { bus_dmamap_sync(rxr->rxtag, rxbuf->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rxr->rxtag, rxbuf->map); m_freem(rxbuf->m_head); rxbuf->m_head = NULL; } } rxr->next_to_check = 0; rxr->next_to_refresh = 0; } return (ENOBUFS); } /********************************************************************* * * Free all receive rings. * **********************************************************************/ static void em_free_receive_structures(struct adapter *adapter) { struct rx_ring *rxr = adapter->rx_rings; for (int i = 0; i < adapter->num_queues; i++, rxr++) { em_free_receive_buffers(rxr); /* Free the ring memory as well */ em_dma_free(adapter, &rxr->rxdma); EM_RX_LOCK_DESTROY(rxr); } free(adapter->rx_rings, M_DEVBUF); } /********************************************************************* * * Free receive ring data structures * **********************************************************************/ static void em_free_receive_buffers(struct rx_ring *rxr) { struct adapter *adapter = rxr->adapter; struct em_rxbuffer *rxbuf = NULL; INIT_DEBUGOUT("free_receive_buffers: begin"); if (rxr->rx_buffers != NULL) { for (int i = 0; i < adapter->num_rx_desc; i++) { rxbuf = &rxr->rx_buffers[i]; if (rxbuf->map != NULL) { bus_dmamap_sync(rxr->rxtag, rxbuf->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rxr->rxtag, rxbuf->map); bus_dmamap_destroy(rxr->rxtag, rxbuf->map); } if (rxbuf->m_head != NULL) { m_freem(rxbuf->m_head); rxbuf->m_head = NULL; } } free(rxr->rx_buffers, M_DEVBUF); rxr->rx_buffers = NULL; rxr->next_to_check = 0; rxr->next_to_refresh = 0; } if (rxr->rxtag != NULL) { bus_dma_tag_destroy(rxr->rxtag); rxr->rxtag = NULL; } return; } /********************************************************************* * * Enable receive unit.
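* (Programs RCTL, the interrupt throttle (ITR, plus EITR on 82574),
* optional RSS, and each ring's RDLEN/RDBAH/RDBAL/RDH/RDT registers.)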
* **********************************************************************/ static void em_initialize_receive_unit(struct adapter *adapter) { struct rx_ring *rxr = adapter->rx_rings; struct ifnet *ifp = adapter->ifp; struct e1000_hw *hw = &adapter->hw; u32 rctl, rxcsum, rfctl; INIT_DEBUGOUT("em_initialize_receive_units: begin"); /* * Make sure receives are disabled while setting * up the descriptor ring */ rctl = E1000_READ_REG(hw, E1000_RCTL); /* Do not disable if ever enabled on this hardware */ if ((hw->mac.type != e1000_82574) && (hw->mac.type != e1000_82583)) E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); /* Setup the Receive Control Register */ rctl &= ~(3 << E1000_RCTL_MO_SHIFT); rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); /* Do not store bad packets */ rctl &= ~E1000_RCTL_SBP; /* Enable Long Packet receive */ if (ifp->if_mtu > ETHERMTU) rctl |= E1000_RCTL_LPE; else rctl &= ~E1000_RCTL_LPE; /* Strip the CRC */ if (!em_disable_crc_stripping) rctl |= E1000_RCTL_SECRC; E1000_WRITE_REG(&adapter->hw, E1000_RADV, adapter->rx_abs_int_delay.value); E1000_WRITE_REG(&adapter->hw, E1000_RDTR, adapter->rx_int_delay.value); /* * Set the interrupt throttling rate. Value is calculated * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) */ E1000_WRITE_REG(hw, E1000_ITR, DEFAULT_ITR); /* Use extended rx descriptor formats */ rfctl = E1000_READ_REG(hw, E1000_RFCTL); rfctl |= E1000_RFCTL_EXTEN; /* ** When using MSIX interrupts we need to throttle ** using the EITR register (82574 only) */ if (hw->mac.type == e1000_82574) { for (int i = 0; i < 4; i++) E1000_WRITE_REG(hw, E1000_EITR_82574(i), DEFAULT_ITR); /* Disable accelerated acknowledge */ rfctl |= E1000_RFCTL_ACK_DIS; } E1000_WRITE_REG(hw, E1000_RFCTL, rfctl); rxcsum = E1000_READ_REG(hw, E1000_RXCSUM); if (ifp->if_capenable & IFCAP_RXCSUM) { #ifdef EM_MULTIQUEUE rxcsum |= E1000_RXCSUM_TUOFL | E1000_RXCSUM_IPOFL | E1000_RXCSUM_PCSD; #else rxcsum |= E1000_RXCSUM_TUOFL; #endif } else rxcsum &= ~E1000_RXCSUM_TUOFL; E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum); #ifdef EM_MULTIQUEUE #define RSSKEYLEN 10 if (adapter->num_queues > 1) { uint8_t rss_key[4 * RSSKEYLEN]; uint32_t reta = 0; int i; /* * Configure RSS key */ arc4rand(rss_key, sizeof(rss_key), 0); for (i = 0; i < RSSKEYLEN; ++i) { uint32_t rssrk = 0; rssrk = EM_RSSRK_VAL(rss_key, i); E1000_WRITE_REG(hw,E1000_RSSRK(i), rssrk); } /* * Configure RSS redirect table in following fashion: * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)] */ for (i = 0; i < sizeof(reta); ++i) { uint32_t q; q = (i % adapter->num_queues) << 7; reta |= q << (8 * i); } for (i = 0; i < 32; ++i) { E1000_WRITE_REG(hw, E1000_RETA(i), reta); } E1000_WRITE_REG(hw, E1000_MRQC, E1000_MRQC_RSS_ENABLE_2Q | E1000_MRQC_RSS_FIELD_IPV4_TCP | E1000_MRQC_RSS_FIELD_IPV4 | E1000_MRQC_RSS_FIELD_IPV6_TCP_EX | E1000_MRQC_RSS_FIELD_IPV6_EX | E1000_MRQC_RSS_FIELD_IPV6); } #endif /* ** XXX TEMPORARY WORKAROUND: on some systems with 82573 ** long latencies are observed, like Lenovo X60. This ** change eliminates the problem, but since having positive ** values in RDTR is a known source of problems on other ** platforms another solution is being sought. 
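** (RDTR is in units of 1.024 usec, so the 0x20 written below should
** delay receive interrupts by roughly 33 usec.)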
*/ if (hw->mac.type == e1000_82573) E1000_WRITE_REG(hw, E1000_RDTR, 0x20); for (int i = 0; i < adapter->num_queues; i++, rxr++) { /* Setup the Base and Length of the Rx Descriptor Ring */ u64 bus_addr = rxr->rxdma.dma_paddr; u32 rdt = adapter->num_rx_desc - 1; /* default */ E1000_WRITE_REG(hw, E1000_RDLEN(i), adapter->num_rx_desc * sizeof(union e1000_rx_desc_extended)); E1000_WRITE_REG(hw, E1000_RDBAH(i), (u32)(bus_addr >> 32)); E1000_WRITE_REG(hw, E1000_RDBAL(i), (u32)bus_addr); /* Setup the Head and Tail Descriptor Pointers */ E1000_WRITE_REG(hw, E1000_RDH(i), 0); #ifdef DEV_NETMAP /* * an init() while a netmap client is active must * preserve the rx buffers passed to userspace. */ if (ifp->if_capenable & IFCAP_NETMAP) rdt -= nm_kr_rxspace(&NA(adapter->ifp)->rx_rings[i]); #endif /* DEV_NETMAP */ E1000_WRITE_REG(hw, E1000_RDT(i), rdt); } /* * Set PTHRESH for improved jumbo performance * According to 10.2.5.11 of Intel 82574 Datasheet, * RXDCTL(1) is written whenever RXDCTL(0) is written. * Only write to RXDCTL(1) if there is a need for different * settings. */ if (((adapter->hw.mac.type == e1000_ich9lan) || (adapter->hw.mac.type == e1000_pch2lan) || (adapter->hw.mac.type == e1000_ich10lan)) && (ifp->if_mtu > ETHERMTU)) { u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0)); E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3); } else if (adapter->hw.mac.type == e1000_82574) { for (int i = 0; i < adapter->num_queues; i++) { u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i)); rxdctl |= 0x20; /* PTHRESH */ rxdctl |= 4 << 8; /* HTHRESH */ rxdctl |= 4 << 16;/* WTHRESH */ rxdctl |= 1 << 24; /* Switch to granularity */ E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl); } } if (adapter->hw.mac.type >= e1000_pch2lan) { if (ifp->if_mtu > ETHERMTU) e1000_lv_jumbo_workaround_ich8lan(hw, TRUE); else e1000_lv_jumbo_workaround_ich8lan(hw, FALSE); } /* Make sure VLAN Filters are off */ rctl &= ~E1000_RCTL_VFE; if (adapter->rx_mbuf_sz == MCLBYTES) rctl |= E1000_RCTL_SZ_2048; else if (adapter->rx_mbuf_sz == MJUMPAGESIZE) rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX; else if (adapter->rx_mbuf_sz > MJUMPAGESIZE) rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX; /* ensure we clear use DTYPE of 00 here */ rctl &= ~0x00000C00; /* Write out the settings */ E1000_WRITE_REG(hw, E1000_RCTL, rctl); return; } /********************************************************************* * * This routine executes in interrupt context. It replenishes * the mbufs in the descriptor and sends data which has been * dma'ed into host memory to upper layer. * * We loop at most count times if count is > 0, or until done if * count < 0. 
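 *
 * (Editor's note: the interrupt taskqueue path appears to pass the
 * rx_process_limit tunable as count, bounding the work done per
 * invocation.)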
* * For polling we also now return the number of cleaned packets *********************************************************************/ static bool em_rxeof(struct rx_ring *rxr, int count, int *done) { struct adapter *adapter = rxr->adapter; struct ifnet *ifp = adapter->ifp; struct mbuf *mp, *sendmp; u32 status = 0; u16 len; int i, processed, rxdone = 0; bool eop; union e1000_rx_desc_extended *cur; EM_RX_LOCK(rxr); /* Sync the ring */ bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); #ifdef DEV_NETMAP if (netmap_rx_irq(ifp, rxr->me, &processed)) { EM_RX_UNLOCK(rxr); return (FALSE); } #endif /* DEV_NETMAP */ for (i = rxr->next_to_check, processed = 0; count != 0;) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) break; cur = &rxr->rx_base[i]; status = le32toh(cur->wb.upper.status_error); mp = sendmp = NULL; if ((status & E1000_RXD_STAT_DD) == 0) break; len = le16toh(cur->wb.upper.length); eop = (status & E1000_RXD_STAT_EOP) != 0; if ((status & E1000_RXDEXT_ERR_FRAME_ERR_MASK) || (rxr->discard == TRUE)) { adapter->dropped_pkts++; ++rxr->rx_discarded; if (!eop) /* Catch subsequent segs */ rxr->discard = TRUE; else rxr->discard = FALSE; em_rx_discard(rxr, i); goto next_desc; } bus_dmamap_unload(rxr->rxtag, rxr->rx_buffers[i].map); /* Assign correct length to the current fragment */ mp = rxr->rx_buffers[i].m_head; mp->m_len = len; /* Trigger for refresh */ rxr->rx_buffers[i].m_head = NULL; /* First segment? */ if (rxr->fmp == NULL) { mp->m_pkthdr.len = len; rxr->fmp = rxr->lmp = mp; } else { /* Chain mbuf's together */ mp->m_flags &= ~M_PKTHDR; rxr->lmp->m_next = mp; rxr->lmp = mp; rxr->fmp->m_pkthdr.len += len; } if (eop) { --count; sendmp = rxr->fmp; sendmp->m_pkthdr.rcvif = ifp; ifp->if_ipackets++; em_receive_checksum(status, sendmp); #ifndef __NO_STRICT_ALIGNMENT if (adapter->hw.mac.max_frame_size > (MCLBYTES - ETHER_ALIGN) && em_fixup_rx(rxr) != 0) goto skip; #endif if (status & E1000_RXD_STAT_VP) { sendmp->m_pkthdr.ether_vtag = le16toh(cur->wb.upper.vlan); sendmp->m_flags |= M_VLANTAG; } #ifndef __NO_STRICT_ALIGNMENT skip: #endif rxr->fmp = rxr->lmp = NULL; } next_desc: /* Sync the ring */ bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); /* Zero out the receive descriptors status. */ cur->wb.upper.status_error &= htole32(~0xFF); ++rxdone; /* cumulative for POLL */ ++processed; /* Advance our pointers to the next descriptor. */ if (++i == adapter->num_rx_desc) i = 0; /* Send to the stack */ if (sendmp != NULL) { rxr->next_to_check = i; EM_RX_UNLOCK(rxr); (*ifp->if_input)(ifp, sendmp); EM_RX_LOCK(rxr); i = rxr->next_to_check; } /* Only refresh mbufs every 8 descriptors */ if (processed == 8) { em_refresh_mbufs(rxr, i); processed = 0; } } /* Catch any remaining refresh work */ if (e1000_rx_unrefreshed(rxr)) em_refresh_mbufs(rxr, i); rxr->next_to_check = i; if (done != NULL) *done = rxdone; EM_RX_UNLOCK(rxr); return ((status & E1000_RXD_STAT_DD) ? TRUE : FALSE); } static __inline void em_rx_discard(struct rx_ring *rxr, int i) { struct em_rxbuffer *rbuf; rbuf = &rxr->rx_buffers[i]; bus_dmamap_unload(rxr->rxtag, rbuf->map); /* Free any previous pieces */ if (rxr->fmp != NULL) { rxr->fmp->m_flags |= M_PKTHDR; m_freem(rxr->fmp); rxr->fmp = NULL; rxr->lmp = NULL; } /* ** Free buffer and allow em_refresh_mbufs() ** to clean up and recharge buffer. 
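**
** For example (editor's note): with num_rx_desc = 1024,
** next_to_check = 10 and next_to_refresh = 1000,
** e1000_rx_unrefreshed() in if_em.h yields
** (1024 + 10) - 1000 - 1 = 33 descriptors awaiting refresh.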
	 */
	if (rbuf->m_head) {
		m_free(rbuf->m_head);
		rbuf->m_head = NULL;
	}
	return;
}

#ifndef __NO_STRICT_ALIGNMENT
/*
 * When jumbo frames are enabled we should realign the entire payload on
 * architectures with strict alignment. This is a serious design mistake
 * of the 8254x as it nullifies DMA operations. The 8254x only allows the
 * RX buffer size to be 2048/4096/8192/16384. What we really want is
 * 2048 - ETHER_ALIGN to align its payload. On architectures without
 * strict alignment restrictions the 8254x still performs unaligned memory
 * accesses, which reduce performance as well.
 * To avoid copying over an entire frame to align, we allocate a new mbuf
 * and copy the ethernet header to the new mbuf. The new mbuf is prepended
 * into the existing mbuf chain.
 *
 * Be aware, best performance of the 8254x is achieved only when jumbo
 * frames are not used at all on architectures with strict alignment.
 */
static int
em_fixup_rx(struct rx_ring *rxr)
{
	struct adapter *adapter = rxr->adapter;
	struct mbuf *m, *n;
	int error;

	error = 0;
	m = rxr->fmp;
	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
		m->m_data += ETHER_HDR_LEN;
	} else {
		MGETHDR(n, M_NOWAIT, MT_DATA);
		if (n != NULL) {
			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
			m->m_data += ETHER_HDR_LEN;
			m->m_len -= ETHER_HDR_LEN;
			n->m_len = ETHER_HDR_LEN;
			M_MOVE_PKTHDR(n, m);
			n->m_next = m;
			rxr->fmp = n;
		} else {
			adapter->dropped_pkts++;
			m_freem(rxr->fmp);
			rxr->fmp = NULL;
			error = ENOMEM;
		}
	}

	return (error);
}
#endif

static void
em_setup_rxdesc(union e1000_rx_desc_extended *rxd,
    const struct em_rxbuffer *rxbuf)
{
	rxd->read.buffer_addr = htole64(rxbuf->paddr);
	/* DD bits must be cleared */
	rxd->wb.upper.status_error = 0;
}

/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of the checksum so that the
 *  stack doesn't spend time verifying it.
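 *
 *  For example (editor's note): a clean TCP segment whose descriptor
 *  reports IPCS and TCPCS with neither IPE nor TCPE set comes back with
 *  CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR
 *  and csum_data = 0xffff, so the stack verifies nothing further.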
 *
 *********************************************************************/
static void
em_receive_checksum(uint32_t status, struct mbuf *mp)
{
	mp->m_pkthdr.csum_flags = 0;

	/* Ignore Checksum bit is set */
	if (status & E1000_RXD_STAT_IXSM)
		return;

	/* If the IP checksum exists and there is no IP Checksum error */
	if ((status & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
	    E1000_RXD_STAT_IPCS) {
		mp->m_pkthdr.csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID);
	}

	/* TCP or UDP checksum */
	if ((status & (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
	    E1000_RXD_STAT_TCPCS) {
		mp->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
		mp->m_pkthdr.csum_data = htons(0xffff);
	}
	if (status & E1000_RXD_STAT_UDPCS) {
		mp->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
		mp->m_pkthdr.csum_data = htons(0xffff);
	}
}

/*
 * This routine is run via a vlan
 * config EVENT
 */
static void
em_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u32 index, bit;

	if (ifp->if_softc != arg)   /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095))       /* Invalid ID */
		return;

	EM_CORE_LOCK(adapter);
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] |= (1 << bit);
	++adapter->num_vlans;
	/* Re-init to load the changes */
	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
		em_init_locked(adapter);
	EM_CORE_UNLOCK(adapter);
}

/*
 * This routine is run via a vlan
 * unconfig EVENT
 */
static void
em_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter *adapter = ifp->if_softc;
	u32 index, bit;

	if (ifp->if_softc != arg)
		return;

	if ((vtag == 0) || (vtag > 4095))       /* Invalid */
		return;

	EM_CORE_LOCK(adapter);
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] &= ~(1 << bit);
	--adapter->num_vlans;
	/* Re-init to load the changes */
	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
		em_init_locked(adapter);
	EM_CORE_UNLOCK(adapter);
}

static void
em_setup_vlan_hw_support(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg;

	/*
	** We get here thru init_locked, meaning
	** a soft reset; this has already cleared
	** the VFTA and other state, so if no
	** vlans have been registered do nothing.
	*/
	if (adapter->num_vlans == 0)
		return;

	/*
	** A soft reset zeroes out the VFTA, so
	** we need to repopulate it now.
	*/
	for (int i = 0; i < EM_VFTA_SIZE; i++)
		if (adapter->shadow_vfta[i] != 0)
			E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
			    i, adapter->shadow_vfta[i]);

	reg = E1000_READ_REG(hw, E1000_CTRL);
	reg |= E1000_CTRL_VME;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);

	/* Enable the Filter Table */
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_CFIEN;
	reg |= E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);
}

static void
em_enable_intr(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ims_mask = IMS_ENABLE_MASK;

	if (hw->mac.type == e1000_82574) {
		E1000_WRITE_REG(hw, EM_EIAC, adapter->ims);
		ims_mask |= adapter->ims;
	}
	E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
}

static void
em_disable_intr(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (hw->mac.type == e1000_82574)
		E1000_WRITE_REG(hw, EM_EIAC, 0);
	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
}

/*
 * Bit of a misnomer, what this really means is
 * to enable OS management of the system...
aka * to disable special hardware management features */ static void em_init_manageability(struct adapter *adapter) { /* A shared code workaround */ #define E1000_82542_MANC2H E1000_MANC2H if (adapter->has_manage) { int manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H); int manc = E1000_READ_REG(&adapter->hw, E1000_MANC); /* disable hardware interception of ARP */ manc &= ~(E1000_MANC_ARP_EN); /* enable receiving management packets to the host */ manc |= E1000_MANC_EN_MNG2HOST; #define E1000_MNG2HOST_PORT_623 (1 << 5) #define E1000_MNG2HOST_PORT_664 (1 << 6) manc2h |= E1000_MNG2HOST_PORT_623; manc2h |= E1000_MNG2HOST_PORT_664; E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h); E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc); } } /* * Give control back to hardware management * controller if there is one. */ static void em_release_manageability(struct adapter *adapter) { if (adapter->has_manage) { int manc = E1000_READ_REG(&adapter->hw, E1000_MANC); /* re-enable hardware interception of ARP */ manc |= E1000_MANC_ARP_EN; manc &= ~E1000_MANC_EN_MNG2HOST; E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc); } } /* * em_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit. * For ASF and Pass Through versions of f/w this means * that the driver is loaded. For AMT version type f/w * this means that the network i/f is open. */ static void em_get_hw_control(struct adapter *adapter) { u32 ctrl_ext, swsm; if (adapter->hw.mac.type == e1000_82573) { swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM); E1000_WRITE_REG(&adapter->hw, E1000_SWSM, swsm | E1000_SWSM_DRV_LOAD); return; } /* else */ ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT); E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); return; } /* * em_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit. * For ASF and Pass Through versions of f/w this means that * the driver is no longer loaded. For AMT versions of the * f/w this means that the network i/f is closed. */ static void em_release_hw_control(struct adapter *adapter) { u32 ctrl_ext, swsm; if (!adapter->has_manage) return; if (adapter->hw.mac.type == e1000_82573) { swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM); E1000_WRITE_REG(&adapter->hw, E1000_SWSM, swsm & ~E1000_SWSM_DRV_LOAD); return; } /* else */ ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT); E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); return; } static int em_is_valid_ether_addr(u8 *addr) { char zero_addr[6] = { 0, 0, 0, 0, 0, 0 }; if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) { return (FALSE); } return (TRUE); } /* ** Parse the interface capabilities with regard ** to both system management and wake-on-lan for ** later use. 
*/ static void em_get_wakeup(device_t dev) { struct adapter *adapter = device_get_softc(dev); u16 eeprom_data = 0, device_id, apme_mask; adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw); apme_mask = EM_EEPROM_APME; switch (adapter->hw.mac.type) { case e1000_82573: case e1000_82583: adapter->has_amt = TRUE; /* Falls thru */ case e1000_82571: case e1000_82572: case e1000_80003es2lan: if (adapter->hw.bus.func == 1) { e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); break; } else e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); break; case e1000_ich8lan: case e1000_ich9lan: case e1000_ich10lan: case e1000_pchlan: case e1000_pch2lan: apme_mask = E1000_WUC_APME; adapter->has_amt = TRUE; eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC); break; default: e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); break; } if (eeprom_data & apme_mask) adapter->wol = (E1000_WUFC_MAG | E1000_WUFC_MC); /* * We have the eeprom settings, now apply the special cases * where the eeprom may be wrong or the board won't support * wake on lan on a particular port */ device_id = pci_get_device(dev); switch (device_id) { case E1000_DEV_ID_82571EB_FIBER: /* Wake events only supported on port A for dual fiber * regardless of eeprom setting */ if (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_FUNC_1) adapter->wol = 0; break; case E1000_DEV_ID_82571EB_QUAD_COPPER: case E1000_DEV_ID_82571EB_QUAD_FIBER: case E1000_DEV_ID_82571EB_QUAD_COPPER_LP: /* if quad port adapter, disable WoL on all but port A */ if (global_quad_port_a != 0) adapter->wol = 0; /* Reset for multiple quad port adapters */ if (++global_quad_port_a == 4) global_quad_port_a = 0; break; } return; } /* * Enable PCI Wake On Lan capability */ static void em_enable_wakeup(device_t dev) { struct adapter *adapter = device_get_softc(dev); struct ifnet *ifp = adapter->ifp; u32 pmc, ctrl, ctrl_ext, rctl; u16 status; if ((pci_find_cap(dev, PCIY_PMG, &pmc) != 0)) return; /* Advertise the wakeup capability */ ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL); ctrl |= (E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN3); E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl); E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN); if ((adapter->hw.mac.type == e1000_ich8lan) || (adapter->hw.mac.type == e1000_pchlan) || (adapter->hw.mac.type == e1000_ich9lan) || (adapter->hw.mac.type == e1000_ich10lan)) e1000_suspend_workarounds_ich8lan(&adapter->hw); /* Keep the laser running on Fiber adapters */ if (adapter->hw.phy.media_type == e1000_media_type_fiber || adapter->hw.phy.media_type == e1000_media_type_internal_serdes) { ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT); ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA; E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext); } /* ** Determine type of Wakeup: note that wol ** is set with all bits on by default. 
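**
** For example (editor's note): with wol = WUFC_MAG | WUFC_MC from the
** EEPROM and only IFCAP_WOL_MAGIC enabled, the multicast bit is cleared
** below and the port wakes on magic packets alone.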
	 */
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) == 0)
		adapter->wol &= ~E1000_WUFC_MAG;
	if ((ifp->if_capenable & IFCAP_WOL_MCAST) == 0)
		adapter->wol &= ~E1000_WUFC_MC;
	else {
		rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
	}

	if ((adapter->hw.mac.type == e1000_pchlan) ||
	    (adapter->hw.mac.type == e1000_pch2lan)) {
		if (em_enable_phy_wakeup(adapter))
			return;
	} else {
		E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
	}

	if (adapter->hw.phy.type == e1000_phy_igp_3)
		e1000_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);

	/* Request PME */
	status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
	status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if (ifp->if_capenable & IFCAP_WOL)
		status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);

	return;
}

/*
** WOL in the newer chipset interfaces (pchlan)
** requires things to be copied into the PHY
*/
static int
em_enable_phy_wakeup(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 mreg, ret = 0;
	u16 preg;

	/* copy MAC RARs to PHY RARs */
	e1000_copy_rx_addrs_to_phy_ich8lan(hw);

	/* copy MAC MTA to PHY MTA */
	for (int i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
		mreg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
		e1000_write_phy_reg(hw, BM_MTA(i), (u16)(mreg & 0xFFFF));
		e1000_write_phy_reg(hw, BM_MTA(i) + 1,
		    (u16)((mreg >> 16) & 0xFFFF));
	}

	/* configure PHY Rx Control register */
	e1000_read_phy_reg(&adapter->hw, BM_RCTL, &preg);
	mreg = E1000_READ_REG(hw, E1000_RCTL);
	if (mreg & E1000_RCTL_UPE)
		preg |= BM_RCTL_UPE;
	if (mreg & E1000_RCTL_MPE)
		preg |= BM_RCTL_MPE;
	preg &= ~(BM_RCTL_MO_MASK);
	if (mreg & E1000_RCTL_MO_3)
		preg |= (((mreg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
		    << BM_RCTL_MO_SHIFT);
	if (mreg & E1000_RCTL_BAM)
		preg |= BM_RCTL_BAM;
	if (mreg & E1000_RCTL_PMCF)
		preg |= BM_RCTL_PMCF;
	mreg = E1000_READ_REG(hw, E1000_CTRL);
	if (mreg & E1000_CTRL_RFCE)
		preg |= BM_RCTL_RFCE;
	e1000_write_phy_reg(&adapter->hw, BM_RCTL, preg);

	/* enable PHY wakeup in MAC register */
	E1000_WRITE_REG(hw, E1000_WUC,
	    E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
	E1000_WRITE_REG(hw, E1000_WUFC, adapter->wol);

	/* configure and enable PHY wakeup in PHY registers */
	e1000_write_phy_reg(&adapter->hw, BM_WUFC, adapter->wol);
	e1000_write_phy_reg(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);

	/* activate PHY wakeup */
	ret = hw->phy.ops.acquire(hw);
	if (ret) {
		printf("Could not acquire PHY\n");
		return ret;
	}
	e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
	    (BM_WUC_ENABLE_PAGE << IGP_PAGE_SHIFT));
	ret = e1000_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, &preg);
	if (ret) {
		printf("Could not read PHY page 769\n");
		goto out;
	}
	preg |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
	ret = e1000_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, preg);
	if (ret)
		printf("Could not set PHY Host Wakeup bit\n");
out:
	hw->phy.ops.release(hw);

	return ret;
}

static void
em_led_func(void *arg, int onoff)
{
	struct adapter *adapter = arg;

	EM_CORE_LOCK(adapter);
	if (onoff) {
		e1000_setup_led(&adapter->hw);
		e1000_led_on(&adapter->hw);
	} else {
		e1000_led_off(&adapter->hw);
		e1000_cleanup_led(&adapter->hw);
	}
	EM_CORE_UNLOCK(adapter);
}

/*
** Disable the L0s and L1 link states.
*/
static void
em_disable_aspm(struct adapter *adapter)
{
	int base, reg;
	u16 link_cap, link_ctrl;
	device_t dev = adapter->dev;

	switch (adapter->hw.mac.type) {
	case e1000_82573:
	case e1000_82574:
	case e1000_82583:
		break;
	default:
		return;
	}

	if (pci_find_cap(dev,
PCIY_EXPRESS, &base) != 0) return; reg = base + PCIER_LINK_CAP; link_cap = pci_read_config(dev, reg, 2); if ((link_cap & PCIEM_LINK_CAP_ASPM) == 0) return; reg = base + PCIER_LINK_CTL; link_ctrl = pci_read_config(dev, reg, 2); link_ctrl &= ~PCIEM_LINK_CTL_ASPMC; pci_write_config(dev, reg, link_ctrl, 2); return; } /********************************************************************** * * Update the board statistics counters. * **********************************************************************/ static void em_update_stats_counters(struct adapter *adapter) { struct ifnet *ifp; if(adapter->hw.phy.media_type == e1000_media_type_copper || (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) { adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS); adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC); } adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS); adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC); adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC); adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL); adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC); adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL); adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC); adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC); adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC); adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC); adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC); adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC); adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC); adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC); adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64); adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127); adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255); adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511); adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023); adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522); adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC); adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC); adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC); adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC); /* For the 64-bit byte counters the low dword must be read first. 
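 */

/*
 * Illustrative helper (editor's addition, not part of this commit),
 * factoring out the low-then-high read order used for GORC/GOTC below.
 * em_read_64bit_counter() is a hypothetical name; the snippet is kept
 * under 'notdef' so it is never compiled.
 */
#ifdef notdef
static u64
em_read_64bit_counter(struct e1000_hw *hw, u32 lo_reg, u32 hi_reg)
{
	u64 lo;

	/* The low dword must be read first ... */
	lo = E1000_READ_REG(hw, lo_reg);
	/* ... and the high-dword read clears the pair. */
	return (lo + ((u64)E1000_READ_REG(hw, hi_reg) << 32));
}
#endif

/*
 * The inline reads below follow that order.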
*/ /* Both registers clear on the read of the high dword */ adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCL) + ((u64)E1000_READ_REG(&adapter->hw, E1000_GORCH) << 32); adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCL) + ((u64)E1000_READ_REG(&adapter->hw, E1000_GOTCH) << 32); adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC); adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC); adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC); adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC); adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC); adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH); adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH); adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR); adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT); adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64); adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127); adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255); adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511); adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023); adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522); adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC); adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC); /* Interrupt Counts */ adapter->stats.iac += E1000_READ_REG(&adapter->hw, E1000_IAC); adapter->stats.icrxptc += E1000_READ_REG(&adapter->hw, E1000_ICRXPTC); adapter->stats.icrxatc += E1000_READ_REG(&adapter->hw, E1000_ICRXATC); adapter->stats.ictxptc += E1000_READ_REG(&adapter->hw, E1000_ICTXPTC); adapter->stats.ictxatc += E1000_READ_REG(&adapter->hw, E1000_ICTXATC); adapter->stats.ictxqec += E1000_READ_REG(&adapter->hw, E1000_ICTXQEC); adapter->stats.ictxqmtc += E1000_READ_REG(&adapter->hw, E1000_ICTXQMTC); adapter->stats.icrxdmtc += E1000_READ_REG(&adapter->hw, E1000_ICRXDMTC); adapter->stats.icrxoc += E1000_READ_REG(&adapter->hw, E1000_ICRXOC); if (adapter->hw.mac.type >= e1000_82543) { adapter->stats.algnerrc += E1000_READ_REG(&adapter->hw, E1000_ALGNERRC); adapter->stats.rxerrc += E1000_READ_REG(&adapter->hw, E1000_RXERRC); adapter->stats.tncrs += E1000_READ_REG(&adapter->hw, E1000_TNCRS); adapter->stats.cexterr += E1000_READ_REG(&adapter->hw, E1000_CEXTERR); adapter->stats.tsctc += E1000_READ_REG(&adapter->hw, E1000_TSCTC); adapter->stats.tsctfc += E1000_READ_REG(&adapter->hw, E1000_TSCTFC); } ifp = adapter->ifp; ifp->if_collisions = adapter->stats.colc; /* Rx Errors */ ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc + adapter->stats.crcerrs + adapter->stats.algnerrc + adapter->stats.ruc + adapter->stats.roc + adapter->stats.mpc + adapter->stats.cexterr; /* Tx Errors */ ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol + adapter->watchdog_events; } /* Export a single 32-bit register via a read-only sysctl. */ static int em_sysctl_reg_handler(SYSCTL_HANDLER_ARGS) { struct adapter *adapter; u_int val; adapter = oidp->oid_arg1; val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2); return (sysctl_handle_int(oidp, &val, 0, req)); } /* * Add sysctl variables, one per statistic, to the system. 
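 *
 * (Editor's note: everything lands under dev.em.<unit>: driver counters
 * at the top level, per-ring nodes as queue_tx_<n> / queue_rx_<n>, MAC
 * counters under mac_stats, and interrupt causes under interrupts.)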
*/ static void em_add_hw_stats(struct adapter *adapter) { device_t dev = adapter->dev; struct tx_ring *txr = adapter->tx_rings; struct rx_ring *rxr = adapter->rx_rings; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct sysctl_oid *tree = device_get_sysctl_tree(dev); struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); struct e1000_hw_stats *stats = &adapter->stats; struct sysctl_oid *stat_node, *queue_node, *int_node; struct sysctl_oid_list *stat_list, *queue_list, *int_list; #define QUEUE_NAME_LEN 32 char namebuf[QUEUE_NAME_LEN]; /* Driver Statistics */ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq", CTLFLAG_RD, &adapter->link_irq, "Link MSIX IRQ Handled"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_fail", CTLFLAG_RD, &adapter->mbuf_defrag_failed, "Defragmenting mbuf chain failed"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail", CTLFLAG_RD, &adapter->no_tx_dma_setup, "Driver tx dma failure in xmit"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns", CTLFLAG_RD, &adapter->rx_overruns, "RX overruns"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts", CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control", CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_CTRL, em_sysctl_reg_handler, "IU", "Device Control Register"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control", CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RCTL, em_sysctl_reg_handler, "IU", "Receiver Control Register"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water", CTLFLAG_RD, &adapter->hw.fc.high_water, 0, "Flow Control High Watermark"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water", CTLFLAG_RD, &adapter->hw.fc.low_water, 0, "Flow Control Low Watermark"); for (int i = 0; i < adapter->num_queues; i++, txr++, rxr++) { snprintf(namebuf, QUEUE_NAME_LEN, "queue_tx_%d", i); queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "TX Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDH(txr->me), em_sysctl_reg_handler, "IU", "Transmit Descriptor Head"); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDT(txr->me), em_sysctl_reg_handler, "IU", "Transmit Descriptor Tail"); SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tx_irq", CTLFLAG_RD, &txr->tx_irq, "Queue MSI-X Transmit Interrupts"); SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_desc_avail", CTLFLAG_RD, &txr->no_desc_avail, "Queue No Descriptor Available"); snprintf(namebuf, QUEUE_NAME_LEN, "queue_rx_%d", i); queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "RX Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDH(rxr->me), em_sysctl_reg_handler, "IU", "Receive Descriptor Head"); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDT(rxr->me), em_sysctl_reg_handler, "IU", "Receive Descriptor Tail"); SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "rx_irq", CTLFLAG_RD, &rxr->rx_irq, "Queue MSI-X Receive Interrupts"); } /* MAC stats get their own sub node */ stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", CTLFLAG_RD, NULL, "Statistics"); stat_list = SYSCTL_CHILDREN(stat_node); SYSCTL_ADD_UQUAD(ctx, stat_list, 
	    OID_AUTO, "excess_coll", CTLFLAG_RD,
	    &stats->ecol, "Excessive collisions");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll",
	    CTLFLAG_RD, &stats->scc, "Single collisions");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll",
	    CTLFLAG_RD, &stats->mcc, "Multiple collisions");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll",
	    CTLFLAG_RD, &stats->latecol, "Late collisions");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count",
	    CTLFLAG_RD, &stats->colc, "Collision Count");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
	    CTLFLAG_RD, &adapter->stats.symerrs, "Symbol Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
	    CTLFLAG_RD, &adapter->stats.sec, "Sequence Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count",
	    CTLFLAG_RD, &adapter->stats.dc, "Defer Count");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets",
	    CTLFLAG_RD, &adapter->stats.mpc, "Missed Packets");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
	    CTLFLAG_RD, &adapter->stats.rnbc, "Receive No Buffers");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
	    CTLFLAG_RD, &adapter->stats.ruc, "Receive Undersize");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
	    CTLFLAG_RD, &adapter->stats.rfc, "Fragmented Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
	    CTLFLAG_RD, &adapter->stats.roc, "Oversized Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
	    CTLFLAG_RD, &adapter->stats.rjc, "Received Jabber");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs",
	    CTLFLAG_RD, &adapter->stats.rxerrc, "Receive Errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
	    CTLFLAG_RD, &adapter->stats.crcerrs, "CRC errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
	    CTLFLAG_RD, &adapter->stats.algnerrc, "Alignment Errors");
	/* On 82575 these are collision counts */
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs",
	    CTLFLAG_RD, &adapter->stats.cexterr,
	    "Collision/Carrier extension errors");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
	    CTLFLAG_RD, &adapter->stats.xonrxc, "XON Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
	    CTLFLAG_RD, &adapter->stats.xontxc, "XON Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
	    CTLFLAG_RD, &adapter->stats.xoffrxc, "XOFF Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
	    CTLFLAG_RD, &adapter->stats.xofftxc, "XOFF Transmitted");

	/* Packet Reception Stats */
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
	    CTLFLAG_RD, &adapter->stats.tpr, "Total Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
	    CTLFLAG_RD, &adapter->stats.gprc, "Good Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
	    CTLFLAG_RD, &adapter->stats.bprc, "Broadcast Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
	    CTLFLAG_RD, &adapter->stats.mprc, "Multicast Packets Received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
	    CTLFLAG_RD, &adapter->stats.prc64, "64 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
	    CTLFLAG_RD, &adapter->stats.prc127,
	    "65-127 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
	    CTLFLAG_RD, &adapter->stats.prc255,
	    "128-255 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
	    CTLFLAG_RD, &adapter->stats.prc511,
	    "256-511 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
	    CTLFLAG_RD, &adapter->stats.prc1023,
	    "512-1023 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
	    CTLFLAG_RD, &adapter->stats.prc1522,
	    "1024-1522 byte frames received");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
	    CTLFLAG_RD, &adapter->stats.gorc, "Good Octets Received");

	/* Packet Transmission Stats */
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
	    CTLFLAG_RD, &adapter->stats.gotc, "Good Octets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
	    CTLFLAG_RD, &adapter->stats.tpt, "Total Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
	    CTLFLAG_RD, &adapter->stats.gptc, "Good Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
	    CTLFLAG_RD, &adapter->stats.bptc,
	    "Broadcast Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
	    CTLFLAG_RD, &adapter->stats.mptc,
	    "Multicast Packets Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
	    CTLFLAG_RD, &adapter->stats.ptc64, "64 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
	    CTLFLAG_RD, &adapter->stats.ptc127,
	    "65-127 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
	    CTLFLAG_RD, &adapter->stats.ptc255,
	    "128-255 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
	    CTLFLAG_RD, &adapter->stats.ptc511,
	    "256-511 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
	    CTLFLAG_RD, &adapter->stats.ptc1023,
	    "512-1023 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
	    CTLFLAG_RD, &adapter->stats.ptc1522,
	    "1024-1522 byte frames transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd",
	    CTLFLAG_RD, &adapter->stats.tsctc, "TSO Contexts Transmitted");
	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail",
	    CTLFLAG_RD, &adapter->stats.tsctfc, "TSO Contexts Failed");

	/* Interrupt Stats */
	int_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "interrupts",
	    CTLFLAG_RD, NULL, "Interrupt Statistics");
	int_list = SYSCTL_CHILDREN(int_node);

	SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "asserts",
	    CTLFLAG_RD, &adapter->stats.iac, "Interrupt Assertion Count");
	SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_pkt_timer",
	    CTLFLAG_RD, &adapter->stats.icrxptc,
	    "Interrupt Cause Rx Pkt Timer Expire Count");
	SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_abs_timer",
	    CTLFLAG_RD, &adapter->stats.icrxatc,
	    "Interrupt Cause Rx Abs Timer Expire Count");
	SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_pkt_timer",
	    CTLFLAG_RD, &adapter->stats.ictxptc,
	    "Interrupt Cause Tx Pkt Timer Expire Count");
	SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_abs_timer",
	    CTLFLAG_RD, &adapter->stats.ictxatc,
	    "Interrupt Cause Tx Abs Timer Expire Count");
	SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_queue_empty",
	    CTLFLAG_RD, &adapter->stats.ictxqec,
	    "Interrupt Cause Tx Queue Empty Count");
	SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_queue_min_thresh",
	    CTLFLAG_RD, &adapter->stats.ictxqmtc,
	    "Interrupt Cause Tx Queue Min Thresh Count");
	SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_desc_min_thresh",
	    CTLFLAG_RD, &adapter->stats.icrxdmtc,
	    "Interrupt Cause Rx Desc Min Thresh Count");
	SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_overrun",
	    CTLFLAG_RD, &adapter->stats.icrxoc,
	    "Interrupt Cause Receiver Overrun Count");
}
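
/*
 * Usage sketch (editor's addition, not part of this commit): reading one
 * of the counters registered above from userland via sysctlbyname(3),
 * assuming unit 0.  Userland-only code, kept under 'notdef' so it never
 * builds as part of the driver.
 */
#ifdef notdef
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t gprc;
	size_t len = sizeof(gprc);

	/* "mac_stats" and "good_pkts_recvd" match the nodes added above. */
	if (sysctlbyname("dev.em.0.mac_stats.good_pkts_recvd", &gprc,
	    &len, NULL, 0) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	printf("good packets received: %ju\n", (uintmax_t)gprc);
	return (0);
}
#endif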
/**********************************************************************
 *
 *  This routine provides a way to dump out the adapter eeprom,
 *  often a useful debug/service tool. Only the first 32 words are
 *  dumped; the settings that matter live in that range.
 *
 **********************************************************************/
static int
em_sysctl_nvm_info(SYSCTL_HANDLER_ARGS)
{
	struct adapter *adapter = (struct adapter *)arg1;
	int error;
	int result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error || !req->newptr)
		return (error);

	/*
	 * This value will cause a hex dump of the
	 * first 32 16-bit words of the EEPROM to
	 * the screen.
	 */
	if (result == 1)
		em_print_nvm_info(adapter);

	return (error);
}

static void
em_print_nvm_info(struct adapter *adapter)
{
	u16 eeprom_data;
	int i, j, row = 0;

	/* It's a bit crude, but it gets the job done */
	printf("\nInterface EEPROM Dump:\n");
	printf("Offset\n0x0000  ");
	for (i = 0, j = 0; i < 32; i++, j++) {
		if (j == 8) { /* Make the offset block */
			j = 0; ++row;
			printf("\n0x00%x0  ", row);
		}
		e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
		printf("%04x ", eeprom_data);
	}
	printf("\n");
}

static int
em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
{
	struct em_int_delay_info *info;
	struct adapter *adapter;
	u32 regval;
	int error, usecs, ticks;

	info = (struct em_int_delay_info *)arg1;
	usecs = info->value;
	error = sysctl_handle_int(oidp, &usecs, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535))
		return (EINVAL);
	info->value = usecs;
	ticks = EM_USECS_TO_TICKS(usecs);
	if (info->offset == E1000_ITR)	/* units are 256ns here */
		ticks *= 4;

	adapter = info->adapter;

	EM_CORE_LOCK(adapter);
	regval = E1000_READ_OFFSET(&adapter->hw, info->offset);
	regval = (regval & ~0xffff) | (ticks & 0xffff);
	/* Handle a few special cases. */
	switch (info->offset) {
	case E1000_RDTR:
		break;
	case E1000_TIDV:
		if (ticks == 0) {
			adapter->txd_cmd &= ~E1000_TXD_CMD_IDE;
			/* Don't write 0 into the TIDV register. */
			regval++;
		} else
			adapter->txd_cmd |= E1000_TXD_CMD_IDE;
		break;
	}
	E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval);
	EM_CORE_UNLOCK(adapter);
	return (0);
}

static void
em_add_int_delay_sysctl(struct adapter *adapter, const char *name,
    const char *description, struct em_int_delay_info *info,
    int offset, int value)
{
	info->adapter = adapter;
	info->offset = offset;
	info->value = value;
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW,
	    info, 0, em_sysctl_int_delay, "I", description);
}

static void
em_set_sysctl_value(struct adapter *adapter, const char *name,
    const char *description, int *limit, int value)
{
	*limit = value;
	SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
	    OID_AUTO, name, CTLFLAG_RW, limit, value, description);
}

/*
** Set flow control using sysctl:
** Flow control values:
**      0 - off
**      1 - rx pause
**      2 - tx pause
**      3 - full
*/
static int
em_set_flowcntl(SYSCTL_HANDLER_ARGS)
{
	int error;
	static int input = 3; /* default is full */
	struct adapter *adapter = (struct adapter *) arg1;

	error = sysctl_handle_int(oidp, &input, 0, req);

	if ((error) || (req->newptr == NULL))
		return (error);

	if (input == adapter->fc) /* no change?
*/ return (error); switch (input) { case e1000_fc_rx_pause: case e1000_fc_tx_pause: case e1000_fc_full: case e1000_fc_none: adapter->hw.fc.requested_mode = input; adapter->fc = input; break; default: /* Do nothing */ return (error); } adapter->hw.fc.current_mode = adapter->hw.fc.requested_mode; e1000_force_mac_fc(&adapter->hw); return (error); } /* ** Manage Energy Efficient Ethernet: ** Control values: ** 0/1 - enabled/disabled */ static int em_sysctl_eee(SYSCTL_HANDLER_ARGS) { struct adapter *adapter = (struct adapter *) arg1; int error, value; value = adapter->hw.dev_spec.ich8lan.eee_disable; error = sysctl_handle_int(oidp, &value, 0, req); if (error || req->newptr == NULL) return (error); EM_CORE_LOCK(adapter); adapter->hw.dev_spec.ich8lan.eee_disable = (value != 0); em_init_locked(adapter); EM_CORE_UNLOCK(adapter); return (0); } static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS) { struct adapter *adapter; int error; int result; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { adapter = (struct adapter *)arg1; em_print_debug_info(adapter); } return (error); } /* ** This routine is meant to be fluid, add whatever is ** needed for debugging a problem. -jfv */ static void em_print_debug_info(struct adapter *adapter) { device_t dev = adapter->dev; struct tx_ring *txr = adapter->tx_rings; struct rx_ring *rxr = adapter->rx_rings; if (adapter->ifp->if_drv_flags & IFF_DRV_RUNNING) printf("Interface is RUNNING "); else printf("Interface is NOT RUNNING\n"); if (adapter->ifp->if_drv_flags & IFF_DRV_OACTIVE) printf("and INACTIVE\n"); else printf("and ACTIVE\n"); for (int i = 0; i < adapter->num_queues; i++, txr++, rxr++) { device_printf(dev, "TX Queue %d ------\n", i); device_printf(dev, "hw tdh = %d, hw tdt = %d\n", E1000_READ_REG(&adapter->hw, E1000_TDH(i)), E1000_READ_REG(&adapter->hw, E1000_TDT(i))); device_printf(dev, "Tx Queue Status = %d\n", txr->busy); device_printf(dev, "TX descriptors avail = %d\n", txr->tx_avail); device_printf(dev, "Tx Descriptors avail failure = %ld\n", txr->no_desc_avail); device_printf(dev, "RX Queue %d ------\n", i); device_printf(dev, "hw rdh = %d, hw rdt = %d\n", E1000_READ_REG(&adapter->hw, E1000_RDH(i)), E1000_READ_REG(&adapter->hw, E1000_RDT(i))); device_printf(dev, "RX discarded packets = %ld\n", rxr->rx_discarded); device_printf(dev, "RX Next to Check = %d\n", rxr->next_to_check); device_printf(dev, "RX Next to Refresh = %d\n", rxr->next_to_refresh); } } #ifdef EM_MULTIQUEUE /* * 82574 only: * Write a new value to the EEPROM increasing the number of MSIX * vectors from 3 to 5, for proper multiqueue support. 
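 *
 * (Editor's note: the NVM field appears to hold "vectors - 1", so the
 * value 4 written below is what makes the device advertise 5.)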
*/ static void em_enable_vectors_82574(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; device_t dev = adapter->dev; u16 edata; e1000_read_nvm(hw, EM_NVM_PCIE_CTRL, 1, &edata); printf("Current cap: %#06x\n", edata); if (((edata & EM_NVM_MSIX_N_MASK) >> EM_NVM_MSIX_N_SHIFT) != 4) { device_printf(dev, "Writing to eeprom: increasing " "reported MSIX vectors from 3 to 5...\n"); edata &= ~(EM_NVM_MSIX_N_MASK); edata |= 4 << EM_NVM_MSIX_N_SHIFT; e1000_write_nvm(hw, EM_NVM_PCIE_CTRL, 1, &edata); e1000_update_nvm_checksum(hw); device_printf(dev, "Writing to eeprom: done\n"); } } #endif #ifdef DDB DB_COMMAND(em_reset_dev, em_ddb_reset_dev) { devclass_t dc; int max_em; dc = devclass_find("em"); max_em = devclass_get_maxunit(dc); for (int index = 0; index < (max_em - 1); index++) { device_t dev; dev = devclass_get_device(dc, index); if (device_get_driver(dev) == &em_driver) { struct adapter *adapter = device_get_softc(dev); EM_CORE_LOCK(adapter); em_init_locked(adapter); EM_CORE_UNLOCK(adapter); } } } DB_COMMAND(em_dump_queue, em_ddb_dump_queue) { devclass_t dc; int max_em; dc = devclass_find("em"); max_em = devclass_get_maxunit(dc); for (int index = 0; index < (max_em - 1); index++) { device_t dev; dev = devclass_get_device(dc, index); if (device_get_driver(dev) == &em_driver) em_print_debug_info(device_get_softc(dev)); } } #endif Index: stable/10/sys/dev/e1000/if_em.h =================================================================== --- stable/10/sys/dev/e1000/if_em.h (revision 296054) +++ stable/10/sys/dev/e1000/if_em.h (revision 296055) @@ -1,555 +1,558 @@ /****************************************************************************** Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _EM_H_DEFINED_ #define _EM_H_DEFINED_ /* Tunables */ /* * EM_TXD: Maximum number of Transmit Descriptors * Valid Range: 80-256 for 82542 and 82543-based adapters * 80-4096 for others * Default Value: 256 * This value is the number of transmit descriptors allocated by the driver. 
 * Increasing this value allows the driver to queue more transmits. Each
 * descriptor is 16 bytes.
 * Since TDLEN should be a multiple of 128 bytes, the number of transmit
 * descriptors should meet the following condition:
 * (num_tx_desc * sizeof(struct e1000_tx_desc)) % 128 == 0
 */
#define EM_MIN_TXD		80
#define EM_MAX_TXD		4096
#ifdef EM_MULTIQUEUE
#define EM_DEFAULT_TXD		4096
#else
#define EM_DEFAULT_TXD		1024
#endif

/*
 * EM_RXD - Maximum number of receive Descriptors
 * Valid Range: 80-256 for 82542 and 82543-based adapters
 *              80-4096 for others
 * Default Value: 256
 * This value is the number of receive descriptors allocated by the driver.
 * Increasing this value allows the driver to buffer more incoming packets.
 * Each descriptor is 16 bytes. A receive buffer is also allocated for each
 * descriptor. The maximum MTU size is 16110.
 * Since RDLEN should be a multiple of 128 bytes, the number of receive
 * descriptors should meet the following condition:
 * (num_rx_desc * sizeof(union e1000_rx_desc_extended)) % 128 == 0
 */
#define EM_MIN_RXD		80
#define EM_MAX_RXD		4096
#ifdef EM_MULTIQUEUE
#define EM_DEFAULT_RXD		4096
#else
#define EM_DEFAULT_RXD		1024
#endif

/*
 * EM_TIDV - Transmit Interrupt Delay Value
 * Valid Range: 0-65535 (0=off)
 * Default Value: 64
 * This value delays the generation of transmit interrupts in units of
 * 1.024 microseconds. Transmit interrupt reduction can improve CPU
 * efficiency if properly tuned for specific network traffic. If the
 * system is reporting dropped transmits, this value may be set too high
 * causing the driver to run out of available transmit descriptors.
 */
#define EM_TIDV			64

/*
 * EM_TADV - Transmit Absolute Interrupt Delay Value
 * (Not valid for 82542/82543/82544)
 * Valid Range: 0-65535 (0=off)
 * Default Value: 64
 * This value, in units of 1.024 microseconds, limits the delay in which a
 * transmit interrupt is generated. Useful only if EM_TIDV is non-zero,
 * this value ensures that an interrupt is generated after the initial
 * packet is sent on the wire within the set amount of time. Proper tuning,
 * along with EM_TIDV, may improve traffic throughput in specific
 * network conditions.
 */
#define EM_TADV			64

/*
 * EM_RDTR - Receive Interrupt Delay Timer (Packet Timer)
 * Valid Range: 0-65535 (0=off)
 * Default Value: 0
 * This value delays the generation of receive interrupts in units of 1.024
 * microseconds. Receive interrupt reduction can improve CPU efficiency if
 * properly tuned for specific network traffic. Increasing this value adds
 * extra latency to frame reception and can end up decreasing the throughput
 * of TCP traffic. If the system is reporting dropped receives, this value
 * may be set too high, causing the driver to run out of available receive
 * descriptors.
 *
 * CAUTION: When setting EM_RDTR to a value other than 0, adapters
 *          may hang (stop transmitting) under certain network conditions.
 *          If this occurs a WATCHDOG message is logged in the system
 *          event log. In addition, the controller is automatically reset,
 *          restoring the network connection. To eliminate the potential
 *          for the hang ensure that EM_RDTR is set to 0.
 */
#ifdef EM_MULTIQUEUE
#define EM_RDTR			64
#else
#define EM_RDTR			0
#endif

/*
 * Receive Interrupt Absolute Delay Timer (Not valid for 82542/82543/82544)
 * Valid Range: 0-65535 (0=off)
 * Default Value: 64
 * This value, in units of 1.024 microseconds, limits the delay in which a
 * receive interrupt is generated. Useful only if EM_RDTR is non-zero,
 * this value ensures that an interrupt is generated after the initial
 * packet is received within the set amount of time. Proper tuning,
 * along with EM_RDTR, may improve traffic throughput in specific network
 * conditions.
 */
#ifdef EM_MULTIQUEUE
#define EM_RADV			128
#else
#define EM_RADV			64
#endif

/*
 * This parameter controls the max duration of transmit watchdog.
 */
#define EM_WATCHDOG		(10 * hz)

/*
 * This parameter controls when the driver calls the routine to reclaim
 * transmit descriptors.
 */
#define EM_TX_CLEANUP_THRESHOLD	(adapter->num_tx_desc / 8)

/*
 * This parameter controls whether or not autonegotiation is enabled.
 *              0 - Disable autonegotiation
 *              1 - Enable  autonegotiation
 */
#define DO_AUTO_NEG		1

/*
 * This parameter controls whether or not the driver will wait for
 * autonegotiation to complete.
 *              1 - Wait for autonegotiation to complete
 *              0 - Don't wait for autonegotiation to complete
 */
#define WAIT_FOR_AUTO_NEG_DEFAULT	0

/* Tunables -- End */

#define AUTONEG_ADV_DEFAULT	(ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
				ADVERTISE_100_HALF | ADVERTISE_100_FULL | \
				ADVERTISE_1000_FULL)

#define AUTO_ALL_MODES		0

/* PHY master/slave setting */
#define EM_MASTER_SLAVE		e1000_ms_hw_default

/*
 * Miscellaneous constants
 */
#define EM_VENDOR_ID			0x8086
#define EM_FLASH			0x0014

#define EM_JUMBO_PBA			0x00000028
#define EM_DEFAULT_PBA			0x00000030
#define EM_SMARTSPEED_DOWNSHIFT		3
#define EM_SMARTSPEED_MAX		15
#define EM_MAX_LOOP			10

#define MAX_NUM_MULTICAST_ADDRESSES	128
#define PCI_ANY_ID			(~0U)
#define ETHER_ALIGN			2
#define EM_FC_PAUSE_TIME		0x0680
#define EM_EEPROM_APME			0x400
#define EM_82544_APME			0x0004

/*
 * Driver state logic for the detection of a hung state
 * in hardware.  Set TX_HUNG whenever a TX packet is used
 * (data is sent) and clear it when txeof() is invoked if
 * any descriptors from the ring are cleaned/reclaimed.
 * Increment internal counter if no descriptors are cleaned
 * and compare to TX_MAXTRIES.  When counter > TX_MAXTRIES,
 * reset adapter.
 */
#define EM_TX_IDLE		0x00000000
#define EM_TX_BUSY		0x00000001
#define EM_TX_HUNG		0x80000000
#define EM_TX_MAXTRIES		10

+#define PCICFG_DESC_RING_STATUS	0xe4
+#define FLUSH_DESC_REQUIRED	0x100
+
/*
 * TDBA/RDBA should be aligned on a 16 byte boundary. But TDLEN/RDLEN
 * should be a multiple of 128 bytes. So we align TDBA/RDBA on a 128 byte
 * boundary. This will also optimize the cache line size effect.
 * H/W supports up to cache line size 128.
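 *
 * For example (editor's note): EM_DEFAULT_TXD = 1024 descriptors at 16
 * bytes each gives TDLEN = 16384, which is both 128-byte aligned and a
 * multiple of 128, satisfying the constraint above.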
 */
#define EM_DBA_ALIGN			128

/*
 * See Intel 82574 Driver Programming Interface Manual, Section 10.2.6.9
 */
#define TARC_COMPENSATION_MODE	(1 << 7)	/* Compensation Mode */
#define TARC_SPEED_MODE_BIT	(1 << 21)	/* On PCI-E MACs only */
#define TARC_MQ_FIX		((1 << 23) | \
				 (1 << 24) | \
				 (1 << 25))	/* Handle errata in MQ mode */
#define TARC_ERRATA_BIT		(1 << 26)	/* Note from errata on 82574 */

/* PCI Config defines */
#define EM_BAR_TYPE(v)		((v) & EM_BAR_TYPE_MASK)
#define EM_BAR_TYPE_MASK	0x00000001
#define EM_BAR_TYPE_MMEM	0x00000000
#define EM_BAR_TYPE_FLASH	0x0014
#define EM_BAR_MEM_TYPE(v)	((v) & EM_BAR_MEM_TYPE_MASK)
#define EM_BAR_MEM_TYPE_MASK	0x00000006
#define EM_BAR_MEM_TYPE_32BIT	0x00000000
#define EM_BAR_MEM_TYPE_64BIT	0x00000004
#define EM_MSIX_BAR		3	/* On 82575 */

/* More backward compatibility */
#if __FreeBSD_version < 900000
#define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD
#endif

/* Defines for printing debug information */
#define DEBUG_INIT  0
#define DEBUG_IOCTL 0
#define DEBUG_HW    0

#define INIT_DEBUGOUT(S)            if (DEBUG_INIT)  printf(S "\n")
#define INIT_DEBUGOUT1(S, A)        if (DEBUG_INIT)  printf(S "\n", A)
#define INIT_DEBUGOUT2(S, A, B)     if (DEBUG_INIT)  printf(S "\n", A, B)
#define IOCTL_DEBUGOUT(S)           if (DEBUG_IOCTL) printf(S "\n")
#define IOCTL_DEBUGOUT1(S, A)       if (DEBUG_IOCTL) printf(S "\n", A)
#define IOCTL_DEBUGOUT2(S, A, B)    if (DEBUG_IOCTL) printf(S "\n", A, B)
#define HW_DEBUGOUT(S)              if (DEBUG_HW) printf(S "\n")
#define HW_DEBUGOUT1(S, A)          if (DEBUG_HW) printf(S "\n", A)
#define HW_DEBUGOUT2(S, A, B)       if (DEBUG_HW) printf(S "\n", A, B)

#define EM_MAX_SCATTER		64
#define EM_VFTA_SIZE		128
#define EM_TSO_SIZE		(65535 + sizeof(struct ether_vlan_header))
#define EM_TSO_SEG_SIZE		4096	/* Max dma segment size */
#define EM_MSIX_MASK		0x01F00000	/* For 82574 use */
#define EM_MSIX_LINK		0x01000000	/* For 82574 use */
#define ETH_ZLEN		60
#define ETH_ADDR_LEN		6
#define CSUM_OFFLOAD		7	/* Offload bits in mbuf flag */

/*
 * 82574 has a nonstandard address for EIAC, and since it's only used
 * in MSI-X mode, and in the em driver only the 82574 uses MSI-X, we
 * can handle it with just this define.
 */
#define EM_EIAC 0x000DC

/*
 * 82574 only reports 3 MSI-X vectors by default;
 * defines assisting with making it report 5 are
 * located here.
 */
#define EM_NVM_PCIE_CTRL	0x1B
#define EM_NVM_MSIX_N_MASK	(0x7 << EM_NVM_MSIX_N_SHIFT)
#define EM_NVM_MSIX_N_SHIFT	7

/*
 * Bus dma allocation structure used by
 * e1000_dma_malloc and e1000_dma_free.
*/ struct em_dma_alloc { bus_addr_t dma_paddr; caddr_t dma_vaddr; bus_dma_tag_t dma_tag; bus_dmamap_t dma_map; bus_dma_segment_t dma_seg; int dma_nseg; }; struct adapter; struct em_int_delay_info { struct adapter *adapter; /* Back-pointer to the adapter struct */ int offset; /* Register offset to read/write */ int value; /* Current value in usecs */ }; /* * The transmit ring, one per tx queue */ struct tx_ring { struct adapter *adapter; struct mtx tx_mtx; char mtx_name[16]; u32 me; u32 msix; u32 ims; int busy; struct em_dma_alloc txdma; struct e1000_tx_desc *tx_base; struct task tx_task; struct taskqueue *tq; u32 next_avail_desc; u32 next_to_clean; struct em_txbuffer *tx_buffers; volatile u16 tx_avail; u32 tx_tso; /* last tx was tso */ u16 last_hw_offload; u8 last_hw_ipcso; u8 last_hw_ipcss; u8 last_hw_tucso; u8 last_hw_tucss; #if __FreeBSD_version >= 800000 struct buf_ring *br; #endif /* Interrupt resources */ bus_dma_tag_t txtag; void *tag; struct resource *res; unsigned long tx_irq; unsigned long no_desc_avail; }; /* * The Receive ring, one per rx queue */ struct rx_ring { struct adapter *adapter; u32 me; u32 msix; u32 ims; struct mtx rx_mtx; char mtx_name[16]; u32 payload; struct task rx_task; struct taskqueue *tq; union e1000_rx_desc_extended *rx_base; struct em_dma_alloc rxdma; u32 next_to_refresh; u32 next_to_check; struct em_rxbuffer *rx_buffers; struct mbuf *fmp; struct mbuf *lmp; /* Interrupt resources */ void *tag; struct resource *res; bus_dma_tag_t rxtag; bool discard; /* Soft stats */ unsigned long rx_irq; unsigned long rx_discarded; unsigned long rx_packets; unsigned long rx_bytes; }; /* Our adapter structure */ struct adapter { struct ifnet *ifp; struct e1000_hw hw; /* FreeBSD operating-system-specific structures. */ struct e1000_osdep osdep; struct device *dev; struct cdev *led_dev; struct resource *memory; struct resource *flash; struct resource *msix_mem; struct resource *res; void *tag; u32 linkvec; u32 ivars; struct ifmedia media; struct callout timer; int msix; int if_flags; int max_frame_size; int min_frame_size; struct mtx core_mtx; int em_insert_vlan_header; u32 ims; bool in_detach; /* Task for FAST handling */ struct task link_task; struct task que_task; struct taskqueue *tq; /* private task queue */ eventhandler_tag vlan_attach; eventhandler_tag vlan_detach; u16 num_vlans; u8 num_queues; /* * Transmit rings: * Allocated at run time, an array of rings. */ struct tx_ring *tx_rings; int num_tx_desc; u32 txd_cmd; /* * Receive rings: * Allocated at run time, an array of rings. */ struct rx_ring *rx_rings; int num_rx_desc; u32 rx_process_limit; u32 rx_mbuf_sz; /* Management and WOL features */ u32 wol; bool has_manage; bool has_amt; /* Multicast array memory */ u8 *mta; /* ** Shadow VFTA table, this is needed because ** the real vlan filter table gets cleared during ** a soft reset and the driver needs to be able ** to repopulate it. 
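**
** For example (editor's note): VLAN tag 100 lives at
** shadow_vfta[100 >> 5] = shadow_vfta[3], bit 100 & 0x1F = 4,
** matching the index/bit math in em_register_vlan().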
*/ u32 shadow_vfta[EM_VFTA_SIZE]; /* Info about the interface */ u16 link_active; u16 fc; u16 link_speed; u16 link_duplex; u32 smartspeed; struct em_int_delay_info tx_int_delay; struct em_int_delay_info tx_abs_int_delay; struct em_int_delay_info rx_int_delay; struct em_int_delay_info rx_abs_int_delay; struct em_int_delay_info tx_itr; /* Misc stats maintained by the driver */ unsigned long dropped_pkts; unsigned long link_irq; unsigned long mbuf_defrag_failed; unsigned long no_tx_dma_setup; unsigned long no_tx_map_avail; unsigned long rx_overruns; unsigned long watchdog_events; struct e1000_hw_stats stats; }; /******************************************************************************** * vendor_info_array * * This array contains the list of Subvendor/Subdevice IDs on which the driver * should load. * ********************************************************************************/ typedef struct _em_vendor_info_t { unsigned int vendor_id; unsigned int device_id; unsigned int subvendor_id; unsigned int subdevice_id; unsigned int index; } em_vendor_info_t; struct em_txbuffer { int next_eop; /* Index of the desc to watch */ struct mbuf *m_head; bus_dmamap_t map; /* bus_dma map for packet */ }; struct em_rxbuffer { int next_eop; /* Index of the desc to watch */ struct mbuf *m_head; bus_dmamap_t map; /* bus_dma map for packet */ bus_addr_t paddr; }; /* ** Find the number of unrefreshed RX descriptors */ static inline u16 e1000_rx_unrefreshed(struct rx_ring *rxr) { struct adapter *adapter = rxr->adapter; if (rxr->next_to_check > rxr->next_to_refresh) return (rxr->next_to_check - rxr->next_to_refresh - 1); else return ((adapter->num_rx_desc + rxr->next_to_check) - rxr->next_to_refresh - 1); } #define EM_CORE_LOCK_INIT(_sc, _name) \ mtx_init(&(_sc)->core_mtx, _name, "EM Core Lock", MTX_DEF) #define EM_TX_LOCK_INIT(_sc, _name) \ mtx_init(&(_sc)->tx_mtx, _name, "EM TX Lock", MTX_DEF) #define EM_RX_LOCK_INIT(_sc, _name) \ mtx_init(&(_sc)->rx_mtx, _name, "EM RX Lock", MTX_DEF) #define EM_CORE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->core_mtx) #define EM_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->tx_mtx) #define EM_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_mtx) #define EM_CORE_LOCK(_sc) mtx_lock(&(_sc)->core_mtx) #define EM_TX_LOCK(_sc) mtx_lock(&(_sc)->tx_mtx) #define EM_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->tx_mtx) #define EM_RX_LOCK(_sc) mtx_lock(&(_sc)->rx_mtx) #define EM_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->core_mtx) #define EM_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->tx_mtx) #define EM_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->rx_mtx) #define EM_CORE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->core_mtx, MA_OWNED) #define EM_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_mtx, MA_OWNED) #define EM_RX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->rx_mtx, MA_OWNED) #define EM_RSSRK_SIZE 4 #define EM_RSSRK_VAL(key, i) (key[(i) * EM_RSSRK_SIZE] | \ key[(i) * EM_RSSRK_SIZE + 1] << 8 | \ key[(i) * EM_RSSRK_SIZE + 2] << 16 | \ key[(i) * EM_RSSRK_SIZE + 3] << 24) #endif /* _EM_H_DEFINED_ */ Index: stable/10/sys/dev/e1000/if_igb.c =================================================================== --- stable/10/sys/dev/e1000/if_igb.c (revision 296054) +++ stable/10/sys/dev/e1000/if_igb.c (revision 296055) @@ -1,6205 +1,6123 @@ /****************************************************************************** Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. 
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #include "opt_inet.h" #include "opt_inet6.h" #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #include "opt_altq.h" #endif -#include -#include -#ifndef IGB_LEGACY_TX -#include -#endif -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include "e1000_api.h" -#include "e1000_82575.h" #include "if_igb.h" /********************************************************************* - * Set this to one to display debug statistics - *********************************************************************/ -int igb_display_debug_stats = 0; - -/********************************************************************* * Driver version: *********************************************************************/ -char igb_driver_version[] = "version - 2.4.0"; +char igb_driver_version[] = "2.5.3-k"; /********************************************************************* * PCI Device ID Table * * Used by probe to select devices to load on * Last field stores an index into e1000_strings * Last entry must be all 0s * * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } *********************************************************************/ static igb_vendor_info_t igb_vendor_info_array[] = { - { 0x8086, E1000_DEV_ID_82575EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_82575EB_FIBER_SERDES, - PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_82575GB_QUAD_COPPER, - PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_82576, PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_82576_NS, PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_82576_NS_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_82576_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_82576_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_82576_SERDES_QUAD, - PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, 
E1000_DEV_ID_82576_QUAD_COPPER, - PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_82576_QUAD_COPPER_ET2, - PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_82576_VF, PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_82580_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_82580_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_82580_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_82580_SGMII, PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_82580_COPPER_DUAL, - PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_82580_QUAD_FIBER, - PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_DH89XXCC_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_DH89XXCC_SGMII, PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_DH89XXCC_SFP, PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_DH89XXCC_BACKPLANE, - PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_I350_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_I350_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_I350_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_I350_SGMII, PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_I350_VF, PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_I210_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_I210_COPPER_IT, PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_I210_COPPER_OEM1, - PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_I210_COPPER_FLASHLESS, - PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_I210_SERDES_FLASHLESS, - PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_I210_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_I210_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_I210_SGMII, PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_I211_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_I354_BACKPLANE_1GBPS, - PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS, - PCI_ANY_ID, PCI_ANY_ID, 0}, - { 0x8086, E1000_DEV_ID_I354_SGMII, PCI_ANY_ID, PCI_ANY_ID, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_82575EB_COPPER, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_82575EB_FIBER_SERDES, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_82575GB_QUAD_COPPER, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_82576, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_82576_NS, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_82576_NS_SERDES, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_82576_FIBER, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_82576_SERDES, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_82576_SERDES_QUAD, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_82576_QUAD_COPPER, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_82576_QUAD_COPPER_ET2, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_82576_VF, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_82580_COPPER, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_82580_FIBER, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_82580_SERDES, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_82580_SGMII, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_82580_COPPER_DUAL, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_82580_QUAD_FIBER, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SERDES, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SGMII, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SFP, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_BACKPLANE, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_I350_COPPER, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, 
E1000_DEV_ID_I350_FIBER, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_I350_SERDES, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_I350_SGMII, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_I350_VF, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_IT, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_OEM1, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_FLASHLESS, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SERDES_FLASHLESS, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_I210_FIBER, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SERDES, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SGMII, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_I211_COPPER, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_1GBPS, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS, 0, 0, 0}, + {IGB_INTEL_VENDOR_ID, E1000_DEV_ID_I354_SGMII, 0, 0, 0}, /* required last entry */ - { 0, 0, 0, 0, 0} + {0, 0, 0, 0, 0} }; /********************************************************************* * Table of branding strings for all supported NICs. *********************************************************************/ static char *igb_strings[] = { "Intel(R) PRO/1000 Network Connection" }; /********************************************************************* * Function prototypes *********************************************************************/ static int igb_probe(device_t); static int igb_attach(device_t); static int igb_detach(device_t); static int igb_shutdown(device_t); static int igb_suspend(device_t); static int igb_resume(device_t); #ifndef IGB_LEGACY_TX static int igb_mq_start(struct ifnet *, struct mbuf *); static int igb_mq_start_locked(struct ifnet *, struct tx_ring *); static void igb_qflush(struct ifnet *); static void igb_deferred_mq_start(void *, int); #else static void igb_start(struct ifnet *); static void igb_start_locked(struct tx_ring *, struct ifnet *ifp); #endif static int igb_ioctl(struct ifnet *, u_long, caddr_t); static void igb_init(void *); static void igb_init_locked(struct adapter *); static void igb_stop(void *); static void igb_media_status(struct ifnet *, struct ifmediareq *); static int igb_media_change(struct ifnet *); static void igb_identify_hardware(struct adapter *); static int igb_allocate_pci_resources(struct adapter *); static int igb_allocate_msix(struct adapter *); static int igb_allocate_legacy(struct adapter *); static int igb_setup_msix(struct adapter *); static void igb_free_pci_resources(struct adapter *); static void igb_local_timer(void *); static void igb_reset(struct adapter *); static int igb_setup_interface(device_t, struct adapter *); static int igb_allocate_queues(struct adapter *); static void igb_configure_queues(struct adapter *); static int igb_allocate_transmit_buffers(struct tx_ring *); static void igb_setup_transmit_structures(struct adapter *); static void igb_setup_transmit_ring(struct tx_ring *); static void igb_initialize_transmit_units(struct adapter *); static void igb_free_transmit_structures(struct adapter *); static void igb_free_transmit_buffers(struct tx_ring *); static int igb_allocate_receive_buffers(struct rx_ring *); static int igb_setup_receive_structures(struct adapter *); static int igb_setup_receive_ring(struct rx_ring *); static void igb_initialize_receive_units(struct adapter *); static void igb_free_receive_structures(struct adapter *); static void igb_free_receive_buffers(struct rx_ring *); static 
void igb_free_receive_ring(struct rx_ring *); static void igb_enable_intr(struct adapter *); static void igb_disable_intr(struct adapter *); static void igb_update_stats_counters(struct adapter *); static bool igb_txeof(struct tx_ring *); static __inline void igb_rx_discard(struct rx_ring *, int); static __inline void igb_rx_input(struct rx_ring *, struct ifnet *, struct mbuf *, u32); static bool igb_rxeof(struct igb_queue *, int, int *); static void igb_rx_checksum(u32, struct mbuf *, u32); static int igb_tx_ctx_setup(struct tx_ring *, struct mbuf *, u32 *, u32 *); static int igb_tso_setup(struct tx_ring *, struct mbuf *, u32 *, u32 *); static void igb_set_promisc(struct adapter *); static void igb_disable_promisc(struct adapter *); static void igb_set_multi(struct adapter *); static void igb_update_link_status(struct adapter *); static void igb_refresh_mbufs(struct rx_ring *, int); static void igb_register_vlan(void *, struct ifnet *, u16); static void igb_unregister_vlan(void *, struct ifnet *, u16); static void igb_setup_vlan_hw_support(struct adapter *); static int igb_xmit(struct tx_ring *, struct mbuf **); static int igb_dma_malloc(struct adapter *, bus_size_t, struct igb_dma_alloc *, int); static void igb_dma_free(struct adapter *, struct igb_dma_alloc *); static int igb_sysctl_nvm_info(SYSCTL_HANDLER_ARGS); static void igb_print_nvm_info(struct adapter *); static int igb_is_valid_ether_addr(u8 *); static void igb_add_hw_stats(struct adapter *); static void igb_vf_init_stats(struct adapter *); static void igb_update_vf_stats_counters(struct adapter *); /* Management and WOL Support */ static void igb_init_manageability(struct adapter *); static void igb_release_manageability(struct adapter *); static void igb_get_hw_control(struct adapter *); static void igb_release_hw_control(struct adapter *); static void igb_enable_wakeup(device_t); static void igb_led_func(void *, int); static int igb_irq_fast(void *); static void igb_msix_que(void *); static void igb_msix_link(void *); static void igb_handle_que(void *context, int pending); static void igb_handle_link(void *context, int pending); static void igb_handle_link_locked(struct adapter *); static void igb_set_sysctl_value(struct adapter *, const char *, const char *, int *, int); static int igb_set_flowcntl(SYSCTL_HANDLER_ARGS); static int igb_sysctl_dmac(SYSCTL_HANDLER_ARGS); static int igb_sysctl_eee(SYSCTL_HANDLER_ARGS); #ifdef DEVICE_POLLING static poll_handler_t igb_poll; #endif /* POLLING */ /********************************************************************* * FreeBSD Device Interface Entry Points *********************************************************************/ static device_method_t igb_methods[] = { /* Device interface */ DEVMETHOD(device_probe, igb_probe), DEVMETHOD(device_attach, igb_attach), DEVMETHOD(device_detach, igb_detach), DEVMETHOD(device_shutdown, igb_shutdown), DEVMETHOD(device_suspend, igb_suspend), DEVMETHOD(device_resume, igb_resume), DEVMETHOD_END }; static driver_t igb_driver = { "igb", igb_methods, sizeof(struct adapter), }; static devclass_t igb_devclass; DRIVER_MODULE(igb, pci, igb_driver, igb_devclass, 0, 0); MODULE_DEPEND(igb, pci, 1, 1, 1); MODULE_DEPEND(igb, ether, 1, 1, 1); /********************************************************************* * Tunable default values. 
*********************************************************************/ static SYSCTL_NODE(_hw, OID_AUTO, igb, CTLFLAG_RD, 0, "IGB driver parameters"); /* Descriptor defaults */ static int igb_rxd = IGB_DEFAULT_RXD; static int igb_txd = IGB_DEFAULT_TXD; TUNABLE_INT("hw.igb.rxd", &igb_rxd); TUNABLE_INT("hw.igb.txd", &igb_txd); SYSCTL_INT(_hw_igb, OID_AUTO, rxd, CTLFLAG_RDTUN, &igb_rxd, 0, "Number of receive descriptors per queue"); SYSCTL_INT(_hw_igb, OID_AUTO, txd, CTLFLAG_RDTUN, &igb_txd, 0, "Number of transmit descriptors per queue"); /* ** AIM: Adaptive Interrupt Moderation, ** which means that the interrupt rate ** is varied over time based on the ** traffic for that interrupt vector */ static int igb_enable_aim = TRUE; TUNABLE_INT("hw.igb.enable_aim", &igb_enable_aim); SYSCTL_INT(_hw_igb, OID_AUTO, enable_aim, CTLFLAG_RW, &igb_enable_aim, 0, "Enable adaptive interrupt moderation"); /* * MSI-X should be the default for best performance, * but this allows it to be forced off for testing. */ static int igb_enable_msix = 1; TUNABLE_INT("hw.igb.enable_msix", &igb_enable_msix); SYSCTL_INT(_hw_igb, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &igb_enable_msix, 0, "Enable MSI-X interrupts"); /* ** Tunable interrupt rate */ static int igb_max_interrupt_rate = 8000; TUNABLE_INT("hw.igb.max_interrupt_rate", &igb_max_interrupt_rate); SYSCTL_INT(_hw_igb, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN, &igb_max_interrupt_rate, 0, "Maximum interrupts per second"); #ifndef IGB_LEGACY_TX /* ** Tunable number of buffers in the buf-ring (drbr_xxx) */ static int igb_buf_ring_size = IGB_BR_SIZE; TUNABLE_INT("hw.igb.buf_ring_size", &igb_buf_ring_size); SYSCTL_INT(_hw_igb, OID_AUTO, buf_ring_size, CTLFLAG_RDTUN, &igb_buf_ring_size, 0, "Size of the bufring"); #endif /* ** Header split causes the packet header to ** be DMA'd to a separate mbuf from the payload. ** This can have memory alignment benefits. But ** another plus is that small packets often fit ** into the header and thus use no cluster. It's ** a very workload-dependent feature. */ static int igb_header_split = FALSE; TUNABLE_INT("hw.igb.hdr_split", &igb_header_split); SYSCTL_INT(_hw_igb, OID_AUTO, header_split, CTLFLAG_RDTUN, &igb_header_split, 0, "Enable receive mbuf header split"); /* ** This will autoconfigure based on the ** number of CPUs and max supported ** MSI-X messages if left at 0. */ static int igb_num_queues = 0; TUNABLE_INT("hw.igb.num_queues", &igb_num_queues); SYSCTL_INT(_hw_igb, OID_AUTO, num_queues, CTLFLAG_RDTUN, &igb_num_queues, 0, "Number of queues to configure, 0 indicates autoconfigure"); /* ** Global variable to store the last used CPU when binding queues ** to CPUs in igb_allocate_msix. Starts at CPU_FIRST and increments when a ** queue is bound to a CPU.
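**
** Hedged sketch of the intended round-robin use (the real code lives in
** igb_allocate_msix; CPU_FIRST/CPU_NEXT come from the kernel's SMP
** headers):
**
**	if (igb_last_bind_cpu < 0)
**		igb_last_bind_cpu = CPU_FIRST();
**	bus_bind_intr(dev, que->res, igb_last_bind_cpu);
**	igb_last_bind_cpu = CPU_NEXT(igb_last_bind_cpu);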
*/ static int igb_last_bind_cpu = -1; /* How many packets rxeof tries to clean at a time */ static int igb_rx_process_limit = 100; TUNABLE_INT("hw.igb.rx_process_limit", &igb_rx_process_limit); SYSCTL_INT(_hw_igb, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN, &igb_rx_process_limit, 0, "Maximum number of received packets to process at a time, -1 means unlimited"); /* How many packets txeof tries to clean at a time */ static int igb_tx_process_limit = -1; TUNABLE_INT("hw.igb.tx_process_limit", &igb_tx_process_limit); SYSCTL_INT(_hw_igb, OID_AUTO, tx_process_limit, CTLFLAG_RDTUN, &igb_tx_process_limit, 0, "Maximum number of sent packets to process at a time, -1 means unlimited"); #ifdef DEV_NETMAP /* see ixgbe.c for details */ #include #endif /* DEV_NETMAP */ /********************************************************************* * Device identification routine * * igb_probe determines if the driver should be loaded on * adapter based on PCI vendor/device id of the adapter. * * return BUS_PROBE_DEFAULT on success, positive on failure *********************************************************************/ static int igb_probe(device_t dev) { - char adapter_name[60]; + char adapter_name[256]; uint16_t pci_vendor_id = 0; uint16_t pci_device_id = 0; uint16_t pci_subvendor_id = 0; uint16_t pci_subdevice_id = 0; igb_vendor_info_t *ent; INIT_DEBUGOUT("igb_probe: begin"); pci_vendor_id = pci_get_vendor(dev); - if (pci_vendor_id != IGB_VENDOR_ID) + if (pci_vendor_id != IGB_INTEL_VENDOR_ID) return (ENXIO); pci_device_id = pci_get_device(dev); pci_subvendor_id = pci_get_subvendor(dev); pci_subdevice_id = pci_get_subdevice(dev); ent = igb_vendor_info_array; while (ent->vendor_id != 0) { if ((pci_vendor_id == ent->vendor_id) && (pci_device_id == ent->device_id) && ((pci_subvendor_id == ent->subvendor_id) || - (ent->subvendor_id == PCI_ANY_ID)) && + (ent->subvendor_id == 0)) && ((pci_subdevice_id == ent->subdevice_id) || - (ent->subdevice_id == PCI_ANY_ID))) { - sprintf(adapter_name, "%s %s", + (ent->subdevice_id == 0))) { + sprintf(adapter_name, "%s, Version - %s", igb_strings[ent->index], igb_driver_version); device_set_desc_copy(dev, adapter_name); return (BUS_PROBE_DEFAULT); } ent++; } - return (ENXIO); } /********************************************************************* * Device initialization routine * * The attach entry point is called when the driver is being loaded. * This routine identifies the type of hardware, allocates all resources * and initializes the hardware. 
* * return 0 on success, positive on failure *********************************************************************/ static int igb_attach(device_t dev) { struct adapter *adapter; int error = 0; u16 eeprom_data; INIT_DEBUGOUT("igb_attach: begin"); if (resource_disabled("igb", device_get_unit(dev))) { device_printf(dev, "Disabled by device hint\n"); return (ENXIO); } adapter = device_get_softc(dev); adapter->dev = adapter->osdep.dev = dev; IGB_CORE_LOCK_INIT(adapter, device_get_nameunit(dev)); - /* SYSCTL stuff */ + /* SYSCTLs */ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, igb_sysctl_nvm_info, "I", "NVM Information"); igb_set_sysctl_value(adapter, "enable_aim", "Interrupt Moderation", &adapter->enable_aim, igb_enable_aim); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "fc", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, igb_set_flowcntl, "I", "Flow Control"); callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0); /* Determine hardware and mac info */ igb_identify_hardware(adapter); /* Setup PCI resources */ if (igb_allocate_pci_resources(adapter)) { device_printf(dev, "Allocation of PCI resources failed\n"); error = ENXIO; goto err_pci; } /* Do Shared Code initialization */ if (e1000_setup_init_funcs(&adapter->hw, TRUE)) { device_printf(dev, "Setup of Shared code failed\n"); error = ENXIO; goto err_pci; } e1000_get_bus_info(&adapter->hw); /* Sysctls for limiting the amount of work done in the taskqueues */ igb_set_sysctl_value(adapter, "rx_processing_limit", "max number of rx packets to process", &adapter->rx_process_limit, igb_rx_process_limit); igb_set_sysctl_value(adapter, "tx_processing_limit", "max number of tx packets to process", &adapter->tx_process_limit, igb_tx_process_limit); /* * Validate number of transmit and receive descriptors. It * must not exceed hardware maximum, and must be multiple * of E1000_DBA_ALIGN. */ if (((igb_txd * sizeof(struct e1000_tx_desc)) % IGB_DBA_ALIGN) != 0 || (igb_txd > IGB_MAX_TXD) || (igb_txd < IGB_MIN_TXD)) { device_printf(dev, "Using %d TX descriptors instead of %d!\n", IGB_DEFAULT_TXD, igb_txd); adapter->num_tx_desc = IGB_DEFAULT_TXD; } else adapter->num_tx_desc = igb_txd; if (((igb_rxd * sizeof(struct e1000_rx_desc)) % IGB_DBA_ALIGN) != 0 || (igb_rxd > IGB_MAX_RXD) || (igb_rxd < IGB_MIN_RXD)) { device_printf(dev, "Using %d RX descriptors instead of %d!\n", IGB_DEFAULT_RXD, igb_rxd); adapter->num_rx_desc = IGB_DEFAULT_RXD; } else adapter->num_rx_desc = igb_rxd; adapter->hw.mac.autoneg = DO_AUTO_NEG; adapter->hw.phy.autoneg_wait_to_complete = FALSE; adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; /* Copper options */ if (adapter->hw.phy.media_type == e1000_media_type_copper) { adapter->hw.phy.mdix = AUTO_ALL_MODES; adapter->hw.phy.disable_polarity_correction = FALSE; adapter->hw.phy.ms_type = IGB_MASTER_SLAVE; } /* * Set the frame limits assuming * standard ethernet sized frames. 
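 * With the standard constants that is 1500 (ETHERMTU) + 14
 * (ETHER_HDR_LEN) + 4 (ETHERNET_FCS_SIZE) = 1518 bytes; the
 * SIOCSIFMTU handler recomputes max_frame_size when the MTU changes.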
*/ adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE; /* ** Allocate and Setup Queues */ if (igb_allocate_queues(adapter)) { error = ENOMEM; goto err_pci; } /* Allocate the appropriate stats memory */ if (adapter->vf_ifp) { adapter->stats = (struct e1000_vf_stats *)malloc(sizeof \ (struct e1000_vf_stats), M_DEVBUF, M_NOWAIT | M_ZERO); igb_vf_init_stats(adapter); } else adapter->stats = (struct e1000_hw_stats *)malloc(sizeof \ (struct e1000_hw_stats), M_DEVBUF, M_NOWAIT | M_ZERO); if (adapter->stats == NULL) { device_printf(dev, "Cannot allocate stats memory\n"); error = ENOMEM; goto err_late; } /* Allocate multicast array memory. */ adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT); if (adapter->mta == NULL) { device_printf(dev, "Cannot allocate multicast setup array\n"); error = ENOMEM; goto err_late; } /* Some adapter-specific advanced features */ if (adapter->hw.mac.type >= e1000_i350) { SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "dmac", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, igb_sysctl_dmac, "I", "DMA Coalesce"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "eee_disabled", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, igb_sysctl_eee, "I", "Disable Energy Efficient Ethernet"); if (adapter->hw.phy.media_type == e1000_media_type_copper) { if (adapter->hw.mac.type == e1000_i354) - e1000_set_eee_i354(&adapter->hw); + e1000_set_eee_i354(&adapter->hw, TRUE, TRUE); else - e1000_set_eee_i350(&adapter->hw); + e1000_set_eee_i350(&adapter->hw, TRUE, TRUE); } } /* ** Start from a known state; this is ** important for reading the NVM and ** MAC address from it. */ e1000_reset_hw(&adapter->hw); /* Make sure we have a good EEPROM before we read from it */ if (((adapter->hw.mac.type != e1000_i210) && (adapter->hw.mac.type != e1000_i211)) && (e1000_validate_nvm_checksum(&adapter->hw) < 0)) { /* ** Some PCI-E parts fail the first check due to ** the link being in a sleep state; call it again, ** and if it fails a second time it's a real issue.
*/ if (e1000_validate_nvm_checksum(&adapter->hw) < 0) { device_printf(dev, "The EEPROM Checksum Is Not Valid\n"); error = EIO; goto err_late; } } /* ** Copy the permanent MAC address out of the EEPROM */ if (e1000_read_mac_addr(&adapter->hw) < 0) { device_printf(dev, "EEPROM read error while reading MAC" " address\n"); error = EIO; goto err_late; } /* Check its sanity */ if (!igb_is_valid_ether_addr(adapter->hw.mac.addr)) { device_printf(dev, "Invalid MAC address\n"); error = EIO; goto err_late; } /* Setup OS specific network interface */ if (igb_setup_interface(dev, adapter) != 0) goto err_late; /* Now get a good starting state */ igb_reset(adapter); /* Initialize statistics */ igb_update_stats_counters(adapter); adapter->hw.mac.get_link_status = 1; igb_update_link_status(adapter); /* Indicate SOL/IDER usage */ if (e1000_check_reset_block(&adapter->hw)) device_printf(dev, "PHY reset is blocked due to SOL/IDER session.\n"); /* Determine if we have to control management hardware */ adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw); /* * Setup Wake-on-Lan */ /* APME bit in EEPROM is mapped to WUC.APME */ eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC) & E1000_WUC_APME; if (eeprom_data) adapter->wol = E1000_WUFC_MAG; /* Register for VLAN events */ adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, igb_register_vlan, adapter, EVENTHANDLER_PRI_FIRST); adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, igb_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST); igb_add_hw_stats(adapter); /* Tell the stack that the interface is not active */ adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING; adapter->ifp->if_drv_flags |= IFF_DRV_OACTIVE; adapter->led_dev = led_create(igb_led_func, adapter, device_get_nameunit(dev)); /* ** Configure Interrupts */ if ((adapter->msix > 1) && (igb_enable_msix)) error = igb_allocate_msix(adapter); else /* MSI or Legacy */ error = igb_allocate_legacy(adapter); if (error) goto err_late; #ifdef DEV_NETMAP igb_netmap_attach(adapter); #endif /* DEV_NETMAP */ INIT_DEBUGOUT("igb_attach: end"); return (0); err_late: igb_detach(dev); igb_free_transmit_structures(adapter); igb_free_receive_structures(adapter); igb_release_hw_control(adapter); err_pci: igb_free_pci_resources(adapter); if (adapter->ifp != NULL) if_free(adapter->ifp); free(adapter->mta, M_DEVBUF); IGB_CORE_LOCK_DESTROY(adapter); return (error); } /********************************************************************* * Device removal routine * * The detach entry point is called when the driver is being removed. * This routine stops the adapter and deallocates all the resources * that were allocated for driver operation. 
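** (MSI-X is used when setup obtained more than one vector and the
** hw.igb.enable_msix tunable allows it; otherwise MSI/legacy.)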
* * return 0 on success, positive on failure *********************************************************************/ static int igb_detach(device_t dev) { struct adapter *adapter = device_get_softc(dev); struct ifnet *ifp = adapter->ifp; INIT_DEBUGOUT("igb_detach: begin"); /* Make sure VLANS are not using driver */ if (adapter->ifp->if_vlantrunk != NULL) { device_printf(dev,"Vlan in use, detach first\n"); return (EBUSY); } ether_ifdetach(adapter->ifp); if (adapter->led_dev != NULL) led_destroy(adapter->led_dev); #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif IGB_CORE_LOCK(adapter); adapter->in_detach = 1; igb_stop(adapter); IGB_CORE_UNLOCK(adapter); e1000_phy_hw_reset(&adapter->hw); /* Give control back to firmware */ igb_release_manageability(adapter); igb_release_hw_control(adapter); if (adapter->wol) { E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN); E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol); igb_enable_wakeup(dev); } /* Unregister VLAN events */ if (adapter->vlan_attach != NULL) EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach); if (adapter->vlan_detach != NULL) EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach); callout_drain(&adapter->timer); #ifdef DEV_NETMAP netmap_detach(adapter->ifp); #endif /* DEV_NETMAP */ igb_free_pci_resources(adapter); bus_generic_detach(dev); if_free(ifp); igb_free_transmit_structures(adapter); igb_free_receive_structures(adapter); if (adapter->mta != NULL) free(adapter->mta, M_DEVBUF); IGB_CORE_LOCK_DESTROY(adapter); return (0); } /********************************************************************* * * Shutdown entry point * **********************************************************************/ static int igb_shutdown(device_t dev) { return igb_suspend(dev); } /* * Suspend/resume device methods. */ static int igb_suspend(device_t dev) { struct adapter *adapter = device_get_softc(dev); IGB_CORE_LOCK(adapter); igb_stop(adapter); igb_release_manageability(adapter); igb_release_hw_control(adapter); if (adapter->wol) { E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN); E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol); igb_enable_wakeup(dev); } IGB_CORE_UNLOCK(adapter); return bus_generic_suspend(dev); } static int igb_resume(device_t dev) { struct adapter *adapter = device_get_softc(dev); struct tx_ring *txr = adapter->tx_rings; struct ifnet *ifp = adapter->ifp; IGB_CORE_LOCK(adapter); igb_init_locked(adapter); igb_init_manageability(adapter); if ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING) && adapter->link_active) { for (int i = 0; i < adapter->num_queues; i++, txr++) { IGB_TX_LOCK(txr); #ifndef IGB_LEGACY_TX /* Process the stack queue only if not depleted */ if (((txr->queue_status & IGB_QUEUE_DEPLETED) == 0) && !drbr_empty(ifp, txr->br)) igb_mq_start_locked(ifp, txr); #else if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) igb_start_locked(txr, ifp); #endif IGB_TX_UNLOCK(txr); } } IGB_CORE_UNLOCK(adapter); return bus_generic_resume(dev); } #ifdef IGB_LEGACY_TX /********************************************************************* * Transmit entry point * * igb_start is called by the stack to initiate a transmit. * The driver will remain in this routine as long as there are * packets to transmit and transmit resources are available. * In case resources are not available stack is notified and * the packet is requeued. 
**********************************************************************/ static void igb_start_locked(struct tx_ring *txr, struct ifnet *ifp) { struct adapter *adapter = ifp->if_softc; struct mbuf *m_head; IGB_TX_LOCK_ASSERT(txr); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; if (!adapter->link_active) return; /* Call cleanup if number of TX descriptors low */ if (txr->tx_avail <= IGB_TX_CLEANUP_THRESHOLD) igb_txeof(txr); while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { if (txr->tx_avail <= IGB_MAX_SCATTER) { txr->queue_status |= IGB_QUEUE_DEPLETED; break; } IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* * Encapsulation can modify our pointer, and or make it * NULL on failure. In that event, we can't requeue. */ if (igb_xmit(txr, &m_head)) { if (m_head != NULL) IFQ_DRV_PREPEND(&ifp->if_snd, m_head); if (txr->tx_avail <= IGB_MAX_SCATTER) txr->queue_status |= IGB_QUEUE_DEPLETED; break; } /* Send a copy of the frame to the BPF listener */ ETHER_BPF_MTAP(ifp, m_head); /* Set watchdog on */ txr->watchdog_time = ticks; txr->queue_status |= IGB_QUEUE_WORKING; } } /* * Legacy TX driver routine, called from the * stack, always uses tx[0], and spins for it. * Should not be used with multiqueue tx */ static void igb_start(struct ifnet *ifp) { struct adapter *adapter = ifp->if_softc; struct tx_ring *txr = adapter->tx_rings; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { IGB_TX_LOCK(txr); igb_start_locked(txr, ifp); IGB_TX_UNLOCK(txr); } return; } #else /* ~IGB_LEGACY_TX */ /* ** Multiqueue Transmit Entry: ** quick turnaround to the stack ** */ static int igb_mq_start(struct ifnet *ifp, struct mbuf *m) { struct adapter *adapter = ifp->if_softc; struct igb_queue *que; struct tx_ring *txr; int i, err = 0; /* Which queue to use */ if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) i = m->m_pkthdr.flowid % adapter->num_queues; else i = curcpu % adapter->num_queues; txr = &adapter->tx_rings[i]; que = &adapter->queues[i]; err = drbr_enqueue(ifp, txr->br, m); if (err) return (err); if (IGB_TX_TRYLOCK(txr)) { igb_mq_start_locked(ifp, txr); IGB_TX_UNLOCK(txr); } else taskqueue_enqueue(que->tq, &txr->txq_task); return (0); } static int igb_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr) { struct adapter *adapter = txr->adapter; struct mbuf *next; int err = 0, enq = 0; IGB_TX_LOCK_ASSERT(txr); if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) || adapter->link_active == 0) return (ENETDOWN); - /* Process the queue */ while ((next = drbr_peek(ifp, txr->br)) != NULL) { if ((err = igb_xmit(txr, &next)) != 0) { if (next == NULL) { /* It was freed, move forward */ drbr_advance(ifp, txr->br); } else { /* * Still have one left, it may not be * the same since the transmit function * may have changed it. */ drbr_putback(ifp, txr->br, next); } break; } drbr_advance(ifp, txr->br); enq++; ifp->if_obytes += next->m_pkthdr.len; if (next->m_flags & M_MCAST) ifp->if_omcasts++; ETHER_BPF_MTAP(ifp, next); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) break; } if (enq > 0) { /* Set the watchdog */ txr->queue_status |= IGB_QUEUE_WORKING; txr->watchdog_time = ticks; } if (txr->tx_avail <= IGB_TX_CLEANUP_THRESHOLD) igb_txeof(txr); if (txr->tx_avail <= IGB_MAX_SCATTER) txr->queue_status |= IGB_QUEUE_DEPLETED; return (err); } /* * Called from a taskqueue to drain queued transmit packets. 
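 *
 * This is the deferred half of the pattern in igb_mq_start() above: the
 * fast path enqueues to the buf_ring and tries IGB_TX_TRYLOCK(); on
 * contention it schedules txr->txq_task on the queue's taskqueue, so
 * every enqueued mbuf is still drained under the TX lock.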
*/ static void igb_deferred_mq_start(void *arg, int pending) { struct tx_ring *txr = arg; struct adapter *adapter = txr->adapter; struct ifnet *ifp = adapter->ifp; IGB_TX_LOCK(txr); if (!drbr_empty(ifp, txr->br)) igb_mq_start_locked(ifp, txr); IGB_TX_UNLOCK(txr); } /* ** Flush all ring buffers */ static void igb_qflush(struct ifnet *ifp) { struct adapter *adapter = ifp->if_softc; struct tx_ring *txr = adapter->tx_rings; struct mbuf *m; for (int i = 0; i < adapter->num_queues; i++, txr++) { IGB_TX_LOCK(txr); while ((m = buf_ring_dequeue_sc(txr->br)) != NULL) m_freem(m); IGB_TX_UNLOCK(txr); } if_qflush(ifp); } #endif /* ~IGB_LEGACY_TX */ /********************************************************************* * Ioctl entry point * * igb_ioctl is called when the user wants to configure the * interface. * * return 0 on success, positive on failure **********************************************************************/ static int igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct adapter *adapter = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; #if defined(INET) || defined(INET6) struct ifaddr *ifa = (struct ifaddr *)data; #endif bool avoid_reset = FALSE; int error = 0; if (adapter->in_detach) return (error); switch (command) { case SIOCSIFADDR: #ifdef INET if (ifa->ifa_addr->sa_family == AF_INET) avoid_reset = TRUE; #endif #ifdef INET6 if (ifa->ifa_addr->sa_family == AF_INET6) avoid_reset = TRUE; #endif /* ** Calling init results in link renegotiation, ** so we avoid doing it when possible. */ if (avoid_reset) { ifp->if_flags |= IFF_UP; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) igb_init(adapter); #ifdef INET if (!(ifp->if_flags & IFF_NOARP)) arp_ifinit(ifp, ifa); #endif } else error = ether_ioctl(ifp, command, data); break; case SIOCSIFMTU: { int max_frame_size; IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)"); IGB_CORE_LOCK(adapter); max_frame_size = 9234; if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN) { IGB_CORE_UNLOCK(adapter); error = EINVAL; break; } ifp->if_mtu = ifr->ifr_mtu; adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; igb_init_locked(adapter); IGB_CORE_UNLOCK(adapter); break; } case SIOCSIFFLAGS: IOCTL_DEBUGOUT("ioctl rcv'd:\ SIOCSIFFLAGS (Set Interface Flags)"); IGB_CORE_LOCK(adapter); if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) { if ((ifp->if_flags ^ adapter->if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) { igb_disable_promisc(adapter); igb_set_promisc(adapter); } } else igb_init_locked(adapter); } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) igb_stop(adapter); adapter->if_flags = ifp->if_flags; IGB_CORE_UNLOCK(adapter); break; case SIOCADDMULTI: case SIOCDELMULTI: IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI"); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { IGB_CORE_LOCK(adapter); igb_disable_intr(adapter); igb_set_multi(adapter); #ifdef DEVICE_POLLING if (!(ifp->if_capenable & IFCAP_POLLING)) #endif igb_enable_intr(adapter); IGB_CORE_UNLOCK(adapter); } break; case SIOCSIFMEDIA: /* Check SOL/IDER usage */ IGB_CORE_LOCK(adapter); if (e1000_check_reset_block(&adapter->hw)) { IGB_CORE_UNLOCK(adapter); device_printf(adapter->dev, "Media change is" " blocked due to SOL/IDER session.\n"); break; } IGB_CORE_UNLOCK(adapter); case SIOCGIFMEDIA: IOCTL_DEBUGOUT("ioctl rcv'd: \ SIOCxIFMEDIA (Get/Set Interface Media)"); error = ifmedia_ioctl(ifp, ifr, &adapter->media, command); break; case SIOCSIFCAP: { int mask, reinit; IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)"); 
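		/* mask holds the capability bits this request toggles */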
reinit = 0; mask = ifr->ifr_reqcap ^ ifp->if_capenable; #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { if (ifr->ifr_reqcap & IFCAP_POLLING) { error = ether_poll_register(igb_poll, ifp); if (error) return (error); IGB_CORE_LOCK(adapter); igb_disable_intr(adapter); ifp->if_capenable |= IFCAP_POLLING; IGB_CORE_UNLOCK(adapter); } else { error = ether_poll_deregister(ifp); /* Enable interrupt even in error case */ IGB_CORE_LOCK(adapter); igb_enable_intr(adapter); ifp->if_capenable &= ~IFCAP_POLLING; IGB_CORE_UNLOCK(adapter); } } #endif if (mask & IFCAP_HWCSUM) { ifp->if_capenable ^= IFCAP_HWCSUM; reinit = 1; } if (mask & IFCAP_TSO4) { ifp->if_capenable ^= IFCAP_TSO4; reinit = 1; } if (mask & IFCAP_TSO6) { ifp->if_capenable ^= IFCAP_TSO6; reinit = 1; } if (mask & IFCAP_VLAN_HWTAGGING) { ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; reinit = 1; } if (mask & IFCAP_VLAN_HWFILTER) { ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; reinit = 1; } if (mask & IFCAP_VLAN_HWTSO) { ifp->if_capenable ^= IFCAP_VLAN_HWTSO; reinit = 1; } if (mask & IFCAP_LRO) { ifp->if_capenable ^= IFCAP_LRO; reinit = 1; } if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) igb_init(adapter); VLAN_CAPABILITIES(ifp); break; } default: error = ether_ioctl(ifp, command, data); break; } return (error); } /********************************************************************* * Init entry point * * This routine is used in two ways. It is used by the stack as * init entry point in network interface structure. It is also used * by the driver as a hw/sw initialization routine to get to a * consistent state. * * return 0 on success, positive on failure **********************************************************************/ static void igb_init_locked(struct adapter *adapter) { struct ifnet *ifp = adapter->ifp; device_t dev = adapter->dev; INIT_DEBUGOUT("igb_init: begin"); IGB_CORE_LOCK_ASSERT(adapter); igb_disable_intr(adapter); callout_stop(&adapter->timer); /* Get the latest mac address, User can use a LAA */ bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr, ETHER_ADDR_LEN); /* Put the address into the Receive Address Array */ e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); igb_reset(adapter); igb_update_link_status(adapter); E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN); /* Set hardware offload abilities */ ifp->if_hwassist = 0; if (ifp->if_capenable & IFCAP_TXCSUM) { ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP); #if __FreeBSD_version >= 800000 if ((adapter->hw.mac.type == e1000_82576) || (adapter->hw.mac.type == e1000_82580)) ifp->if_hwassist |= CSUM_SCTP; #endif } if (ifp->if_capenable & IFCAP_TSO) ifp->if_hwassist |= CSUM_TSO; + /* Clear bad data from Rx FIFOs */ + e1000_rx_fifo_flush_82575(&adapter->hw); + /* Configure for OS presence */ igb_init_manageability(adapter); /* Prepare transmit descriptors and buffers */ igb_setup_transmit_structures(adapter); igb_initialize_transmit_units(adapter); /* Setup Multicast table */ igb_set_multi(adapter); /* ** Figure out the desired mbuf pool ** for doing jumbo/packetsplit */ if (adapter->max_frame_size <= 2048) adapter->rx_mbuf_sz = MCLBYTES; else if (adapter->max_frame_size <= 4096) adapter->rx_mbuf_sz = MJUMPAGESIZE; else adapter->rx_mbuf_sz = MJUM9BYTES; /* Prepare receive descriptors and buffers */ if (igb_setup_receive_structures(adapter)) { device_printf(dev, "Could not setup receive structures\n"); return; } igb_initialize_receive_units(adapter); /* Enable VLAN support */ if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) igb_setup_vlan_hw_support(adapter); /* Don't lose 
promiscuous settings */ igb_set_promisc(adapter); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; callout_reset(&adapter->timer, hz, igb_local_timer, adapter); e1000_clear_hw_cntrs_base_generic(&adapter->hw); if (adapter->msix > 1) /* Set up queue routing */ igb_configure_queues(adapter); /* this clears any pending interrupts */ E1000_READ_REG(&adapter->hw, E1000_ICR); #ifdef DEVICE_POLLING /* * Only enable interrupts if we are not polling, make sure * they are off otherwise. */ if (ifp->if_capenable & IFCAP_POLLING) igb_disable_intr(adapter); else #endif /* DEVICE_POLLING */ { igb_enable_intr(adapter); E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_LSC); } /* Set Energy Efficient Ethernet */ if (adapter->hw.phy.media_type == e1000_media_type_copper) { if (adapter->hw.mac.type == e1000_i354) - e1000_set_eee_i354(&adapter->hw); + e1000_set_eee_i354(&adapter->hw, TRUE, TRUE); else - e1000_set_eee_i350(&adapter->hw); + e1000_set_eee_i350(&adapter->hw, TRUE, TRUE); } } static void igb_init(void *arg) { struct adapter *adapter = arg; IGB_CORE_LOCK(adapter); igb_init_locked(adapter); IGB_CORE_UNLOCK(adapter); } static void igb_handle_que(void *context, int pending) { struct igb_queue *que = context; struct adapter *adapter = que->adapter; struct tx_ring *txr = que->txr; struct ifnet *ifp = adapter->ifp; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { bool more; more = igb_rxeof(que, adapter->rx_process_limit, NULL); IGB_TX_LOCK(txr); igb_txeof(txr); #ifndef IGB_LEGACY_TX /* Process the stack queue only if not depleted */ if (((txr->queue_status & IGB_QUEUE_DEPLETED) == 0) && !drbr_empty(ifp, txr->br)) igb_mq_start_locked(ifp, txr); #else if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) igb_start_locked(txr, ifp); #endif IGB_TX_UNLOCK(txr); /* Do we need another? */ if (more) { taskqueue_enqueue(que->tq, &que->que_task); return; } } #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) return; #endif /* Reenable this interrupt */ if (que->eims) E1000_WRITE_REG(&adapter->hw, E1000_EIMS, que->eims); else igb_enable_intr(adapter); } /* Deal with link in a sleepable context */ static void igb_handle_link(void *context, int pending) { struct adapter *adapter = context; IGB_CORE_LOCK(adapter); igb_handle_link_locked(adapter); IGB_CORE_UNLOCK(adapter); } static void igb_handle_link_locked(struct adapter *adapter) { struct tx_ring *txr = adapter->tx_rings; struct ifnet *ifp = adapter->ifp; IGB_CORE_LOCK_ASSERT(adapter); adapter->hw.mac.get_link_status = 1; igb_update_link_status(adapter); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && adapter->link_active) { for (int i = 0; i < adapter->num_queues; i++, txr++) { IGB_TX_LOCK(txr); #ifndef IGB_LEGACY_TX /* Process the stack queue only if not depleted */ if (((txr->queue_status & IGB_QUEUE_DEPLETED) == 0) && !drbr_empty(ifp, txr->br)) igb_mq_start_locked(ifp, txr); #else if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) igb_start_locked(txr, ifp); #endif IGB_TX_UNLOCK(txr); } } } /********************************************************************* * * MSI/Legacy Deferred * Interrupt Service routine * *********************************************************************/ static int igb_irq_fast(void *arg) { struct adapter *adapter = arg; struct igb_queue *que = adapter->queues; u32 reg_icr; reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR); /* Hot eject? */ if (reg_icr == 0xffffffff) return FILTER_STRAY; /* Definitely not our interrupt. 
*/ if (reg_icr == 0x0) return FILTER_STRAY; if ((reg_icr & E1000_ICR_INT_ASSERTED) == 0) return FILTER_STRAY; /* * Mask interrupts until the taskqueue is finished running. This is * cheap, just assume that it is needed. This also works around the * MSI message reordering errata on certain systems. */ igb_disable_intr(adapter); taskqueue_enqueue(que->tq, &que->que_task); /* Link status change */ if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) taskqueue_enqueue(que->tq, &adapter->link_task); if (reg_icr & E1000_ICR_RXO) adapter->rx_overruns++; return FILTER_HANDLED; } #ifdef DEVICE_POLLING #if __FreeBSD_version >= 800000 #define POLL_RETURN_COUNT(a) (a) static int #else #define POLL_RETURN_COUNT(a) static void #endif igb_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct adapter *adapter = ifp->if_softc; struct igb_queue *que; struct tx_ring *txr; u32 reg_icr, rx_done = 0; u32 loop = IGB_MAX_LOOP; bool more; IGB_CORE_LOCK(adapter); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { IGB_CORE_UNLOCK(adapter); return POLL_RETURN_COUNT(rx_done); } if (cmd == POLL_AND_CHECK_STATUS) { reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR); /* Link status change */ if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) igb_handle_link_locked(adapter); if (reg_icr & E1000_ICR_RXO) adapter->rx_overruns++; } IGB_CORE_UNLOCK(adapter); for (int i = 0; i < adapter->num_queues; i++) { que = &adapter->queues[i]; txr = que->txr; igb_rxeof(que, count, &rx_done); IGB_TX_LOCK(txr); do { more = igb_txeof(txr); } while (loop-- && more); #ifndef IGB_LEGACY_TX if (!drbr_empty(ifp, txr->br)) igb_mq_start_locked(ifp, txr); #else if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) igb_start_locked(txr, ifp); #endif IGB_TX_UNLOCK(txr); } return POLL_RETURN_COUNT(rx_done); } #endif /* DEVICE_POLLING */ /********************************************************************* * * MSIX Que Interrupt Service routine * **********************************************************************/ static void igb_msix_que(void *arg) { struct igb_queue *que = arg; struct adapter *adapter = que->adapter; struct ifnet *ifp = adapter->ifp; struct tx_ring *txr = que->txr; struct rx_ring *rxr = que->rxr; u32 newitr = 0; bool more_rx; /* Ignore spurious interrupts */ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; E1000_WRITE_REG(&adapter->hw, E1000_EIMC, que->eims); ++que->irqs; IGB_TX_LOCK(txr); igb_txeof(txr); #ifndef IGB_LEGACY_TX /* Process the stack queue only if not depleted */ if (((txr->queue_status & IGB_QUEUE_DEPLETED) == 0) && !drbr_empty(ifp, txr->br)) igb_mq_start_locked(ifp, txr); #else if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) igb_start_locked(txr, ifp); #endif IGB_TX_UNLOCK(txr); more_rx = igb_rxeof(que, adapter->rx_process_limit, NULL); if (adapter->enable_aim == FALSE) goto no_calc; /* ** Do Adaptive Interrupt Moderation: ** - Write out last calculated setting ** - Calculate based on average size over ** the last interval. 
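	**
	** Worked example (hedged): at 1 Gb/s with an average frame of
	** 1024 bytes, newitr = 1024 + 24 = 1048; since 300 < 1048 < 1200
	** it becomes 1048 / 3 = 349, which is masked with 0x7FFC before
	** being written to EITR on the next pass.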
*/ if (que->eitr_setting) E1000_WRITE_REG(&adapter->hw, E1000_EITR(que->msix), que->eitr_setting); que->eitr_setting = 0; /* Idle, do nothing */ if ((txr->bytes == 0) && (rxr->bytes == 0)) goto no_calc; /* Use half the default if sub-gig */ if (adapter->link_speed != 1000) newitr = IGB_DEFAULT_ITR / 2; else { if ((txr->bytes) && (txr->packets)) newitr = txr->bytes/txr->packets; if ((rxr->bytes) && (rxr->packets)) newitr = max(newitr, (rxr->bytes / rxr->packets)); newitr += 24; /* account for hardware frame, crc */ /* set an upper boundary */ newitr = min(newitr, 3000); /* Be nice to the mid range */ if ((newitr > 300) && (newitr < 1200)) newitr = (newitr / 3); else newitr = (newitr / 2); } newitr &= 0x7FFC; /* Mask invalid bits */ if (adapter->hw.mac.type == e1000_82575) newitr |= newitr << 16; else newitr |= E1000_EITR_CNT_IGNR; /* save for next interrupt */ que->eitr_setting = newitr; /* Reset state */ txr->bytes = 0; txr->packets = 0; rxr->bytes = 0; rxr->packets = 0; no_calc: /* Schedule a clean task if needed */ if (more_rx) taskqueue_enqueue(que->tq, &que->que_task); else /* Reenable this interrupt */ E1000_WRITE_REG(&adapter->hw, E1000_EIMS, que->eims); return; } /********************************************************************* * * MSIX Link Interrupt Service routine * **********************************************************************/ static void igb_msix_link(void *arg) { struct adapter *adapter = arg; u32 icr; ++adapter->link_irq; icr = E1000_READ_REG(&adapter->hw, E1000_ICR); if (!(icr & E1000_ICR_LSC)) goto spurious; igb_handle_link(adapter, 0); spurious: /* Rearm */ E1000_WRITE_REG(&adapter->hw, E1000_IMS, E1000_IMS_LSC); E1000_WRITE_REG(&adapter->hw, E1000_EIMS, adapter->link_mask); return; } /********************************************************************* * * Media Ioctl callback * * This routine is called whenever the user queries the status of * the interface using ifconfig. * **********************************************************************/ static void igb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) { struct adapter *adapter = ifp->if_softc; INIT_DEBUGOUT("igb_media_status: begin"); IGB_CORE_LOCK(adapter); igb_update_link_status(adapter); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (!adapter->link_active) { IGB_CORE_UNLOCK(adapter); return; } ifmr->ifm_status |= IFM_ACTIVE; switch (adapter->link_speed) { case 10: ifmr->ifm_active |= IFM_10_T; break; case 100: /* ** Support for 100Mb SFP - these are Fiber, ** but the media type appears as serdes */ if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) ifmr->ifm_active |= IFM_100_FX; else ifmr->ifm_active |= IFM_100_TX; break; case 1000: ifmr->ifm_active |= IFM_1000_T; break; case 2500: ifmr->ifm_active |= IFM_2500_SX; break; } if (adapter->link_duplex == FULL_DUPLEX) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; IGB_CORE_UNLOCK(adapter); } /********************************************************************* * * Media Ioctl callback * * This routine is called when the user changes speed/duplex using * the media/mediaopt option with ifconfig.
* **********************************************************************/ static int igb_media_change(struct ifnet *ifp) { struct adapter *adapter = ifp->if_softc; struct ifmedia *ifm = &adapter->media; INIT_DEBUGOUT("igb_media_change: begin"); if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); IGB_CORE_LOCK(adapter); switch (IFM_SUBTYPE(ifm->ifm_media)) { case IFM_AUTO: adapter->hw.mac.autoneg = DO_AUTO_NEG; adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; break; case IFM_1000_LX: case IFM_1000_SX: case IFM_1000_T: adapter->hw.mac.autoneg = DO_AUTO_NEG; adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; break; case IFM_100_TX: adapter->hw.mac.autoneg = FALSE; adapter->hw.phy.autoneg_advertised = 0; if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL; else adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF; break; case IFM_10_T: adapter->hw.mac.autoneg = FALSE; adapter->hw.phy.autoneg_advertised = 0; if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL; else adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF; break; default: device_printf(adapter->dev, "Unsupported media type\n"); } igb_init_locked(adapter); IGB_CORE_UNLOCK(adapter); return (0); } /********************************************************************* * * This routine maps the mbufs to Advanced TX descriptors. * **********************************************************************/ static int igb_xmit(struct tx_ring *txr, struct mbuf **m_headp) { struct adapter *adapter = txr->adapter; u32 olinfo_status = 0, cmd_type_len; int i, j, error, nsegs; int first; bool remap = TRUE; struct mbuf *m_head; bus_dma_segment_t segs[IGB_MAX_SCATTER]; bus_dmamap_t map; struct igb_tx_buf *txbuf; union e1000_adv_tx_desc *txd = NULL; m_head = *m_headp; /* Basic descriptor defines */ cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT); if (m_head->m_flags & M_VLANTAG) cmd_type_len |= E1000_ADVTXD_DCMD_VLE; /* * Important to capture the first descriptor * used because it will contain the index of * the one we tell the hardware to report back */ first = txr->next_avail_desc; txbuf = &txr->tx_buffers[first]; map = txbuf->map; /* * Map the packet for DMA. */ retry: error = bus_dmamap_load_mbuf_sg(txr->txtag, map, *m_headp, segs, &nsegs, BUS_DMA_NOWAIT); if (__predict_false(error)) { struct mbuf *m; switch (error) { case EFBIG: /* Try it again? 
- one try */ if (remap == TRUE) { remap = FALSE; m = m_collapse(*m_headp, M_NOWAIT, IGB_MAX_SCATTER); if (m == NULL) { adapter->mbuf_defrag_failed++; m_freem(*m_headp); *m_headp = NULL; return (ENOBUFS); } *m_headp = m; goto retry; } else return (error); default: txr->no_tx_dma_setup++; m_freem(*m_headp); *m_headp = NULL; return (error); } } /* Make certain there are enough descriptors */ if (nsegs > txr->tx_avail - 2) { txr->no_desc_avail++; bus_dmamap_unload(txr->txtag, map); return (ENOBUFS); } m_head = *m_headp; /* ** Set up the appropriate offload context ** this will consume the first descriptor */ error = igb_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status); if (__predict_false(error)) { m_freem(*m_headp); *m_headp = NULL; return (error); } /* 82575 needs the queue index added */ if (adapter->hw.mac.type == e1000_82575) olinfo_status |= txr->me << 4; i = txr->next_avail_desc; for (j = 0; j < nsegs; j++) { bus_size_t seglen; bus_addr_t segaddr; txbuf = &txr->tx_buffers[i]; txd = &txr->tx_base[i]; seglen = segs[j].ds_len; segaddr = htole64(segs[j].ds_addr); txd->read.buffer_addr = segaddr; txd->read.cmd_type_len = htole32(E1000_TXD_CMD_IFCS | cmd_type_len | seglen); txd->read.olinfo_status = htole32(olinfo_status); if (++i == txr->num_desc) i = 0; } txd->read.cmd_type_len |= htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS); txr->tx_avail -= nsegs; txr->next_avail_desc = i; txbuf->m_head = m_head; /* ** Here we swap the map so the last descriptor, ** which gets the completion interrupt has the ** real map, and the first descriptor gets the ** unused map from this descriptor. */ txr->tx_buffers[first].map = txbuf->map; txbuf->map = map; bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE); /* Set the EOP descriptor that will be marked done */ txbuf = &txr->tx_buffers[first]; txbuf->eop = txd; bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* * Advance the Transmit Descriptor Tail (Tdt), this tells the * hardware that this frame is available to transmit. 
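	 * The PREREAD|PREWRITE sync of the descriptor ring just above must
	 * complete before this register write: the tail update is what
	 * hands ownership of the newly built descriptors to the MAC.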
*/ ++txr->total_packets; E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), i); return (0); } static void igb_set_promisc(struct adapter *adapter) { struct ifnet *ifp = adapter->ifp; struct e1000_hw *hw = &adapter->hw; u32 reg; if (adapter->vf_ifp) { e1000_promisc_set_vf(hw, e1000_promisc_enabled); return; } reg = E1000_READ_REG(hw, E1000_RCTL); if (ifp->if_flags & IFF_PROMISC) { reg |= (E1000_RCTL_UPE | E1000_RCTL_MPE); E1000_WRITE_REG(hw, E1000_RCTL, reg); } else if (ifp->if_flags & IFF_ALLMULTI) { reg |= E1000_RCTL_MPE; reg &= ~E1000_RCTL_UPE; E1000_WRITE_REG(hw, E1000_RCTL, reg); } } static void igb_disable_promisc(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct ifnet *ifp = adapter->ifp; u32 reg; int mcnt = 0; if (adapter->vf_ifp) { e1000_promisc_set_vf(hw, e1000_promisc_disabled); return; } reg = E1000_READ_REG(hw, E1000_RCTL); reg &= (~E1000_RCTL_UPE); if (ifp->if_flags & IFF_ALLMULTI) mcnt = MAX_NUM_MULTICAST_ADDRESSES; else { struct ifmultiaddr *ifma; #if __FreeBSD_version < 800000 IF_ADDR_LOCK(ifp); #else if_maddr_rlock(ifp); #endif TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) break; mcnt++; } #if __FreeBSD_version < 800000 IF_ADDR_UNLOCK(ifp); #else if_maddr_runlock(ifp); #endif } /* Don't disable if in MAX groups */ if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) reg &= (~E1000_RCTL_MPE); E1000_WRITE_REG(hw, E1000_RCTL, reg); } /********************************************************************* * Multicast Update * * This routine is called whenever multicast address list is updated. * **********************************************************************/ static void igb_set_multi(struct adapter *adapter) { struct ifnet *ifp = adapter->ifp; struct ifmultiaddr *ifma; u32 reg_rctl = 0; u8 *mta; int mcnt = 0; IOCTL_DEBUGOUT("igb_set_multi: begin"); mta = adapter->mta; bzero(mta, sizeof(uint8_t) * ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES); #if __FreeBSD_version < 800000 IF_ADDR_LOCK(ifp); #else if_maddr_rlock(ifp); #endif TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) break; bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN); mcnt++; } #if __FreeBSD_version < 800000 IF_ADDR_UNLOCK(ifp); #else if_maddr_runlock(ifp); #endif if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) { reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); reg_rctl |= E1000_RCTL_MPE; E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); } else e1000_update_mc_addr_list(&adapter->hw, mta, mcnt); } /********************************************************************* * Timer routine: * This routine checks for link status, * updates statistics, and does the watchdog. 
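* It rearms itself once a second via callout_reset(&adapter->timer, hz, ...).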
* **********************************************************************/ static void igb_local_timer(void *arg) { struct adapter *adapter = arg; device_t dev = adapter->dev; struct ifnet *ifp = adapter->ifp; struct tx_ring *txr = adapter->tx_rings; struct igb_queue *que = adapter->queues; int hung = 0, busy = 0; IGB_CORE_LOCK_ASSERT(adapter); igb_update_link_status(adapter); igb_update_stats_counters(adapter); /* ** Check the TX queues status ** - central locked handling of OACTIVE ** - watchdog only if all queues show hung */ for (int i = 0; i < adapter->num_queues; i++, que++, txr++) { if ((txr->queue_status & IGB_QUEUE_HUNG) && (adapter->pause_frames == 0)) ++hung; if (txr->queue_status & IGB_QUEUE_DEPLETED) ++busy; if ((txr->queue_status & IGB_QUEUE_IDLE) == 0) taskqueue_enqueue(que->tq, &que->que_task); } if (hung == adapter->num_queues) goto timeout; if (busy == adapter->num_queues) ifp->if_drv_flags |= IFF_DRV_OACTIVE; else if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) && (busy < adapter->num_queues)) ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; adapter->pause_frames = 0; callout_reset(&adapter->timer, hz, igb_local_timer, adapter); #ifndef DEVICE_POLLING /* Schedule all queue interrupts - deadlock protection */ E1000_WRITE_REG(&adapter->hw, E1000_EICS, adapter->que_mask); #endif return; timeout: device_printf(adapter->dev, "Watchdog timeout -- resetting\n"); device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me, E1000_READ_REG(&adapter->hw, E1000_TDH(txr->me)), E1000_READ_REG(&adapter->hw, E1000_TDT(txr->me))); device_printf(dev,"TX(%d) desc avail = %d," "Next TX to Clean = %d\n", txr->me, txr->tx_avail, txr->next_to_clean); adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING; adapter->watchdog_events++; igb_init_locked(adapter); } static void igb_update_link_status(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct e1000_fc_info *fc = &hw->fc; struct ifnet *ifp = adapter->ifp; device_t dev = adapter->dev; struct tx_ring *txr = adapter->tx_rings; u32 link_check, thstat, ctrl; char *flowctl = NULL; link_check = thstat = ctrl = 0; /* Get the cached link value or read for real */ switch (hw->phy.media_type) { case e1000_media_type_copper: if (hw->mac.get_link_status) { /* Do the work to read phy */ e1000_check_for_link(hw); link_check = !hw->mac.get_link_status; } else link_check = TRUE; break; case e1000_media_type_fiber: e1000_check_for_link(hw); link_check = (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU); break; case e1000_media_type_internal_serdes: e1000_check_for_link(hw); link_check = adapter->hw.mac.serdes_has_link; break; /* VF device is type_unknown */ case e1000_media_type_unknown: e1000_check_for_link(hw); link_check = !hw->mac.get_link_status; /* Fall thru */ default: break; } /* Check for thermal downshift or shutdown */ if (hw->mac.type == e1000_i350) { thstat = E1000_READ_REG(hw, E1000_THSTAT); ctrl = E1000_READ_REG(hw, E1000_CTRL_EXT); } /* Get the flow control for display */ switch (fc->current_mode) { case e1000_fc_rx_pause: flowctl = "RX"; break; case e1000_fc_tx_pause: flowctl = "TX"; break; case e1000_fc_full: flowctl = "Full"; break; case e1000_fc_none: default: flowctl = "None"; break; } /* Now we check if a transition has happened */ if (link_check && (adapter->link_active == 0)) { e1000_get_speed_and_duplex(&adapter->hw, &adapter->link_speed, &adapter->link_duplex); if (bootverbose) device_printf(dev, "Link is up %d Mbps %s," " Flow Control: %s\n", adapter->link_speed, ((adapter->link_duplex == FULL_DUPLEX) ? 
"Full Duplex" : "Half Duplex"), flowctl); adapter->link_active = 1; ifp->if_baudrate = adapter->link_speed * 1000000; if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) && (thstat & E1000_THSTAT_LINK_THROTTLE)) device_printf(dev, "Link: thermal downshift\n"); /* Delay Link Up for Phy update */ if (((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) && (hw->phy.id == I210_I_PHY_ID)) msec_delay(I210_LINK_DELAY); /* Reset if the media type changed. */ if (hw->dev_spec._82575.media_changed) { hw->dev_spec._82575.media_changed = false; adapter->flags |= IGB_MEDIA_RESET; igb_reset(adapter); } /* This can sleep */ if_link_state_change(ifp, LINK_STATE_UP); } else if (!link_check && (adapter->link_active == 1)) { ifp->if_baudrate = adapter->link_speed = 0; adapter->link_duplex = 0; if (bootverbose) device_printf(dev, "Link is Down\n"); if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) && (thstat & E1000_THSTAT_PWR_DOWN)) device_printf(dev, "Link: thermal shutdown\n"); adapter->link_active = 0; /* This can sleep */ if_link_state_change(ifp, LINK_STATE_DOWN); /* Reset queue state */ for (int i = 0; i < adapter->num_queues; i++, txr++) txr->queue_status = IGB_QUEUE_IDLE; } } /********************************************************************* * * This routine disables all traffic on the adapter by issuing a * global reset on the MAC and deallocates TX/RX buffers. * **********************************************************************/ static void igb_stop(void *arg) { struct adapter *adapter = arg; struct ifnet *ifp = adapter->ifp; struct tx_ring *txr = adapter->tx_rings; IGB_CORE_LOCK_ASSERT(adapter); INIT_DEBUGOUT("igb_stop: begin"); igb_disable_intr(adapter); callout_stop(&adapter->timer); /* Tell the stack that the interface is no longer active */ ifp->if_drv_flags &= ~IFF_DRV_RUNNING; ifp->if_drv_flags |= IFF_DRV_OACTIVE; /* Disarm watchdog timer. */ for (int i = 0; i < adapter->num_queues; i++, txr++) { IGB_TX_LOCK(txr); txr->queue_status = IGB_QUEUE_IDLE; IGB_TX_UNLOCK(txr); } e1000_reset_hw(&adapter->hw); E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0); e1000_led_off(&adapter->hw); e1000_cleanup_led(&adapter->hw); } /********************************************************************* * * Determine hardware revision. * **********************************************************************/ static void igb_identify_hardware(struct adapter *adapter) { device_t dev = adapter->dev; /* Make sure our PCI config space has the necessary stuff set */ pci_enable_busmaster(dev); adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); /* Save off the information about this board */ adapter->hw.vendor_id = pci_get_vendor(dev); adapter->hw.device_id = pci_get_device(dev); adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1); adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2); adapter->hw.subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); /* Set MAC type early for PCI setup */ e1000_set_mac_type(&adapter->hw); /* Are we a VF device? 
*/ if ((adapter->hw.mac.type == e1000_vfadapt) || (adapter->hw.mac.type == e1000_vfadapt_i350)) adapter->vf_ifp = 1; else adapter->vf_ifp = 0; } static int igb_allocate_pci_resources(struct adapter *adapter) { device_t dev = adapter->dev; int rid; rid = PCIR_BAR(0); adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (adapter->pci_mem == NULL) { device_printf(dev, "Unable to allocate bus resource: memory\n"); return (ENXIO); } adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->pci_mem); adapter->osdep.mem_bus_space_handle = rman_get_bushandle(adapter->pci_mem); adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle; adapter->num_queues = 1; /* Defaults for Legacy or MSI */ /* This will setup either MSI/X or MSI */ adapter->msix = igb_setup_msix(adapter); adapter->hw.back = &adapter->osdep; return (0); } /********************************************************************* * * Setup the Legacy or MSI Interrupt handler * **********************************************************************/ static int igb_allocate_legacy(struct adapter *adapter) { device_t dev = adapter->dev; struct igb_queue *que = adapter->queues; #ifndef IGB_LEGACY_TX struct tx_ring *txr = adapter->tx_rings; #endif int error, rid = 0; /* Turn off all interrupts */ E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff); /* MSI RID is 1 */ if (adapter->msix == 1) rid = 1; /* We allocate a single interrupt resource */ adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (adapter->res == NULL) { device_printf(dev, "Unable to allocate bus resource: " "interrupt\n"); return (ENXIO); } #ifndef IGB_LEGACY_TX TASK_INIT(&txr->txq_task, 0, igb_deferred_mq_start, txr); #endif /* * Try allocating a fast interrupt and the associated deferred * processing contexts. 
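* The filter routine igb_irq_fast() runs at interrupt level; it only masks further interrupts and enqueues the taskqueue work set up here.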
*/ TASK_INIT(&que->que_task, 0, igb_handle_que, que); /* Make tasklet for deferred link handling */ TASK_INIT(&adapter->link_task, 0, igb_handle_link, adapter); que->tq = taskqueue_create_fast("igb_taskq", M_NOWAIT, taskqueue_thread_enqueue, &que->tq); taskqueue_start_threads(&que->tq, 1, PI_NET, "%s taskq", device_get_nameunit(adapter->dev)); if ((error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET | INTR_MPSAFE, igb_irq_fast, NULL, adapter, &adapter->tag)) != 0) { device_printf(dev, "Failed to register fast interrupt " "handler: %d\n", error); taskqueue_free(que->tq); que->tq = NULL; return (error); } return (0); } /********************************************************************* * * Setup the MSIX Queue Interrupt handlers: * **********************************************************************/ static int igb_allocate_msix(struct adapter *adapter) { device_t dev = adapter->dev; struct igb_queue *que = adapter->queues; int error, rid, vector = 0; /* Be sure to start with all interrupts disabled */ E1000_WRITE_REG(&adapter->hw, E1000_IMC, ~0); E1000_WRITE_FLUSH(&adapter->hw); for (int i = 0; i < adapter->num_queues; i++, vector++, que++) { rid = vector +1; que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (que->res == NULL) { device_printf(dev, "Unable to allocate bus resource: " "MSIX Queue Interrupt\n"); return (ENXIO); } error = bus_setup_intr(dev, que->res, INTR_TYPE_NET | INTR_MPSAFE, NULL, igb_msix_que, que, &que->tag); if (error) { que->res = NULL; device_printf(dev, "Failed to register Queue handler"); return (error); } #if __FreeBSD_version >= 800504 bus_describe_intr(dev, que->res, que->tag, "que %d", i); #endif que->msix = vector; if (adapter->hw.mac.type == e1000_82575) que->eims = E1000_EICR_TX_QUEUE0 << i; else que->eims = 1 << vector; /* ** Bind the msix vector, and thus the ** rings to the corresponding cpu. 
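** For example, with 4 queues the vectors land round-robin on CPU_FIRST() and its successors, i.e. CPUs 0..3 on a contiguously numbered box.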
*/ if (adapter->num_queues > 1) { if (igb_last_bind_cpu < 0) igb_last_bind_cpu = CPU_FIRST(); bus_bind_intr(dev, que->res, igb_last_bind_cpu); device_printf(dev, "Bound queue %d to cpu %d\n", i,igb_last_bind_cpu); igb_last_bind_cpu = CPU_NEXT(igb_last_bind_cpu); } #ifndef IGB_LEGACY_TX TASK_INIT(&que->txr->txq_task, 0, igb_deferred_mq_start, que->txr); #endif /* Make tasklet for deferred handling */ TASK_INIT(&que->que_task, 0, igb_handle_que, que); que->tq = taskqueue_create("igb_que", M_NOWAIT, taskqueue_thread_enqueue, &que->tq); taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que", device_get_nameunit(adapter->dev)); } /* And Link */ rid = vector + 1; adapter->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (adapter->res == NULL) { device_printf(dev, "Unable to allocate bus resource: " "MSIX Link Interrupt\n"); return (ENXIO); } if ((error = bus_setup_intr(dev, adapter->res, INTR_TYPE_NET | INTR_MPSAFE, NULL, igb_msix_link, adapter, &adapter->tag)) != 0) { device_printf(dev, "Failed to register Link handler"); return (error); } #if __FreeBSD_version >= 800504 bus_describe_intr(dev, adapter->res, adapter->tag, "link"); #endif adapter->linkvec = vector; return (0); } static void igb_configure_queues(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct igb_queue *que; u32 tmp, ivar = 0, newitr = 0; /* First turn on RSS capability */ if (adapter->hw.mac.type != e1000_82575) E1000_WRITE_REG(hw, E1000_GPIE, E1000_GPIE_MSIX_MODE | E1000_GPIE_EIAME | E1000_GPIE_PBA | E1000_GPIE_NSICR); /* Turn on MSIX */ switch (adapter->hw.mac.type) { case e1000_82580: case e1000_i350: case e1000_i354: case e1000_i210: case e1000_i211: case e1000_vfadapt: case e1000_vfadapt_i350: /* RX entries */ for (int i = 0; i < adapter->num_queues; i++) { u32 index = i >> 1; ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); que = &adapter->queues[i]; if (i & 1) { ivar &= 0xFF00FFFF; ivar |= (que->msix | E1000_IVAR_VALID) << 16; } else { ivar &= 0xFFFFFF00; ivar |= que->msix | E1000_IVAR_VALID; } E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); } /* TX entries */ for (int i = 0; i < adapter->num_queues; i++) { u32 index = i >> 1; ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); que = &adapter->queues[i]; if (i & 1) { ivar &= 0x00FFFFFF; ivar |= (que->msix | E1000_IVAR_VALID) << 24; } else { ivar &= 0xFFFF00FF; ivar |= (que->msix | E1000_IVAR_VALID) << 8; } E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); adapter->que_mask |= que->eims; } /* And for the link interrupt */ ivar = (adapter->linkvec | E1000_IVAR_VALID) << 8; adapter->link_mask = 1 << adapter->linkvec; E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar); break; case e1000_82576: /* RX entries */ for (int i = 0; i < adapter->num_queues; i++) { u32 index = i & 0x7; /* Each IVAR has two entries */ ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); que = &adapter->queues[i]; if (i < 8) { ivar &= 0xFFFFFF00; ivar |= que->msix | E1000_IVAR_VALID; } else { ivar &= 0xFF00FFFF; ivar |= (que->msix | E1000_IVAR_VALID) << 16; } E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); adapter->que_mask |= que->eims; } /* TX entries */ for (int i = 0; i < adapter->num_queues; i++) { u32 index = i & 0x7; /* Each IVAR has two entries */ ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); que = &adapter->queues[i]; if (i < 8) { ivar &= 0xFFFF00FF; ivar |= (que->msix | E1000_IVAR_VALID) << 8; } else { ivar &= 0x00FFFFFF; ivar |= (que->msix | E1000_IVAR_VALID) << 24; } E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 
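/* Each 32-bit IVAR packs four 8-bit entries: bit 7 (E1000_IVAR_VALID) activates an entry and the low bits select the MSI-X vector; que_mask accumulates every queue's EIMS bit. */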
adapter->que_mask |= que->eims; } /* And for the link interrupt */ ivar = (adapter->linkvec | E1000_IVAR_VALID) << 8; adapter->link_mask = 1 << adapter->linkvec; E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar); break; case e1000_82575: /* enable MSI-X support*/ tmp = E1000_READ_REG(hw, E1000_CTRL_EXT); tmp |= E1000_CTRL_EXT_PBA_CLR; /* Auto-Mask interrupts upon ICR read. */ tmp |= E1000_CTRL_EXT_EIAME; tmp |= E1000_CTRL_EXT_IRCA; E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp); /* Queues */ for (int i = 0; i < adapter->num_queues; i++) { que = &adapter->queues[i]; tmp = E1000_EICR_RX_QUEUE0 << i; tmp |= E1000_EICR_TX_QUEUE0 << i; que->eims = tmp; E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), i, que->eims); adapter->que_mask |= que->eims; } /* Link */ E1000_WRITE_REG(hw, E1000_MSIXBM(adapter->linkvec), E1000_EIMS_OTHER); adapter->link_mask |= E1000_EIMS_OTHER; default: break; } /* Set the starting interrupt rate */ if (igb_max_interrupt_rate > 0) newitr = (4000000 / igb_max_interrupt_rate) & 0x7FFC; if (hw->mac.type == e1000_82575) newitr |= newitr << 16; else newitr |= E1000_EITR_CNT_IGNR; for (int i = 0; i < adapter->num_queues; i++) { que = &adapter->queues[i]; E1000_WRITE_REG(hw, E1000_EITR(que->msix), newitr); } return; } static void igb_free_pci_resources(struct adapter *adapter) { struct igb_queue *que = adapter->queues; device_t dev = adapter->dev; int rid; /* ** There is a slight possibility of a failure mode ** in attach that will result in entering this function ** before interrupt resources have been initialized, and ** in that case we do not want to execute the loops below ** We can detect this reliably by the state of the adapter ** res pointer. */ if (adapter->res == NULL) goto mem; /* * First release all the interrupt resources: */ for (int i = 0; i < adapter->num_queues; i++, que++) { rid = que->msix + 1; if (que->tag != NULL) { bus_teardown_intr(dev, que->res, que->tag); que->tag = NULL; } if (que->res != NULL) bus_release_resource(dev, SYS_RES_IRQ, rid, que->res); } /* Clean the Legacy or Link interrupt last */ if (adapter->linkvec) /* we are doing MSIX */ rid = adapter->linkvec + 1; else (adapter->msix != 0) ? (rid = 1):(rid = 0); que = adapter->queues; if (adapter->tag != NULL) { taskqueue_drain(que->tq, &adapter->link_task); bus_teardown_intr(dev, adapter->res, adapter->tag); adapter->tag = NULL; } if (adapter->res != NULL) bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res); for (int i = 0; i < adapter->num_queues; i++, que++) { if (que->tq != NULL) { #ifndef IGB_LEGACY_TX taskqueue_drain(que->tq, &que->txr->txq_task); #endif taskqueue_drain(que->tq, &que->que_task); taskqueue_free(que->tq); } } mem: if (adapter->msix) pci_release_msi(dev); if (adapter->msix_mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, adapter->memrid, adapter->msix_mem); if (adapter->pci_mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), adapter->pci_mem); } /* * Setup Either MSI/X or MSI */ static int igb_setup_msix(struct adapter *adapter) { device_t dev = adapter->dev; int bar, want, queues, msgs, maxqueues; /* tuneable override */ if (igb_enable_msix == 0) goto msi; /* First try MSI/X */ msgs = pci_msix_count(dev); if (msgs == 0) goto msi; /* ** Some new devices, as with ixgbe, now may ** use a different BAR, so we need to keep ** track of which is used. 
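** If the register at PCIR_BAR(IGB_MSIX_BAR) reads back zero the table lives in the following BAR, hence the memrid += 4 fallback below.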
*/ adapter->memrid = PCIR_BAR(IGB_MSIX_BAR); bar = pci_read_config(dev, adapter->memrid, 4); if (bar == 0) /* use next bar */ adapter->memrid += 4; adapter->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &adapter->memrid, RF_ACTIVE); if (adapter->msix_mem == NULL) { /* May not be enabled */ device_printf(adapter->dev, "Unable to map MSIX table \n"); goto msi; } /* Figure out a reasonable auto config value */ queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus; /* Manual override */ if (igb_num_queues != 0) queues = igb_num_queues; /* Sanity check based on HW */ switch (adapter->hw.mac.type) { case e1000_82575: maxqueues = 4; break; case e1000_82576: case e1000_82580: case e1000_i350: case e1000_i354: maxqueues = 8; break; case e1000_i210: maxqueues = 4; break; case e1000_i211: maxqueues = 2; break; default: /* VF interfaces */ maxqueues = 1; break; } if (queues > maxqueues) queues = maxqueues; /* Manual override */ if (igb_num_queues != 0) queues = igb_num_queues; /* ** One vector (RX/TX pair) per queue ** plus an additional for Link interrupt */ want = queues + 1; if (msgs >= want) msgs = want; else { device_printf(adapter->dev, "MSIX Configuration Problem, " "%d vectors configured, but %d queues wanted!\n", msgs, want); goto msi; } if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) { device_printf(adapter->dev, "Using MSIX interrupts with %d vectors\n", msgs); adapter->num_queues = queues; return (msgs); } /* ** If MSIX alloc failed or provided us with ** less than needed, free and fall through to MSI */ pci_release_msi(dev); msi: if (adapter->msix_mem != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(IGB_MSIX_BAR), adapter->msix_mem); adapter->msix_mem = NULL; } msgs = 1; if (pci_alloc_msi(dev, &msgs) == 0) { device_printf(adapter->dev," Using an MSI interrupt\n"); return (msgs); } device_printf(adapter->dev," Using a Legacy interrupt\n"); return (0); } /********************************************************************* * * Initialize the DMA Coalescing feature * **********************************************************************/ static void igb_init_dmac(struct adapter *adapter, u32 pba) { device_t dev = adapter->dev; struct e1000_hw *hw = &adapter->hw; u32 dmac, reg = ~E1000_DMACR_DMAC_EN; u16 hwm; if (hw->mac.type == e1000_i211) return; if (hw->mac.type > e1000_82580) { if (adapter->dmac == 0) { /* Disabling it */ E1000_WRITE_REG(hw, E1000_DMACR, reg); return; } else device_printf(dev, "DMA Coalescing enabled\n"); /* Set starting threshold */ E1000_WRITE_REG(hw, E1000_DMCTXTH, 0); hwm = 64 * pba - adapter->max_frame_size / 16; if (hwm < 64 * (pba - 6)) hwm = 64 * (pba - 6); reg = E1000_READ_REG(hw, E1000_FCRTC); reg &= ~E1000_FCRTC_RTH_COAL_MASK; reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT) & E1000_FCRTC_RTH_COAL_MASK); E1000_WRITE_REG(hw, E1000_FCRTC, reg); dmac = pba - adapter->max_frame_size / 512; if (dmac < pba - 10) dmac = pba - 10; reg = E1000_READ_REG(hw, E1000_DMACR); reg &= ~E1000_DMACR_DMACTHR_MASK; reg = ((dmac << E1000_DMACR_DMACTHR_SHIFT) & E1000_DMACR_DMACTHR_MASK); /* transition to L0x or L1 if available..*/ reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK); /* Check if status is 2.5Gb backplane connection * before configuration of watchdog timer, which is * in msec values in 12.8usec intervals * watchdog timer= msec values in 32usec intervals * for non 2.5Gb connection */ if (hw->mac.type == e1000_i354) { int status = E1000_READ_REG(hw, E1000_STATUS); if ((status & E1000_STATUS_2P5_SKU) && (!(status & 
E1000_STATUS_2P5_SKU_OVER))) reg |= ((adapter->dmac * 5) >> 6); else reg |= (adapter->dmac >> 5); } else { reg |= (adapter->dmac >> 5); } E1000_WRITE_REG(hw, E1000_DMACR, reg); -#ifdef I210_OBFF_SUPPORT - /* - * Set the OBFF Rx threshold to DMA Coalescing Rx - * threshold - 2KB and enable the feature in the - * hardware for I210. - */ - if (hw->mac.type == e1000_i210) { - int obff = dmac - 2; - reg = E1000_READ_REG(hw, E1000_DOBFFCTL); - reg &= ~E1000_DOBFFCTL_OBFFTHR_MASK; - reg |= (obff & E1000_DOBFFCTL_OBFFTHR_MASK) - | E1000_DOBFFCTL_EXIT_ACT_MASK; - E1000_WRITE_REG(hw, E1000_DOBFFCTL, reg); - } -#endif E1000_WRITE_REG(hw, E1000_DMCRTRH, 0); /* Set the interval before transition */ reg = E1000_READ_REG(hw, E1000_DMCTLX); if (hw->mac.type == e1000_i350) reg |= IGB_DMCTLX_DCFLUSH_DIS; /* ** in 2.5Gb connection, TTLX unit is 0.4 usec ** which is 0x4*2 = 0xA. But delay is still 4 usec */ if (hw->mac.type == e1000_i354) { int status = E1000_READ_REG(hw, E1000_STATUS); if ((status & E1000_STATUS_2P5_SKU) && (!(status & E1000_STATUS_2P5_SKU_OVER))) reg |= 0xA; else reg |= 0x4; } else { reg |= 0x4; } E1000_WRITE_REG(hw, E1000_DMCTLX, reg); /* free space in tx packet buffer to wake from DMA coal */ E1000_WRITE_REG(hw, E1000_DMCTXTH, (IGB_TXPBSIZE - (2 * adapter->max_frame_size)) >> 6); /* make low power state decision controlled by DMA coal */ reg = E1000_READ_REG(hw, E1000_PCIEMISC); reg &= ~E1000_PCIEMISC_LX_DECISION; E1000_WRITE_REG(hw, E1000_PCIEMISC, reg); } else if (hw->mac.type == e1000_82580) { u32 reg = E1000_READ_REG(hw, E1000_PCIEMISC); E1000_WRITE_REG(hw, E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION); E1000_WRITE_REG(hw, E1000_DMACR, 0); } } /********************************************************************* * * Set up an fresh starting state * **********************************************************************/ static void igb_reset(struct adapter *adapter) { device_t dev = adapter->dev; struct e1000_hw *hw = &adapter->hw; struct e1000_fc_info *fc = &hw->fc; struct ifnet *ifp = adapter->ifp; u32 pba = 0; u16 hwm; INIT_DEBUGOUT("igb_reset: begin"); /* Let the firmware know the OS is in control */ igb_get_hw_control(adapter); /* * Packet Buffer Allocation (PBA) * Writing PBA sets the receive portion of the buffer * the remainder is used for the transmit buffer. 
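* Values are in 1 KB units; e.g. E1000_PBA_32K gives the 82575 32 KB of receive packet buffer.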
*/ switch (hw->mac.type) { case e1000_82575: pba = E1000_PBA_32K; break; case e1000_82576: case e1000_vfadapt: pba = E1000_READ_REG(hw, E1000_RXPBS); pba &= E1000_RXPBS_SIZE_MASK_82576; break; case e1000_82580: case e1000_i350: case e1000_i354: case e1000_vfadapt_i350: pba = E1000_READ_REG(hw, E1000_RXPBS); pba = e1000_rxpbs_adjust_82580(pba); break; case e1000_i210: case e1000_i211: pba = E1000_PBA_34K; default: break; } /* Special needs in case of Jumbo frames */ if ((hw->mac.type == e1000_82575) && (ifp->if_mtu > ETHERMTU)) { u32 tx_space, min_tx, min_rx; pba = E1000_READ_REG(hw, E1000_PBA); tx_space = pba >> 16; pba &= 0xffff; min_tx = (adapter->max_frame_size + sizeof(struct e1000_tx_desc) - ETHERNET_FCS_SIZE) * 2; min_tx = roundup2(min_tx, 1024); min_tx >>= 10; min_rx = adapter->max_frame_size; min_rx = roundup2(min_rx, 1024); min_rx >>= 10; if (tx_space < min_tx && ((min_tx - tx_space) < pba)) { pba = pba - (min_tx - tx_space); /* * if short on rx space, rx wins * and must trump tx adjustment */ if (pba < min_rx) pba = min_rx; } E1000_WRITE_REG(hw, E1000_PBA, pba); } INIT_DEBUGOUT1("igb_init: pba=%dK",pba); /* * These parameters control the automatic generation (Tx) and * response (Rx) to Ethernet PAUSE frames. * - High water mark should allow for at least two frames to be * received after sending an XOFF. * - Low water mark works best when it is very near the high water mark. * This allows the receiver to restart by sending XON when it has * drained a bit. */ hwm = min(((pba << 10) * 9 / 10), ((pba << 10) - 2 * adapter->max_frame_size)); if (hw->mac.type < e1000_82576) { fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */ fc->low_water = fc->high_water - 8; } else { fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */ fc->low_water = fc->high_water - 16; } fc->pause_time = IGB_FC_PAUSE_TIME; fc->send_xon = TRUE; if (adapter->fc) fc->requested_mode = adapter->fc; else fc->requested_mode = e1000_fc_default; /* Issue a global reset */ e1000_reset_hw(hw); E1000_WRITE_REG(hw, E1000_WUC, 0); /* Reset for AutoMediaDetect */ if (adapter->flags & IGB_MEDIA_RESET) { e1000_setup_init_funcs(hw, TRUE); e1000_get_bus_info(hw); adapter->flags &= ~IGB_MEDIA_RESET; } if (e1000_init_hw(hw) < 0) device_printf(dev, "Hardware Initialization Failed\n"); /* Setup DMA Coalescing */ igb_init_dmac(adapter, pba); E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN); e1000_get_phy_info(hw); e1000_check_for_link(hw); return; } /********************************************************************* * * Setup networking device structure and register an interface. 
* **********************************************************************/ static int igb_setup_interface(device_t dev, struct adapter *adapter) { struct ifnet *ifp; INIT_DEBUGOUT("igb_setup_interface: begin"); ifp = adapter->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not allocate ifnet structure\n"); return (-1); } if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_init = igb_init; ifp->if_softc = adapter; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = igb_ioctl; #ifndef IGB_LEGACY_TX ifp->if_transmit = igb_mq_start; ifp->if_qflush = igb_qflush; #else ifp->if_start = igb_start; IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1); ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1; IFQ_SET_READY(&ifp->if_snd); #endif ether_ifattach(ifp, adapter->hw.mac.addr); ifp->if_capabilities = ifp->if_capenable = 0; ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM; ifp->if_capabilities |= IFCAP_TSO; ifp->if_capabilities |= IFCAP_JUMBO_MTU; ifp->if_capenable = ifp->if_capabilities; /* Don't enable LRO by default */ ifp->if_capabilities |= IFCAP_LRO; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif /* * Tell the upper layer(s) we * support full VLAN capability. */ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU; ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU; /* ** Don't turn this on by default, if vlans are ** created on another pseudo device (eg. lagg) ** then vlan events are not passed thru, breaking ** operation, but with HW FILTER off it works. If ** using vlans directly on the igb driver you can ** enable this and get full hardware tag filtering. */ ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; /* * Specify the media types supported by this adapter and register * callbacks to update media and link information */ ifmedia_init(&adapter->media, IFM_IMASK, igb_media_change, igb_media_status); if ((adapter->hw.phy.media_type == e1000_media_type_fiber) || (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) { ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL); } else { ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL); if (adapter->hw.phy.type != e1000_phy_ife) { ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL); } } ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); return (0); } /* * Manage DMA'able memory. 
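* igb_dma_malloc() wraps the usual busdma sequence (tag create, memory alloc, map load) and captures the bus address via the igb_dmamap_cb() callback; igb_dma_free() unwinds it in reverse.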
*/ static void igb_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { if (error) return; *(bus_addr_t *) arg = segs[0].ds_addr; } static int igb_dma_malloc(struct adapter *adapter, bus_size_t size, struct igb_dma_alloc *dma, int mapflags) { int error; error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */ IGB_DBA_ALIGN, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ size, /* maxsize */ 1, /* nsegments */ size, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &dma->dma_tag); if (error) { device_printf(adapter->dev, "%s: bus_dma_tag_create failed: %d\n", __func__, error); goto fail_0; } error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map); if (error) { device_printf(adapter->dev, "%s: bus_dmamem_alloc(%ju) failed: %d\n", __func__, (uintmax_t)size, error); goto fail_2; } dma->dma_paddr = 0; error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size, igb_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT); if (error || dma->dma_paddr == 0) { device_printf(adapter->dev, "%s: bus_dmamap_load failed: %d\n", __func__, error); goto fail_3; } return (0); fail_3: bus_dmamap_unload(dma->dma_tag, dma->dma_map); fail_2: bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); bus_dma_tag_destroy(dma->dma_tag); fail_0: dma->dma_tag = NULL; return (error); } static void igb_dma_free(struct adapter *adapter, struct igb_dma_alloc *dma) { if (dma->dma_tag == NULL) return; if (dma->dma_paddr != 0) { bus_dmamap_sync(dma->dma_tag, dma->dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(dma->dma_tag, dma->dma_map); dma->dma_paddr = 0; } if (dma->dma_vaddr != NULL) { bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); dma->dma_vaddr = NULL; } bus_dma_tag_destroy(dma->dma_tag); dma->dma_tag = NULL; } /********************************************************************* * * Allocate memory for the transmit and receive rings, and then * the descriptors associated with each, called only once at attach. 
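* On failure the txconf/rxconf counters drive the unwind paths so only the rings actually built get freed.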
* **********************************************************************/ static int igb_allocate_queues(struct adapter *adapter) { device_t dev = adapter->dev; struct igb_queue *que = NULL; struct tx_ring *txr = NULL; struct rx_ring *rxr = NULL; int rsize, tsize, error = E1000_SUCCESS; int txconf = 0, rxconf = 0; /* First allocate the top level queue structs */ if (!(adapter->queues = (struct igb_queue *) malloc(sizeof(struct igb_queue) * adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate queue memory\n"); error = ENOMEM; goto fail; } /* Next allocate the TX ring struct memory */ if (!(adapter->tx_rings = (struct tx_ring *) malloc(sizeof(struct tx_ring) * adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate TX ring memory\n"); error = ENOMEM; goto tx_fail; } /* Now allocate the RX */ if (!(adapter->rx_rings = (struct rx_ring *) malloc(sizeof(struct rx_ring) * adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate RX ring memory\n"); error = ENOMEM; goto rx_fail; } tsize = roundup2(adapter->num_tx_desc * sizeof(union e1000_adv_tx_desc), IGB_DBA_ALIGN); /* * Now set up the TX queues, txconf is needed to handle the * possibility that things fail midcourse and we need to * undo memory gracefully */ for (int i = 0; i < adapter->num_queues; i++, txconf++) { /* Set up some basics */ txr = &adapter->tx_rings[i]; txr->adapter = adapter; txr->me = i; txr->num_desc = adapter->num_tx_desc; /* Initialize the TX lock */ snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)", device_get_nameunit(dev), txr->me); mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF); if (igb_dma_malloc(adapter, tsize, &txr->txdma, BUS_DMA_NOWAIT)) { device_printf(dev, "Unable to allocate TX Descriptor memory\n"); error = ENOMEM; goto err_tx_desc; } txr->tx_base = (union e1000_adv_tx_desc *)txr->txdma.dma_vaddr; bzero((void *)txr->tx_base, tsize); /* Now allocate transmit buffers for the ring */ if (igb_allocate_transmit_buffers(txr)) { device_printf(dev, "Critical Failure setting up transmit buffers\n"); error = ENOMEM; goto err_tx_desc; } #ifndef IGB_LEGACY_TX /* Allocate a buf ring */ txr->br = buf_ring_alloc(igb_buf_ring_size, M_DEVBUF, M_WAITOK, &txr->tx_mtx); #endif } /* * Next the RX queues... 
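* (note: the lock-name snprintf in the RX loop below picks up txr->me left over from the TX loop; the mutex name is cosmetic only, so behavior is unaffected)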
*/ rsize = roundup2(adapter->num_rx_desc * sizeof(union e1000_adv_rx_desc), IGB_DBA_ALIGN); for (int i = 0; i < adapter->num_queues; i++, rxconf++) { rxr = &adapter->rx_rings[i]; rxr->adapter = adapter; rxr->me = i; /* Initialize the RX lock */ snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)", device_get_nameunit(dev), txr->me); mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF); if (igb_dma_malloc(adapter, rsize, &rxr->rxdma, BUS_DMA_NOWAIT)) { device_printf(dev, "Unable to allocate RxDescriptor memory\n"); error = ENOMEM; goto err_rx_desc; } rxr->rx_base = (union e1000_adv_rx_desc *)rxr->rxdma.dma_vaddr; bzero((void *)rxr->rx_base, rsize); /* Allocate receive buffers for the ring*/ if (igb_allocate_receive_buffers(rxr)) { device_printf(dev, "Critical Failure setting up receive buffers\n"); error = ENOMEM; goto err_rx_desc; } } /* ** Finally set up the queue holding structs */ for (int i = 0; i < adapter->num_queues; i++) { que = &adapter->queues[i]; que->adapter = adapter; que->txr = &adapter->tx_rings[i]; que->rxr = &adapter->rx_rings[i]; } return (0); err_rx_desc: for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--) igb_dma_free(adapter, &rxr->rxdma); err_tx_desc: for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--) igb_dma_free(adapter, &txr->txdma); free(adapter->rx_rings, M_DEVBUF); rx_fail: #ifndef IGB_LEGACY_TX buf_ring_free(txr->br, M_DEVBUF); #endif free(adapter->tx_rings, M_DEVBUF); tx_fail: free(adapter->queues, M_DEVBUF); fail: return (error); } /********************************************************************* * * Allocate memory for tx_buffer structures. The tx_buffer stores all * the information needed to transmit a packet on the wire. This is * called only once at attach, setup is done every reset. * **********************************************************************/ static int igb_allocate_transmit_buffers(struct tx_ring *txr) { struct adapter *adapter = txr->adapter; device_t dev = adapter->dev; struct igb_tx_buf *txbuf; int error, i; /* * Setup DMA descriptor areas. */ if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ IGB_TSO_SIZE, /* maxsize */ IGB_MAX_SCATTER, /* nsegments */ PAGE_SIZE, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &txr->txtag))) { device_printf(dev,"Unable to allocate TX DMA tag\n"); goto fail; } if (!(txr->tx_buffers = (struct igb_tx_buf *) malloc(sizeof(struct igb_tx_buf) * adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate tx_buffer memory\n"); error = ENOMEM; goto fail; } /* Create the descriptor buffer dma maps */ txbuf = txr->tx_buffers; for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) { error = bus_dmamap_create(txr->txtag, 0, &txbuf->map); if (error != 0) { device_printf(dev, "Unable to create TX DMA map\n"); goto fail; } } return 0; fail: /* We free all, it handles case where we are in the middle */ igb_free_transmit_structures(adapter); return (error); } /********************************************************************* * * Initialize a transmit ring. 
* **********************************************************************/ static void igb_setup_transmit_ring(struct tx_ring *txr) { struct adapter *adapter = txr->adapter; struct igb_tx_buf *txbuf; int i; #ifdef DEV_NETMAP struct netmap_adapter *na = NA(adapter->ifp); struct netmap_slot *slot; #endif /* DEV_NETMAP */ /* Clear the old descriptor contents */ IGB_TX_LOCK(txr); #ifdef DEV_NETMAP slot = netmap_reset(na, NR_TX, txr->me, 0); #endif /* DEV_NETMAP */ bzero((void *)txr->tx_base, (sizeof(union e1000_adv_tx_desc)) * adapter->num_tx_desc); /* Reset indices */ txr->next_avail_desc = 0; txr->next_to_clean = 0; /* Free any existing tx buffers. */ txbuf = txr->tx_buffers; for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) { if (txbuf->m_head != NULL) { bus_dmamap_sync(txr->txtag, txbuf->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txr->txtag, txbuf->map); m_freem(txbuf->m_head); txbuf->m_head = NULL; } #ifdef DEV_NETMAP if (slot) { int si = netmap_idx_n2k(&na->tx_rings[txr->me], i); /* no need to set the address */ netmap_load_map(na, txr->txtag, txbuf->map, NMB(na, slot + si)); } #endif /* DEV_NETMAP */ /* clear the watch index */ txbuf->eop = NULL; } /* Set number of descriptors available */ txr->tx_avail = adapter->num_tx_desc; bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); IGB_TX_UNLOCK(txr); } /********************************************************************* * * Initialize all transmit rings. * **********************************************************************/ static void igb_setup_transmit_structures(struct adapter *adapter) { struct tx_ring *txr = adapter->tx_rings; for (int i = 0; i < adapter->num_queues; i++, txr++) igb_setup_transmit_ring(txr); return; } /********************************************************************* * * Enable transmit unit. * **********************************************************************/ static void igb_initialize_transmit_units(struct adapter *adapter) { struct tx_ring *txr = adapter->tx_rings; struct e1000_hw *hw = &adapter->hw; u32 tctl, txdctl; INIT_DEBUGOUT("igb_initialize_transmit_units: begin"); tctl = txdctl = 0; /* Setup the Tx Descriptor Rings */ for (int i = 0; i < adapter->num_queues; i++, txr++) { u64 bus_addr = txr->txdma.dma_paddr; E1000_WRITE_REG(hw, E1000_TDLEN(i), adapter->num_tx_desc * sizeof(struct e1000_tx_desc)); E1000_WRITE_REG(hw, E1000_TDBAH(i), (uint32_t)(bus_addr >> 32)); E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr); /* Setup the HW Tx Head and Tail descriptor pointers */ E1000_WRITE_REG(hw, E1000_TDT(i), 0); E1000_WRITE_REG(hw, E1000_TDH(i), 0); HW_DEBUGOUT2("Base = %x, Length = %x\n", E1000_READ_REG(hw, E1000_TDBAL(i)), E1000_READ_REG(hw, E1000_TDLEN(i))); txr->queue_status = IGB_QUEUE_IDLE; txdctl |= IGB_TX_PTHRESH; txdctl |= IGB_TX_HTHRESH << 8; txdctl |= IGB_TX_WTHRESH << 16; txdctl |= E1000_TXDCTL_QUEUE_ENABLE; E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl); } if (adapter->vf_ifp) return; e1000_config_collision_dist(hw); /* Program the Transmit Control Register */ tctl = E1000_READ_REG(hw, E1000_TCTL); tctl &= ~E1000_TCTL_CT; tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT)); /* This write will effectively turn on the transmit unit. */ E1000_WRITE_REG(hw, E1000_TCTL, tctl); } /********************************************************************* * * Free all transmit rings. 
* **********************************************************************/ static void igb_free_transmit_structures(struct adapter *adapter) { struct tx_ring *txr = adapter->tx_rings; for (int i = 0; i < adapter->num_queues; i++, txr++) { IGB_TX_LOCK(txr); igb_free_transmit_buffers(txr); igb_dma_free(adapter, &txr->txdma); IGB_TX_UNLOCK(txr); IGB_TX_LOCK_DESTROY(txr); } free(adapter->tx_rings, M_DEVBUF); } /********************************************************************* * * Free transmit ring related data structures. * **********************************************************************/ static void igb_free_transmit_buffers(struct tx_ring *txr) { struct adapter *adapter = txr->adapter; struct igb_tx_buf *tx_buffer; int i; INIT_DEBUGOUT("free_transmit_ring: begin"); if (txr->tx_buffers == NULL) return; tx_buffer = txr->tx_buffers; for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) { if (tx_buffer->m_head != NULL) { bus_dmamap_sync(txr->txtag, tx_buffer->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txr->txtag, tx_buffer->map); m_freem(tx_buffer->m_head); tx_buffer->m_head = NULL; if (tx_buffer->map != NULL) { bus_dmamap_destroy(txr->txtag, tx_buffer->map); tx_buffer->map = NULL; } } else if (tx_buffer->map != NULL) { bus_dmamap_unload(txr->txtag, tx_buffer->map); bus_dmamap_destroy(txr->txtag, tx_buffer->map); tx_buffer->map = NULL; } } #ifndef IGB_LEGACY_TX if (txr->br != NULL) buf_ring_free(txr->br, M_DEVBUF); #endif if (txr->tx_buffers != NULL) { free(txr->tx_buffers, M_DEVBUF); txr->tx_buffers = NULL; } if (txr->txtag != NULL) { bus_dma_tag_destroy(txr->txtag); txr->txtag = NULL; } return; } /********************************************************************** * * Setup work for hardware segmentation offload (TSO) on * adapters using advanced tx descriptors * **********************************************************************/ static int igb_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *cmd_type_len, u32 *olinfo_status) { struct adapter *adapter = txr->adapter; struct e1000_adv_tx_context_desc *TXD; u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; u32 mss_l4len_idx = 0, paylen; u16 vtag = 0, eh_type; int ctxd, ehdrlen, ip_hlen, tcp_hlen; struct ether_vlan_header *eh; #ifdef INET6 struct ip6_hdr *ip6; #endif #ifdef INET struct ip *ip; #endif struct tcphdr *th; /* * Determine where frame payload starts. * Jump over vlan headers if already present */ eh = mtod(mp, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; eh_type = eh->evl_proto; } else { ehdrlen = ETHER_HDR_LEN; eh_type = eh->evl_encap_proto; } switch (ntohs(eh_type)) { #ifdef INET6 case ETHERTYPE_IPV6: ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); /* XXX-BZ For now we do not pretend to support ext. hdrs. */ if (ip6->ip6_nxt != IPPROTO_TCP) return (ENXIO); ip_hlen = sizeof(struct ip6_hdr); ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen); th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0); type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6; break; #endif #ifdef INET case ETHERTYPE_IP: ip = (struct ip *)(mp->m_data + ehdrlen); if (ip->ip_p != IPPROTO_TCP) return (ENXIO); ip->ip_sum = 0; ip_hlen = ip->ip_hl << 2; th = (struct tcphdr *)((caddr_t)ip + ip_hlen); th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htons(IPPROTO_TCP)); type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4; /* Tell transmit desc to also do IPv4 checksum. 
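(E1000_TXD_POPTS_IXSM makes the MAC insert the IP header checksum that was zeroed just above)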
*/ *olinfo_status |= E1000_TXD_POPTS_IXSM << 8; break; #endif default: panic("%s: CSUM_TSO but no supported IP version (0x%04x)", __func__, ntohs(eh_type)); break; } ctxd = txr->next_avail_desc; TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[ctxd]; tcp_hlen = th->th_off << 2; /* This is used in the transmit desc in encap */ paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen; /* VLAN MACLEN IPLEN */ if (mp->m_flags & M_VLANTAG) { vtag = htole16(mp->m_pkthdr.ether_vtag); vlan_macip_lens |= (vtag << E1000_ADVTXD_VLAN_SHIFT); } vlan_macip_lens |= ehdrlen << E1000_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= ip_hlen; TXD->vlan_macip_lens = htole32(vlan_macip_lens); /* ADV DTYPE TUCMD */ type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT; type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP; TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl); /* MSS L4LEN IDX */ mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << E1000_ADVTXD_MSS_SHIFT); mss_l4len_idx |= (tcp_hlen << E1000_ADVTXD_L4LEN_SHIFT); /* 82575 needs the queue index added */ if (adapter->hw.mac.type == e1000_82575) mss_l4len_idx |= txr->me << 4; TXD->mss_l4len_idx = htole32(mss_l4len_idx); TXD->seqnum_seed = htole32(0); if (++ctxd == txr->num_desc) ctxd = 0; txr->tx_avail--; txr->next_avail_desc = ctxd; *cmd_type_len |= E1000_ADVTXD_DCMD_TSE; *olinfo_status |= E1000_TXD_POPTS_TXSM << 8; *olinfo_status |= paylen << E1000_ADVTXD_PAYLEN_SHIFT; ++txr->tso_tx; return (0); } /********************************************************************* * * Advanced Context Descriptor setup for VLAN, CSUM or TSO * **********************************************************************/ static int igb_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp, u32 *cmd_type_len, u32 *olinfo_status) { struct e1000_adv_tx_context_desc *TXD; struct adapter *adapter = txr->adapter; struct ether_vlan_header *eh; struct ip *ip; struct ip6_hdr *ip6; u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0, mss_l4len_idx = 0; int ehdrlen, ip_hlen = 0; u16 etype; u8 ipproto = 0; int offload = TRUE; int ctxd = txr->next_avail_desc; u16 vtag = 0; /* First check if TSO is to be used */ if (mp->m_pkthdr.csum_flags & CSUM_TSO) return (igb_tso_setup(txr, mp, cmd_type_len, olinfo_status)); if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0) offload = FALSE; /* Indicate the whole packet as payload when not doing TSO */ *olinfo_status |= mp->m_pkthdr.len << E1000_ADVTXD_PAYLEN_SHIFT; /* Now ready a context descriptor */ TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[ctxd]; /* ** In advanced descriptors the vlan tag must ** be placed into the context descriptor. Hence ** we need to make one even if not doing offloads. */ if (mp->m_flags & M_VLANTAG) { vtag = htole16(mp->m_pkthdr.ether_vtag); vlan_macip_lens |= (vtag << E1000_ADVTXD_VLAN_SHIFT); } else if (offload == FALSE) /* ... no offload to do */ return (0); /* * Determine where frame payload starts. * Jump over vlan headers if already present, * helpful for QinQ too. 
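* For a tagged frame ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN = 14 + 4 = 18 bytes; an untagged one uses the plain 14.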
*/ eh = mtod(mp, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { etype = ntohs(eh->evl_proto); ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; } else { etype = ntohs(eh->evl_encap_proto); ehdrlen = ETHER_HDR_LEN; } /* Set the ether header length */ vlan_macip_lens |= ehdrlen << E1000_ADVTXD_MACLEN_SHIFT; switch (etype) { case ETHERTYPE_IP: ip = (struct ip *)(mp->m_data + ehdrlen); ip_hlen = ip->ip_hl << 2; ipproto = ip->ip_p; type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4; break; case ETHERTYPE_IPV6: ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); ip_hlen = sizeof(struct ip6_hdr); /* XXX-BZ this will go badly in case of ext hdrs. */ ipproto = ip6->ip6_nxt; type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6; break; default: offload = FALSE; break; } vlan_macip_lens |= ip_hlen; type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT; switch (ipproto) { case IPPROTO_TCP: if (mp->m_pkthdr.csum_flags & CSUM_TCP) type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP; break; case IPPROTO_UDP: if (mp->m_pkthdr.csum_flags & CSUM_UDP) type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP; break; #if __FreeBSD_version >= 800000 case IPPROTO_SCTP: if (mp->m_pkthdr.csum_flags & CSUM_SCTP) type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP; break; #endif default: offload = FALSE; break; } if (offload) /* For the TX descriptor setup */ *olinfo_status |= E1000_TXD_POPTS_TXSM << 8; /* 82575 needs the queue index added */ if (adapter->hw.mac.type == e1000_82575) mss_l4len_idx = txr->me << 4; /* Now copy bits into descriptor */ TXD->vlan_macip_lens = htole32(vlan_macip_lens); TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl); TXD->seqnum_seed = htole32(0); TXD->mss_l4len_idx = htole32(mss_l4len_idx); /* We've consumed the first desc, adjust counters */ if (++ctxd == txr->num_desc) ctxd = 0; txr->next_avail_desc = ctxd; --txr->tx_avail; return (0); } /********************************************************************** * * Examine each tx_buffer in the used queue. If the hardware is done * processing the packet then free associated resources. The * tx_buffer is put back on the free queue. * * TRUE return means there's work in the ring to clean, FALSE its empty. **********************************************************************/ static bool igb_txeof(struct tx_ring *txr) { struct adapter *adapter = txr->adapter; struct ifnet *ifp = adapter->ifp; u32 work, processed = 0; int limit = adapter->tx_process_limit; struct igb_tx_buf *buf; union e1000_adv_tx_desc *txd; mtx_assert(&txr->tx_mtx, MA_OWNED); #ifdef DEV_NETMAP if (netmap_tx_irq(ifp, txr->me)) return (FALSE); #endif /* DEV_NETMAP */ if (txr->tx_avail == txr->num_desc) { txr->queue_status = IGB_QUEUE_IDLE; return FALSE; } /* Get work starting point */ work = txr->next_to_clean; buf = &txr->tx_buffers[work]; txd = &txr->tx_base[work]; work -= txr->num_desc; /* The distance to ring end */ bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); do { union e1000_adv_tx_desc *eop = buf->eop; if (eop == NULL) /* No work */ break; if ((eop->wb.status & E1000_TXD_STAT_DD) == 0) break; /* I/O not complete */ if (buf->m_head) { txr->bytes += buf->m_head->m_pkthdr.len; bus_dmamap_sync(txr->txtag, buf->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txr->txtag, buf->map); m_freem(buf->m_head); buf->m_head = NULL; } buf->eop = NULL; ++txr->tx_avail; /* We clean the range if multi segment */ while (txd != eop) { ++txd; ++buf; ++work; /* wrap the ring? 
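(work was biased by -num_desc when the scan began, so it reaches zero exactly when the index steps past the end of the ring)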
*/ if (__predict_false(!work)) { work -= txr->num_desc; buf = txr->tx_buffers; txd = txr->tx_base; } if (buf->m_head) { txr->bytes += buf->m_head->m_pkthdr.len; bus_dmamap_sync(txr->txtag, buf->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txr->txtag, buf->map); m_freem(buf->m_head); buf->m_head = NULL; } ++txr->tx_avail; buf->eop = NULL; } ++txr->packets; ++processed; ++ifp->if_opackets; txr->watchdog_time = ticks; /* Try the next packet */ ++txd; ++buf; ++work; /* reset with a wrap */ if (__predict_false(!work)) { work -= txr->num_desc; buf = txr->tx_buffers; txd = txr->tx_base; } prefetch(txd); } while (__predict_true(--limit)); bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); work += txr->num_desc; txr->next_to_clean = work; /* ** Watchdog calculation, we know there's ** work outstanding or the first return ** would have been taken, so none processed ** for too long indicates a hang. */ if ((!processed) && ((ticks - txr->watchdog_time) > IGB_WATCHDOG)) txr->queue_status |= IGB_QUEUE_HUNG; if (txr->tx_avail >= IGB_QUEUE_THRESHOLD) txr->queue_status &= ~IGB_QUEUE_DEPLETED; if (txr->tx_avail == txr->num_desc) { txr->queue_status = IGB_QUEUE_IDLE; return (FALSE); } return (TRUE); } /********************************************************************* * * Refresh mbuf buffers for RX descriptor rings * - now keeps its own state so discards due to resource * exhaustion are unnecessary, if an mbuf cannot be obtained * it just returns, keeping its placeholder, thus it can simply * be recalled to try again. * **********************************************************************/ static void igb_refresh_mbufs(struct rx_ring *rxr, int limit) { struct adapter *adapter = rxr->adapter; bus_dma_segment_t hseg[1]; bus_dma_segment_t pseg[1]; struct igb_rx_buf *rxbuf; struct mbuf *mh, *mp; int i, j, nsegs, error; bool refreshed = FALSE; i = j = rxr->next_to_refresh; /* ** Get one descriptor beyond ** our work mark to control ** the loop. 
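** i trails j by one slot; the walk stops when j meets 'limit', leaving next_to_refresh at the last slot actually replenished.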
*/ if (++j == adapter->num_rx_desc) j = 0; while (j != limit) { rxbuf = &rxr->rx_buffers[i]; /* No hdr mbuf used with header split off */ if (rxr->hdr_split == FALSE) goto no_split; if (rxbuf->m_head == NULL) { mh = m_gethdr(M_NOWAIT, MT_DATA); if (mh == NULL) goto update; } else mh = rxbuf->m_head; mh->m_pkthdr.len = mh->m_len = MHLEN; mh->m_len = MHLEN; mh->m_flags |= M_PKTHDR; /* Get the memory mapping */ error = bus_dmamap_load_mbuf_sg(rxr->htag, rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { printf("Refresh mbufs: hdr dmamap load" " failure - %d\n", error); m_free(mh); rxbuf->m_head = NULL; goto update; } rxbuf->m_head = mh; bus_dmamap_sync(rxr->htag, rxbuf->hmap, BUS_DMASYNC_PREREAD); rxr->rx_base[i].read.hdr_addr = htole64(hseg[0].ds_addr); no_split: if (rxbuf->m_pack == NULL) { mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, adapter->rx_mbuf_sz); if (mp == NULL) goto update; } else mp = rxbuf->m_pack; mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz; /* Get the memory mapping */ error = bus_dmamap_load_mbuf_sg(rxr->ptag, rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { printf("Refresh mbufs: payload dmamap load" " failure - %d\n", error); m_free(mp); rxbuf->m_pack = NULL; goto update; } rxbuf->m_pack = mp; bus_dmamap_sync(rxr->ptag, rxbuf->pmap, BUS_DMASYNC_PREREAD); rxr->rx_base[i].read.pkt_addr = htole64(pseg[0].ds_addr); refreshed = TRUE; /* I feel wefreshed :) */ i = j; /* our next is precalculated */ rxr->next_to_refresh = i; if (++j == adapter->num_rx_desc) j = 0; } update: if (refreshed) /* update tail */ E1000_WRITE_REG(&adapter->hw, E1000_RDT(rxr->me), rxr->next_to_refresh); return; } /********************************************************************* * * Allocate memory for rx_buffer structures. Since we use one * rx_buffer per received packet, the maximum number of rx_buffer's * that we'll need is equal to the number of receive descriptors * that we've allocated. 
* **********************************************************************/ static int igb_allocate_receive_buffers(struct rx_ring *rxr) { struct adapter *adapter = rxr->adapter; device_t dev = adapter->dev; struct igb_rx_buf *rxbuf; int i, bsize, error; bsize = sizeof(struct igb_rx_buf) * adapter->num_rx_desc; if (!(rxr->rx_buffers = (struct igb_rx_buf *) malloc(bsize, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate rx_buffer memory\n"); error = ENOMEM; goto fail; } if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MSIZE, /* maxsize */ 1, /* nsegments */ MSIZE, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &rxr->htag))) { device_printf(dev, "Unable to create RX DMA tag\n"); goto fail; } if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MJUM9BYTES, /* maxsize */ 1, /* nsegments */ MJUM9BYTES, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &rxr->ptag))) { device_printf(dev, "Unable to create RX payload DMA tag\n"); goto fail; } for (i = 0; i < adapter->num_rx_desc; i++) { rxbuf = &rxr->rx_buffers[i]; error = bus_dmamap_create(rxr->htag, 0, &rxbuf->hmap); if (error) { device_printf(dev, "Unable to create RX head DMA maps\n"); goto fail; } error = bus_dmamap_create(rxr->ptag, 0, &rxbuf->pmap); if (error) { device_printf(dev, "Unable to create RX packet DMA maps\n"); goto fail; } } return (0); fail: /* Frees all, but can handle partial completion */ igb_free_receive_structures(adapter); return (error); } static void igb_free_receive_ring(struct rx_ring *rxr) { struct adapter *adapter = rxr->adapter; struct igb_rx_buf *rxbuf; for (int i = 0; i < adapter->num_rx_desc; i++) { rxbuf = &rxr->rx_buffers[i]; if (rxbuf->m_head != NULL) { bus_dmamap_sync(rxr->htag, rxbuf->hmap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rxr->htag, rxbuf->hmap); rxbuf->m_head->m_flags |= M_PKTHDR; m_freem(rxbuf->m_head); } if (rxbuf->m_pack != NULL) { bus_dmamap_sync(rxr->ptag, rxbuf->pmap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rxr->ptag, rxbuf->pmap); rxbuf->m_pack->m_flags |= M_PKTHDR; m_freem(rxbuf->m_pack); } rxbuf->m_head = NULL; rxbuf->m_pack = NULL; } } /********************************************************************* * * Initialize a receive ring and its buffers. * **********************************************************************/ static int igb_setup_receive_ring(struct rx_ring *rxr) { struct adapter *adapter; struct ifnet *ifp; device_t dev; struct igb_rx_buf *rxbuf; bus_dma_segment_t pseg[1], hseg[1]; struct lro_ctrl *lro = &rxr->lro; int rsize, nsegs, error = 0; #ifdef DEV_NETMAP struct netmap_adapter *na = NA(rxr->adapter->ifp); struct netmap_slot *slot; #endif /* DEV_NETMAP */ adapter = rxr->adapter; dev = adapter->dev; ifp = adapter->ifp; /* Clear the ring contents */ IGB_RX_LOCK(rxr); #ifdef DEV_NETMAP slot = netmap_reset(na, NR_RX, rxr->me, 0); #endif /* DEV_NETMAP */ rsize = roundup2(adapter->num_rx_desc * sizeof(union e1000_adv_rx_desc), IGB_DBA_ALIGN); bzero((void *)rxr->rx_base, rsize); /* ** Free current RX buffer structures and their mbufs */ igb_free_receive_ring(rxr); /* Configure for header split? 
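(with split enabled, protocol headers land in a small MHLEN mbuf mapped via htag and the payload in a cluster mapped via ptag, matching the HDR_SPLIT_ALWAYS descriptor type programmed in igb_initialize_receive_units())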
*/
	if (igb_header_split)
		rxr->hdr_split = TRUE;

	/* Now replenish the ring mbufs */
	for (int j = 0; j < adapter->num_rx_desc; ++j) {
		struct mbuf	*mh, *mp;

		rxbuf = &rxr->rx_buffers[j];
#ifdef DEV_NETMAP
		if (slot) {
			/* slot sj is mapped to the j-th NIC-ring entry */
			int sj = netmap_idx_n2k(&na->rx_rings[rxr->me], j);
			uint64_t paddr;
			void *addr;

			addr = PNMB(na, slot + sj, &paddr);
			netmap_load_map(na, rxr->ptag, rxbuf->pmap, addr);
			/* Update descriptor */
			rxr->rx_base[j].read.pkt_addr = htole64(paddr);
			continue;
		}
#endif /* DEV_NETMAP */
		if (rxr->hdr_split == FALSE)
			goto skip_head;

		/* First the header */
		rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
		if (rxbuf->m_head == NULL) {
			error = ENOBUFS;
			goto fail;
		}
		m_adj(rxbuf->m_head, ETHER_ALIGN);
		mh = rxbuf->m_head;
		mh->m_len = mh->m_pkthdr.len = MHLEN;
		mh->m_flags |= M_PKTHDR;
		/* Get the memory mapping */
		error = bus_dmamap_load_mbuf_sg(rxr->htag,
		    rxbuf->hmap, rxbuf->m_head, hseg,
		    &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) /* Nothing elegant to do here */
			goto fail;
		bus_dmamap_sync(rxr->htag,
		    rxbuf->hmap, BUS_DMASYNC_PREREAD);
		/* Update descriptor */
		rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);

skip_head:
		/* Now the payload cluster */
		rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
		    M_PKTHDR, adapter->rx_mbuf_sz);
		if (rxbuf->m_pack == NULL) {
			error = ENOBUFS;
			goto fail;
		}
		mp = rxbuf->m_pack;
		mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
		/* Get the memory mapping */
		error = bus_dmamap_load_mbuf_sg(rxr->ptag,
		    rxbuf->pmap, mp, pseg,
		    &nsegs, BUS_DMA_NOWAIT);
		if (error != 0)
			goto fail;
		bus_dmamap_sync(rxr->ptag,
		    rxbuf->pmap, BUS_DMASYNC_PREREAD);
		/* Update descriptor */
		rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
	}

	/* Setup our descriptor indices */
	rxr->next_to_check = 0;
	rxr->next_to_refresh = adapter->num_rx_desc - 1;
	rxr->lro_enabled = FALSE;
	rxr->rx_split_packets = 0;
	rxr->rx_bytes = 0;

	rxr->fmp = NULL;
	rxr->lmp = NULL;

	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	** Now set up the LRO interface, we
	** also only do head split when LRO
	** is enabled, since so often they
	** are undesirable in similar setups.
	*/
	if (ifp->if_capenable & IFCAP_LRO) {
		error = tcp_lro_init(lro);
		if (error) {
			device_printf(dev, "LRO Initialization failed!\n");
			goto fail;
		}
		INIT_DEBUGOUT("RX LRO Initialized\n");
		rxr->lro_enabled = TRUE;
		lro->ifp = adapter->ifp;
	}

	IGB_RX_UNLOCK(rxr);
	return (0);

fail:
	igb_free_receive_ring(rxr);
	IGB_RX_UNLOCK(rxr);
	return (error);
}


/*********************************************************************
 *
 *  Initialize all receive rings.
 *
 **********************************************************************/
static int
igb_setup_receive_structures(struct adapter *adapter)
{
	struct rx_ring *rxr = adapter->rx_rings;
	int i;

	for (i = 0; i < adapter->num_queues; i++, rxr++)
		if (igb_setup_receive_ring(rxr))
			goto fail;

	return (0);
fail:
	/*
	 * Free RX buffers allocated so far, we will only handle
	 * the rings that completed, the failing case will have
	 * cleaned up for itself. 'i' is the endpoint.
	 */
	for (int j = 0; j < i; ++j) {
		rxr = &adapter->rx_rings[j];
		IGB_RX_LOCK(rxr);
		igb_free_receive_ring(rxr);
		IGB_RX_UNLOCK(rxr);
	}

	return (ENOBUFS);
}

/*********************************************************************
 *
 *  Enable receive unit.
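 *
 *  Programs RCTL/SRRCTL buffer sizes, the base, length and
 *  thresholds of each descriptor ring, and, when more than one
 *  queue is in use, the RSS redirection table and hash keys.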
* **********************************************************************/ static void igb_initialize_receive_units(struct adapter *adapter) { struct rx_ring *rxr = adapter->rx_rings; struct ifnet *ifp = adapter->ifp; struct e1000_hw *hw = &adapter->hw; u32 rctl, rxcsum, psize, srrctl = 0; INIT_DEBUGOUT("igb_initialize_receive_unit: begin"); /* * Make sure receives are disabled while setting * up the descriptor ring */ rctl = E1000_READ_REG(hw, E1000_RCTL); E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); /* ** Set up for header split */ if (igb_header_split) { /* Use a standard mbuf for the header */ srrctl |= IGB_HDR_BUF << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; } else srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; /* ** Set up for jumbo frames */ if (ifp->if_mtu > ETHERMTU) { rctl |= E1000_RCTL_LPE; if (adapter->rx_mbuf_sz == MJUMPAGESIZE) { srrctl |= 4096 >> E1000_SRRCTL_BSIZEPKT_SHIFT; rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX; } else if (adapter->rx_mbuf_sz > MJUMPAGESIZE) { srrctl |= 8192 >> E1000_SRRCTL_BSIZEPKT_SHIFT; rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX; } /* Set maximum packet len */ psize = adapter->max_frame_size; /* are we on a vlan? */ if (adapter->ifp->if_vlantrunk != NULL) psize += VLAN_TAG_SIZE; E1000_WRITE_REG(&adapter->hw, E1000_RLPML, psize); } else { rctl &= ~E1000_RCTL_LPE; srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT; rctl |= E1000_RCTL_SZ_2048; } /* * If TX flow control is disabled and there's >1 queue defined, * enable DROP. * * This drops frames rather than hanging the RX MAC for all queues. */ if ((adapter->num_queues > 1) && (adapter->fc == e1000_fc_none || adapter->fc == e1000_fc_rx_pause)) { srrctl |= E1000_SRRCTL_DROP_EN; } /* Setup the Base and Length of the Rx Descriptor Rings */ for (int i = 0; i < adapter->num_queues; i++, rxr++) { u64 bus_addr = rxr->rxdma.dma_paddr; u32 rxdctl; E1000_WRITE_REG(hw, E1000_RDLEN(i), adapter->num_rx_desc * sizeof(struct e1000_rx_desc)); E1000_WRITE_REG(hw, E1000_RDBAH(i), (uint32_t)(bus_addr >> 32)); E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr); E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl); /* Enable this Queue */ rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i)); rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; rxdctl &= 0xFFF00000; rxdctl |= IGB_RX_PTHRESH; rxdctl |= IGB_RX_HTHRESH << 8; rxdctl |= IGB_RX_WTHRESH << 16; E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl); } /* ** Setup for RX MultiQueue */ rxcsum = E1000_READ_REG(hw, E1000_RXCSUM); if (adapter->num_queues >1) { u32 random[10], mrqc, shift = 0; union igb_reta { u32 dword; u8 bytes[4]; } reta; arc4rand(&random, sizeof(random), 0); if (adapter->hw.mac.type == e1000_82575) shift = 6; /* Warning FM follows */ for (int i = 0; i < 128; i++) { reta.bytes[i & 3] = (i % adapter->num_queues) << shift; if ((i & 3) == 3) E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword); } /* Now fill in hash table */ mrqc = E1000_MRQC_ENABLE_RSS_4Q; for (int i = 0; i < 10; i++) E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, random[i]); mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 | E1000_MRQC_RSS_FIELD_IPV4_TCP); mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 | E1000_MRQC_RSS_FIELD_IPV6_TCP); mrqc |=( E1000_MRQC_RSS_FIELD_IPV4_UDP | E1000_MRQC_RSS_FIELD_IPV6_UDP); mrqc |=( E1000_MRQC_RSS_FIELD_IPV6_UDP_EX | E1000_MRQC_RSS_FIELD_IPV6_TCP_EX); E1000_WRITE_REG(hw, E1000_MRQC, mrqc); /* ** NOTE: Receive Full-Packet Checksum Offload ** is mutually exclusive with Multiqueue. However ** this is not the same as TCP/IP checksums which ** still work. 
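		** With PCSD set the hardware reports the RSS hash value
		** in the descriptor write-back instead of the packet
		** checksum; the RX path later copies that hash into
		** m_pkthdr.flowid.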
*/ rxcsum |= E1000_RXCSUM_PCSD; #if __FreeBSD_version >= 800000 /* For SCTP Offload */ if (((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580)) && (ifp->if_capenable & IFCAP_RXCSUM)) rxcsum |= E1000_RXCSUM_CRCOFL; #endif } else { /* Non RSS setup */ if (ifp->if_capenable & IFCAP_RXCSUM) { rxcsum |= E1000_RXCSUM_IPPCSE; #if __FreeBSD_version >= 800000 if ((adapter->hw.mac.type == e1000_82576) || (adapter->hw.mac.type == e1000_82580)) rxcsum |= E1000_RXCSUM_CRCOFL; #endif } else rxcsum &= ~E1000_RXCSUM_TUOFL; } E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum); /* Setup the Receive Control Register */ rctl &= ~(3 << E1000_RCTL_MO_SHIFT); rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); /* Strip CRC bytes. */ rctl |= E1000_RCTL_SECRC; /* Make sure VLAN Filters are off */ rctl &= ~E1000_RCTL_VFE; /* Don't store bad packets */ rctl &= ~E1000_RCTL_SBP; /* Enable Receives */ E1000_WRITE_REG(hw, E1000_RCTL, rctl); /* * Setup the HW Rx Head and Tail Descriptor Pointers * - needs to be after enable */ for (int i = 0; i < adapter->num_queues; i++) { rxr = &adapter->rx_rings[i]; E1000_WRITE_REG(hw, E1000_RDH(i), rxr->next_to_check); #ifdef DEV_NETMAP /* * an init() while a netmap client is active must * preserve the rx buffers passed to userspace. * In this driver it means we adjust RDT to * something different from next_to_refresh * (which is not used in netmap mode). */ if (ifp->if_capenable & IFCAP_NETMAP) { struct netmap_adapter *na = NA(adapter->ifp); struct netmap_kring *kring = &na->rx_rings[i]; int t = rxr->next_to_refresh - nm_kr_rxspace(kring); if (t >= adapter->num_rx_desc) t -= adapter->num_rx_desc; else if (t < 0) t += adapter->num_rx_desc; E1000_WRITE_REG(hw, E1000_RDT(i), t); } else #endif /* DEV_NETMAP */ E1000_WRITE_REG(hw, E1000_RDT(i), rxr->next_to_refresh); } return; } /********************************************************************* * * Free receive rings. * **********************************************************************/ static void igb_free_receive_structures(struct adapter *adapter) { struct rx_ring *rxr = adapter->rx_rings; for (int i = 0; i < adapter->num_queues; i++, rxr++) { struct lro_ctrl *lro = &rxr->lro; igb_free_receive_buffers(rxr); tcp_lro_free(lro); igb_dma_free(adapter, &rxr->rxdma); } free(adapter->rx_rings, M_DEVBUF); } /********************************************************************* * * Free receive ring data structures. 
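 *
 *  Unloads and destroys the per-buffer DMA maps, then tears down
 *  the header and payload DMA tags themselves.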
*
 **********************************************************************/
static void
igb_free_receive_buffers(struct rx_ring *rxr)
{
	struct adapter	*adapter = rxr->adapter;
	struct igb_rx_buf	*rxbuf;
	int i;

	INIT_DEBUGOUT("igb_free_receive_buffers: begin");

	/* Cleanup any existing buffers */
	if (rxr->rx_buffers != NULL) {
		for (i = 0; i < adapter->num_rx_desc; i++) {
			rxbuf = &rxr->rx_buffers[i];
			if (rxbuf->m_head != NULL) {
				bus_dmamap_sync(rxr->htag, rxbuf->hmap,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(rxr->htag, rxbuf->hmap);
				rxbuf->m_head->m_flags |= M_PKTHDR;
				m_freem(rxbuf->m_head);
			}
			if (rxbuf->m_pack != NULL) {
				bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
				rxbuf->m_pack->m_flags |= M_PKTHDR;
				m_freem(rxbuf->m_pack);
			}
			rxbuf->m_head = NULL;
			rxbuf->m_pack = NULL;
			if (rxbuf->hmap != NULL) {
				bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
				rxbuf->hmap = NULL;
			}
			if (rxbuf->pmap != NULL) {
				bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
				rxbuf->pmap = NULL;
			}
		}
		if (rxr->rx_buffers != NULL) {
			free(rxr->rx_buffers, M_DEVBUF);
			rxr->rx_buffers = NULL;
		}
	}

	if (rxr->htag != NULL) {
		bus_dma_tag_destroy(rxr->htag);
		rxr->htag = NULL;
	}
	if (rxr->ptag != NULL) {
		bus_dma_tag_destroy(rxr->ptag);
		rxr->ptag = NULL;
	}
}

static __inline void
igb_rx_discard(struct rx_ring *rxr, int i)
{
	struct igb_rx_buf	*rbuf;

	rbuf = &rxr->rx_buffers[i];

	/* Partially received? Free the chain */
	if (rxr->fmp != NULL) {
		rxr->fmp->m_flags |= M_PKTHDR;
		m_freem(rxr->fmp);
		rxr->fmp = NULL;
		rxr->lmp = NULL;
	}

	/*
	** With advanced descriptors the writeback
	** clobbers the buffer addrs, so it's easier
	** to just free the existing mbufs and take
	** the normal refresh path to get new buffers
	** and mapping.
	*/
	if (rbuf->m_head) {
		m_free(rbuf->m_head);
		rbuf->m_head = NULL;
		bus_dmamap_unload(rxr->htag, rbuf->hmap);
	}

	if (rbuf->m_pack) {
		m_free(rbuf->m_pack);
		rbuf->m_pack = NULL;
		bus_dmamap_unload(rxr->ptag, rbuf->pmap);
	}

	return;
}

static __inline void
igb_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
{
	/*
	 * At the moment LRO is only for IPv4/TCP packets, and the TCP
	 * checksum of the packet should have been computed by hardware.
	 * Also, it should not have a VLAN tag in the Ethernet header.
	 */
	if (rxr->lro_enabled &&
	    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
	    (ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0 &&
	    (ptype & (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP)) ==
	    (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP) &&
	    (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
	    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
		/*
		** Send to the stack if:
		**  - LRO not enabled, or
		**  - no LRO resources, or
		**  - lro enqueue fails
		*/
		if (rxr->lro.lro_cnt != 0)
			if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
				return;
	}
	IGB_RX_UNLOCK(rxr);
	(*ifp->if_input)(ifp, m);
	IGB_RX_LOCK(rxr);
}

/*********************************************************************
 *
 *  This routine executes in interrupt context. It replenishes
 *  the mbufs in the descriptor ring and sends data which has been
 *  DMA'd into host memory to the upper layer.
 *
 *  We loop at most count times if count is > 0, or until done if
 *  count < 0.
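 *
 *  Replacement buffers are posted back to the hardware in batches
 *  of eight via igb_refresh_mbufs(), which also advances the RDT
 *  tail register.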
* * Return TRUE if more to clean, FALSE otherwise *********************************************************************/ static bool igb_rxeof(struct igb_queue *que, int count, int *done) { struct adapter *adapter = que->adapter; struct rx_ring *rxr = que->rxr; struct ifnet *ifp = adapter->ifp; struct lro_ctrl *lro = &rxr->lro; struct lro_entry *queued; int i, processed = 0, rxdone = 0; u32 ptype, staterr = 0; union e1000_adv_rx_desc *cur; IGB_RX_LOCK(rxr); /* Sync the ring. */ bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); #ifdef DEV_NETMAP if (netmap_rx_irq(ifp, rxr->me, &processed)) { IGB_RX_UNLOCK(rxr); return (FALSE); } #endif /* DEV_NETMAP */ /* Main clean loop */ for (i = rxr->next_to_check; count != 0;) { struct mbuf *sendmp, *mh, *mp; struct igb_rx_buf *rxbuf; u16 hlen, plen, hdr, vtag; bool eop = FALSE; cur = &rxr->rx_base[i]; staterr = le32toh(cur->wb.upper.status_error); if ((staterr & E1000_RXD_STAT_DD) == 0) break; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) break; count--; sendmp = mh = mp = NULL; cur->wb.upper.status_error = 0; rxbuf = &rxr->rx_buffers[i]; plen = le16toh(cur->wb.upper.length); ptype = le32toh(cur->wb.lower.lo_dword.data) & IGB_PKTTYPE_MASK; if (((adapter->hw.mac.type == e1000_i350) || (adapter->hw.mac.type == e1000_i354)) && (staterr & E1000_RXDEXT_STATERR_LB)) vtag = be16toh(cur->wb.upper.vlan); else vtag = le16toh(cur->wb.upper.vlan); hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info); eop = ((staterr & E1000_RXD_STAT_EOP) == E1000_RXD_STAT_EOP); /* * Free the frame (all segments) if we're at EOP and * it's an error. * * The datasheet states that EOP + status is only valid for * the final segment in a multi-segment frame. */ if (eop && ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) != 0)) { adapter->dropped_pkts++; ++rxr->rx_discarded; igb_rx_discard(rxr, i); goto next_desc; } /* ** The way the hardware is configured to ** split, it will ONLY use the header buffer ** when header split is enabled, otherwise we ** get normal behavior, ie, both header and ** payload are DMA'd into the payload buffer. ** ** The fmp test is to catch the case where a ** packet spans multiple descriptors, in that ** case only the first header is valid. */ if (rxr->hdr_split && rxr->fmp == NULL) { bus_dmamap_unload(rxr->htag, rxbuf->hmap); hlen = (hdr & E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; if (hlen > IGB_HDR_BUF) hlen = IGB_HDR_BUF; mh = rxr->rx_buffers[i].m_head; mh->m_len = hlen; /* clear buf pointer for refresh */ rxbuf->m_head = NULL; /* ** Get the payload length, this ** could be zero if its a small ** packet. */ if (plen > 0) { mp = rxr->rx_buffers[i].m_pack; mp->m_len = plen; mh->m_next = mp; /* clear buf pointer */ rxbuf->m_pack = NULL; rxr->rx_split_packets++; } } else { /* ** Either no header split, or a ** secondary piece of a fragmented ** split packet. 
*/
			mh = rxr->rx_buffers[i].m_pack;
			mh->m_len = plen;
			/* clear buf info for refresh */
			rxbuf->m_pack = NULL;
		}
		bus_dmamap_unload(rxr->ptag, rxbuf->pmap);

		++processed; /* So we know when to refresh */

		/* Initial frame - setup */
		if (rxr->fmp == NULL) {
			mh->m_pkthdr.len = mh->m_len;
			/* Save the head of the chain */
			rxr->fmp = mh;
			rxr->lmp = mh;
			if (mp != NULL) {
				/* Add payload if split */
				mh->m_pkthdr.len += mp->m_len;
				rxr->lmp = mh->m_next;
			}
		} else {
			/* Chain mbuf's together */
			rxr->lmp->m_next = mh;
			rxr->lmp = rxr->lmp->m_next;
			rxr->fmp->m_pkthdr.len += mh->m_len;
		}

		if (eop) {
			rxr->fmp->m_pkthdr.rcvif = ifp;
			ifp->if_ipackets++;
			rxr->rx_packets++;
			/* capture data for AIM */
			rxr->packets++;
			rxr->bytes += rxr->fmp->m_pkthdr.len;
			rxr->rx_bytes += rxr->fmp->m_pkthdr.len;

			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
				igb_rx_checksum(staterr, rxr->fmp, ptype);

			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
			    (staterr & E1000_RXD_STAT_VP) != 0) {
				rxr->fmp->m_pkthdr.ether_vtag = vtag;
				rxr->fmp->m_flags |= M_VLANTAG;
			}

			/*
			 * In the multiqueue case the RXCSUM.PCSD bit is set
			 * and never cleared, which means the RSS hash is
			 * available to be used.
			 */
			if (adapter->num_queues > 1) {
				rxr->fmp->m_pkthdr.flowid =
				    le32toh(cur->wb.lower.hi_dword.rss);
				/*
				 * Full RSS support is not available in
				 * FreeBSD 10, so set the hash type to
				 * OPAQUE.
				 */
				M_HASHTYPE_SET(rxr->fmp, M_HASHTYPE_OPAQUE);
			} else {
#ifndef IGB_LEGACY_TX
				rxr->fmp->m_pkthdr.flowid = que->msix;
				M_HASHTYPE_SET(rxr->fmp, M_HASHTYPE_OPAQUE);
#endif
			}
			sendmp = rxr->fmp;
			/* Make sure to set M_PKTHDR. */
			sendmp->m_flags |= M_PKTHDR;
			rxr->fmp = NULL;
			rxr->lmp = NULL;
		}

next_desc:
		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Advance our pointers to the next descriptor. */
		if (++i == adapter->num_rx_desc)
			i = 0;
		/*
		** Send to the stack or LRO
		*/
		if (sendmp != NULL) {
			rxr->next_to_check = i;
			igb_rx_input(rxr, ifp, sendmp, ptype);
			i = rxr->next_to_check;
			rxdone++;
		}

		/* Every 8 descriptors we go to refresh mbufs */
		if (processed == 8) {
			igb_refresh_mbufs(rxr, i);
			processed = 0;
		}
	}

	/* Catch any remainders */
	if (igb_rx_unrefreshed(rxr))
		igb_refresh_mbufs(rxr, i);

	rxr->next_to_check = i;

	/*
	 * Flush any outstanding LRO work
	 */
	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&lro->lro_active, next);
		tcp_lro_flush(lro, queued);
	}

	if (done != NULL)
		*done += rxdone;

	IGB_RX_UNLOCK(rxr);
	return ((staterr & E1000_RXD_STAT_DD) ? TRUE : FALSE);
}

/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of the checksum so that it
 *  doesn't spend time verifying it.
 *
 *********************************************************************/
static void
igb_rx_checksum(u32 staterr, struct mbuf *mp, u32 ptype)
{
	u16 status = (u16)staterr;
	u8 errors = (u8) (staterr >> 24);
	int sctp;

	/* Ignore Checksum bit is set */
	if (status & E1000_RXD_STAT_IXSM) {
		mp->m_pkthdr.csum_flags = 0;
		return;
	}

	if ((ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0 &&
	    (ptype & E1000_RXDADV_PKTTYPE_SCTP) != 0)
		sctp = 1;
	else
		sctp = 0;
	if (status & E1000_RXD_STAT_IPCS) {
		/* Did it pass?
*/
		if (!(errors & E1000_RXD_ERR_IPE)) {
			/* IP Checksum Good */
			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		} else
			mp->m_pkthdr.csum_flags = 0;
	}

	if (status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) {
		u64 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
#if __FreeBSD_version >= 800000
		if (sctp) /* reassign */
			type = CSUM_SCTP_VALID;
#endif
		/* Did it pass? */
		if (!(errors & E1000_RXD_ERR_TCPE)) {
			mp->m_pkthdr.csum_flags |= type;
			if (sctp == 0)
				mp->m_pkthdr.csum_data = htons(0xffff);
		}
	}
	return;
}

/*
 * This routine is run via a vlan
 * config EVENT
 */
static void
igb_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u32		index, bit;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IGB_CORE_LOCK(adapter);
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] |= (1 << bit);
	++adapter->num_vlans;
	/* Change hw filter setting */
	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
		igb_setup_vlan_hw_support(adapter);
	IGB_CORE_UNLOCK(adapter);
}

/*
 * This routine is run via a vlan
 * unconfig EVENT
 */
static void
igb_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u32		index, bit;

	if (ifp->if_softc != arg)
		return;

	if ((vtag == 0) || (vtag > 4095)) /* Invalid */
		return;

	IGB_CORE_LOCK(adapter);
	index = (vtag >> 5) & 0x7F;
	bit = vtag & 0x1F;
	adapter->shadow_vfta[index] &= ~(1 << bit);
	--adapter->num_vlans;
	/* Change hw filter setting */
	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
		igb_setup_vlan_hw_support(adapter);
	IGB_CORE_UNLOCK(adapter);
}

static void
igb_setup_vlan_hw_support(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct ifnet	*ifp = adapter->ifp;
	u32		reg;

	if (adapter->vf_ifp) {
		e1000_rlpml_set_vf(hw,
		    adapter->max_frame_size + VLAN_TAG_SIZE);
		return;
	}

	reg = E1000_READ_REG(hw, E1000_CTRL);
	reg |= E1000_CTRL_VME;
	E1000_WRITE_REG(hw, E1000_CTRL, reg);

	/* Enable the Filter Table */
	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
		reg = E1000_READ_REG(hw, E1000_RCTL);
		reg &= ~E1000_RCTL_CFIEN;
		reg |= E1000_RCTL_VFE;
		E1000_WRITE_REG(hw, E1000_RCTL, reg);
	}

	/* Update the frame size */
	E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
	    adapter->max_frame_size + VLAN_TAG_SIZE);

	/* Don't bother with table if no vlans */
	if ((adapter->num_vlans == 0) ||
	    ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0))
		return;
	/*
	** A soft reset zeroes out the VFTA, so
	** we need to repopulate it now.
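	** Each of the 128 VFTA registers holds 32 bits, so a vtag
	** selects word (vtag >> 5) & 0x7F and bit vtag & 0x1F within
	** it; VLAN 100, for example, lives in bit 4 of shadow_vfta[3].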
*/ for (int i = 0; i < IGB_VFTA_SIZE; i++) if (adapter->shadow_vfta[i] != 0) { if (adapter->vf_ifp) e1000_vfta_set_vf(hw, adapter->shadow_vfta[i], TRUE); else e1000_write_vfta(hw, i, adapter->shadow_vfta[i]); } } static void igb_enable_intr(struct adapter *adapter) { /* With RSS set up what to auto clear */ if (adapter->msix_mem) { u32 mask = (adapter->que_mask | adapter->link_mask); E1000_WRITE_REG(&adapter->hw, E1000_EIAC, mask); E1000_WRITE_REG(&adapter->hw, E1000_EIAM, mask); E1000_WRITE_REG(&adapter->hw, E1000_EIMS, mask); E1000_WRITE_REG(&adapter->hw, E1000_IMS, E1000_IMS_LSC); } else { E1000_WRITE_REG(&adapter->hw, E1000_IMS, IMS_ENABLE_MASK); } E1000_WRITE_FLUSH(&adapter->hw); return; } static void igb_disable_intr(struct adapter *adapter) { if (adapter->msix_mem) { E1000_WRITE_REG(&adapter->hw, E1000_EIMC, ~0); E1000_WRITE_REG(&adapter->hw, E1000_EIAC, 0); } E1000_WRITE_REG(&adapter->hw, E1000_IMC, ~0); E1000_WRITE_FLUSH(&adapter->hw); return; } /* * Bit of a misnomer, what this really means is * to enable OS management of the system... aka * to disable special hardware management features */ static void igb_init_manageability(struct adapter *adapter) { if (adapter->has_manage) { int manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H); int manc = E1000_READ_REG(&adapter->hw, E1000_MANC); /* disable hardware interception of ARP */ manc &= ~(E1000_MANC_ARP_EN); /* enable receiving management packets to the host */ manc |= E1000_MANC_EN_MNG2HOST; manc2h |= 1 << 5; /* Mng Port 623 */ manc2h |= 1 << 6; /* Mng Port 664 */ E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h); E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc); } } /* * Give control back to hardware management * controller if there is one. */ static void igb_release_manageability(struct adapter *adapter) { if (adapter->has_manage) { int manc = E1000_READ_REG(&adapter->hw, E1000_MANC); /* re-enable hardware interception of ARP */ manc |= E1000_MANC_ARP_EN; manc &= ~E1000_MANC_EN_MNG2HOST; E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc); } } /* * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit. * For ASF and Pass Through versions of f/w this means that * the driver is loaded. * */ static void igb_get_hw_control(struct adapter *adapter) { u32 ctrl_ext; if (adapter->vf_ifp) return; /* Let firmware know the driver has taken over */ ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT); E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); } /* * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit. * For ASF and Pass Through versions of f/w this means that the * driver is no longer loaded. 
*
 */
static void
igb_release_hw_control(struct adapter *adapter)
{
	u32 ctrl_ext;

	if (adapter->vf_ifp)
		return;

	/* Let firmware take over control of h/w */
	ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
	E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
	    ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

static int
igb_is_valid_ether_addr(uint8_t *addr)
{
	char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };

	if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
		return (FALSE);
	}

	return (TRUE);
}


/*
 * Enable PCI Wake On Lan capability
 */
static void
igb_enable_wakeup(device_t dev)
{
	u16     cap, status;
	u8      id;

	/* First find the capabilities pointer */
	cap = pci_read_config(dev, PCIR_CAP_PTR, 2);
	/* Read the PM Capabilities */
	id = pci_read_config(dev, cap, 1);
	if (id != PCIY_PMG)     /* Something wrong */
		return;
	/*
	 * OK, we have the power capabilities, so
	 * now get the status register
	 */
	cap += PCIR_POWER_STATUS;
	status = pci_read_config(dev, cap, 2);
	status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(dev, cap, status, 2);
	return;
}

static void
igb_led_func(void *arg, int onoff)
{
	struct adapter	*adapter = arg;

	IGB_CORE_LOCK(adapter);
	if (onoff) {
		e1000_setup_led(&adapter->hw);
		e1000_led_on(&adapter->hw);
	} else {
		e1000_led_off(&adapter->hw);
		e1000_cleanup_led(&adapter->hw);
	}
	IGB_CORE_UNLOCK(adapter);
}

/**********************************************************************
 *
 *  Update the board statistics counters.
 *
 **********************************************************************/
static void
igb_update_stats_counters(struct adapter *adapter)
{
	struct ifnet		*ifp;
	struct e1000_hw		*hw = &adapter->hw;
	struct e1000_hw_stats	*stats;

	/*
	** The virtual function adapter has only a
	** small controlled set of stats, do only
	** those and return.
	*/
	if (adapter->vf_ifp) {
		igb_update_vf_stats_counters(adapter);
		return;
	}

	stats = (struct e1000_hw_stats	*)adapter->stats;

-	if(adapter->hw.phy.media_type == e1000_media_type_copper ||
+	if (adapter->hw.phy.media_type == e1000_media_type_copper ||
	    (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
		stats->symerrs += E1000_READ_REG(hw, E1000_SYMERRS);
		stats->sec += E1000_READ_REG(hw, E1000_SEC);
	}

	stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
	stats->mpc += E1000_READ_REG(hw, E1000_MPC);
	stats->scc += E1000_READ_REG(hw, E1000_SCC);
	stats->ecol += E1000_READ_REG(hw, E1000_ECOL);

	stats->mcc += E1000_READ_REG(hw, E1000_MCC);
	stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
	stats->colc += E1000_READ_REG(hw, E1000_COLC);
	stats->dc += E1000_READ_REG(hw, E1000_DC);
	stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
	stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
	stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
	/*
	** For watchdog management we need to know if we have been
	** paused during the last interval, so capture that here.
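	** (Like the other statistics registers, XOFFRXC clears on
	** read, so each pass through here sees only the XOFF frames
	** received since the previous update.)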
*/ adapter->pause_frames = E1000_READ_REG(&adapter->hw, E1000_XOFFRXC); stats->xoffrxc += adapter->pause_frames; stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC); stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC); stats->prc64 += E1000_READ_REG(hw, E1000_PRC64); stats->prc127 += E1000_READ_REG(hw, E1000_PRC127); stats->prc255 += E1000_READ_REG(hw, E1000_PRC255); stats->prc511 += E1000_READ_REG(hw, E1000_PRC511); stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023); stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522); stats->gprc += E1000_READ_REG(hw, E1000_GPRC); stats->bprc += E1000_READ_REG(hw, E1000_BPRC); stats->mprc += E1000_READ_REG(hw, E1000_MPRC); stats->gptc += E1000_READ_REG(hw, E1000_GPTC); /* For the 64-bit byte counters the low dword must be read first. */ /* Both registers clear on the read of the high dword */ stats->gorc += E1000_READ_REG(hw, E1000_GORCL) + ((u64)E1000_READ_REG(hw, E1000_GORCH) << 32); stats->gotc += E1000_READ_REG(hw, E1000_GOTCL) + ((u64)E1000_READ_REG(hw, E1000_GOTCH) << 32); stats->rnbc += E1000_READ_REG(hw, E1000_RNBC); stats->ruc += E1000_READ_REG(hw, E1000_RUC); stats->rfc += E1000_READ_REG(hw, E1000_RFC); stats->roc += E1000_READ_REG(hw, E1000_ROC); stats->rjc += E1000_READ_REG(hw, E1000_RJC); stats->mgprc += E1000_READ_REG(hw, E1000_MGTPRC); stats->mgpdc += E1000_READ_REG(hw, E1000_MGTPDC); stats->mgptc += E1000_READ_REG(hw, E1000_MGTPTC); stats->tor += E1000_READ_REG(hw, E1000_TORL) + ((u64)E1000_READ_REG(hw, E1000_TORH) << 32); stats->tot += E1000_READ_REG(hw, E1000_TOTL) + ((u64)E1000_READ_REG(hw, E1000_TOTH) << 32); stats->tpr += E1000_READ_REG(hw, E1000_TPR); stats->tpt += E1000_READ_REG(hw, E1000_TPT); stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64); stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127); stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255); stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511); stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023); stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522); stats->mptc += E1000_READ_REG(hw, E1000_MPTC); stats->bptc += E1000_READ_REG(hw, E1000_BPTC); /* Interrupt Counts */ stats->iac += E1000_READ_REG(hw, E1000_IAC); stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC); stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC); stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC); stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC); stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC); stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC); stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC); stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC); /* Host to Card Statistics */ stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC); stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC); stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC); stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC); stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC); stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC); stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC); stats->hgorc += (E1000_READ_REG(hw, E1000_HGORCL) + ((u64)E1000_READ_REG(hw, E1000_HGORCH) << 32)); stats->hgotc += (E1000_READ_REG(hw, E1000_HGOTCL) + ((u64)E1000_READ_REG(hw, E1000_HGOTCH) << 32)); stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS); stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC); stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC); stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC); stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC); stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS); stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR); stats->tsctc += E1000_READ_REG(hw, 
E1000_TSCTC); stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC); ifp = adapter->ifp; ifp->if_collisions = stats->colc; /* Rx Errors */ ifp->if_ierrors = adapter->dropped_pkts + stats->rxerrc + stats->crcerrs + stats->algnerrc + stats->ruc + stats->roc + stats->mpc + stats->cexterr; /* Tx Errors */ ifp->if_oerrors = stats->ecol + stats->latecol + adapter->watchdog_events; /* Driver specific counters */ adapter->device_control = E1000_READ_REG(hw, E1000_CTRL); adapter->rx_control = E1000_READ_REG(hw, E1000_RCTL); adapter->int_mask = E1000_READ_REG(hw, E1000_IMS); adapter->eint_mask = E1000_READ_REG(hw, E1000_EIMS); adapter->packet_buf_alloc_tx = ((E1000_READ_REG(hw, E1000_PBA) & 0xffff0000) >> 16); adapter->packet_buf_alloc_rx = (E1000_READ_REG(hw, E1000_PBA) & 0xffff); } /********************************************************************** * * Initialize the VF board statistics counters. * **********************************************************************/ static void igb_vf_init_stats(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct e1000_vf_stats *stats; stats = (struct e1000_vf_stats *)adapter->stats; if (stats == NULL) return; stats->last_gprc = E1000_READ_REG(hw, E1000_VFGPRC); stats->last_gorc = E1000_READ_REG(hw, E1000_VFGORC); stats->last_gptc = E1000_READ_REG(hw, E1000_VFGPTC); stats->last_gotc = E1000_READ_REG(hw, E1000_VFGOTC); stats->last_mprc = E1000_READ_REG(hw, E1000_VFMPRC); } /********************************************************************** * * Update the VF board statistics counters. * **********************************************************************/ static void igb_update_vf_stats_counters(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct e1000_vf_stats *stats; if (adapter->link_speed == 0) return; stats = (struct e1000_vf_stats *)adapter->stats; UPDATE_VF_REG(E1000_VFGPRC, stats->last_gprc, stats->gprc); UPDATE_VF_REG(E1000_VFGORC, stats->last_gorc, stats->gorc); UPDATE_VF_REG(E1000_VFGPTC, stats->last_gptc, stats->gptc); UPDATE_VF_REG(E1000_VFGOTC, stats->last_gotc, stats->gotc); UPDATE_VF_REG(E1000_VFMPRC, stats->last_mprc, stats->mprc); } /* Export a single 32-bit register via a read-only sysctl. */ static int igb_sysctl_reg_handler(SYSCTL_HANDLER_ARGS) { struct adapter *adapter; u_int val; adapter = oidp->oid_arg1; val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2); return (sysctl_handle_int(oidp, &val, 0, req)); } /* ** Tuneable interrupt rate handler */ static int igb_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS) { struct igb_queue *que = ((struct igb_queue *)oidp->oid_arg1); int error; u32 reg, usec, rate; reg = E1000_READ_REG(&que->adapter->hw, E1000_EITR(que->msix)); usec = ((reg & 0x7FFC) >> 2); if (usec > 0) rate = 1000000 / usec; else rate = 0; error = sysctl_handle_int(oidp, &rate, 0, req); if (error || !req->newptr) return error; return 0; } /* * Add sysctl variables, one per statistic, to the system. 
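 *
 * Everything hangs off the device's sysctl tree, e.g.
 * dev.igb.0.mac_stats.good_pkts_recvd or
 * dev.igb.0.queue0.interrupt_rate for the first adapter. The
 * latter decodes EITR as usec = (reg & 0x7FFC) >> 2 and
 * rate = 1000000 / usec, so the default IGB_DEFAULT_ITR of
 * ((1000000 / 8000) << 2) reads back as 8000 interrupts/sec.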
*/ static void igb_add_hw_stats(struct adapter *adapter) { device_t dev = adapter->dev; struct tx_ring *txr = adapter->tx_rings; struct rx_ring *rxr = adapter->rx_rings; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct sysctl_oid *tree = device_get_sysctl_tree(dev); struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); struct e1000_hw_stats *stats = adapter->stats; struct sysctl_oid *stat_node, *queue_node, *int_node, *host_node; struct sysctl_oid_list *stat_list, *queue_list, *int_list, *host_list; #define QUEUE_NAME_LEN 32 char namebuf[QUEUE_NAME_LEN]; /* Driver Statistics */ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", CTLFLAG_RD, &adapter->dropped_pkts, "Driver dropped packets"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq", CTLFLAG_RD, &adapter->link_irq, "Link MSIX IRQ Handled"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_fail", CTLFLAG_RD, &adapter->mbuf_defrag_failed, "Defragmenting mbuf chain failed"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail", CTLFLAG_RD, &adapter->no_tx_dma_setup, "Driver tx dma failure in xmit"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns", CTLFLAG_RD, &adapter->rx_overruns, "RX overruns"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts", CTLFLAG_RD, &adapter->watchdog_events, "Watchdog timeouts"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "device_control", CTLFLAG_RD, &adapter->device_control, "Device Control Register"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_control", CTLFLAG_RD, &adapter->rx_control, "Receiver Control Register"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "interrupt_mask", CTLFLAG_RD, &adapter->int_mask, "Interrupt Mask"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "extended_int_mask", CTLFLAG_RD, &adapter->eint_mask, "Extended Interrupt Mask"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_buf_alloc", CTLFLAG_RD, &adapter->packet_buf_alloc_tx, "Transmit Buffer Packet Allocation"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_buf_alloc", CTLFLAG_RD, &adapter->packet_buf_alloc_rx, "Receive Buffer Packet Allocation"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water", CTLFLAG_RD, &adapter->hw.fc.high_water, 0, "Flow Control High Watermark"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water", CTLFLAG_RD, &adapter->hw.fc.low_water, 0, "Flow Control Low Watermark"); for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) { struct lro_ctrl *lro = &rxr->lro; snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate", CTLTYPE_UINT | CTLFLAG_RD, &adapter->queues[i], sizeof(&adapter->queues[i]), igb_sysctl_interrupt_rate_handler, "IU", "Interrupt Rate"); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDH(txr->me), igb_sysctl_reg_handler, "IU", "Transmit Descriptor Head"); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_TDT(txr->me), igb_sysctl_reg_handler, "IU", "Transmit Descriptor Tail"); SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail", CTLFLAG_RD, &txr->no_desc_avail, "Queue No Descriptor Available"); SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets", CTLFLAG_RD, &txr->total_packets, "Queue Packets Transmitted"); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDH(rxr->me), igb_sysctl_reg_handler, "IU", "Receive Descriptor Head"); 
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", CTLTYPE_UINT | CTLFLAG_RD, adapter, E1000_RDT(rxr->me), igb_sysctl_reg_handler, "IU", "Receive Descriptor Tail"); SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets", CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received"); SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes", CTLFLAG_RD, &rxr->rx_bytes, "Queue Bytes Received"); SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "lro_queued", CTLFLAG_RD, &lro->lro_queued, 0, "LRO Queued"); SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "lro_flushed", CTLFLAG_RD, &lro->lro_flushed, 0, "LRO Flushed"); } /* MAC stats get their own sub node */ stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", CTLFLAG_RD, NULL, "MAC Statistics"); stat_list = SYSCTL_CHILDREN(stat_node); /* ** VF adapter has a very limited set of stats ** since its not managing the metal, so to speak. */ if (adapter->vf_ifp) { SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd", CTLFLAG_RD, &stats->gprc, "Good Packets Received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", CTLFLAG_RD, &stats->gptc, "Good Packets Transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd", CTLFLAG_RD, &stats->gorc, "Good Octets Received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", CTLFLAG_RD, &stats->gotc, "Good Octets Transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd", CTLFLAG_RD, &stats->mprc, "Multicast Packets Received"); return; } SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "excess_coll", CTLFLAG_RD, &stats->ecol, "Excessive collisions"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "single_coll", CTLFLAG_RD, &stats->scc, "Single collisions"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "multiple_coll", CTLFLAG_RD, &stats->mcc, "Multiple collisions"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "late_coll", CTLFLAG_RD, &stats->latecol, "Late collisions"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "collision_count", CTLFLAG_RD, &stats->colc, "Collision Count"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "symbol_errors", CTLFLAG_RD, &stats->symerrs, "Symbol Errors"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "sequence_errors", CTLFLAG_RD, &stats->sec, "Sequence Errors"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "defer_count", CTLFLAG_RD, &stats->dc, "Defer Count"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "missed_packets", CTLFLAG_RD, &stats->mpc, "Missed Packets"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_length_errors", CTLFLAG_RD, &stats->rlec, "Receive Length Errors"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_no_buff", CTLFLAG_RD, &stats->rnbc, "Receive No Buffers"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_undersize", CTLFLAG_RD, &stats->ruc, "Receive Undersize"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_fragmented", CTLFLAG_RD, &stats->rfc, "Fragmented Packets Received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_oversize", CTLFLAG_RD, &stats->roc, "Oversized Packets Received"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_jabber", CTLFLAG_RD, &stats->rjc, "Recevied Jabber"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_errs", CTLFLAG_RD, &stats->rxerrc, "Receive Errors"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "crc_errs", CTLFLAG_RD, &stats->crcerrs, "CRC errors"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "alignment_errs", CTLFLAG_RD, &stats->algnerrc, "Alignment Errors"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_no_crs", CTLFLAG_RD, &stats->tncrs, "Transmit with No CRS"); /* On 82575 these are collision 
counts */
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs",
	    CTLFLAG_RD, &stats->cexterr,
	    "Collision/Carrier extension errors");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
	    CTLFLAG_RD, &stats->xonrxc,
	    "XON Received");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xon_txd",
	    CTLFLAG_RD, &stats->xontxc,
	    "XON Transmitted");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
	    CTLFLAG_RD, &stats->xoffrxc,
	    "XOFF Received");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
	    CTLFLAG_RD, &stats->xofftxc,
	    "XOFF Transmitted");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "unsupported_fc_recvd",
	    CTLFLAG_RD, &stats->fcruc,
	    "Unsupported Flow Control Received");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mgmt_pkts_recvd",
	    CTLFLAG_RD, &stats->mgprc,
	    "Management Packets Received");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mgmt_pkts_drop",
	    CTLFLAG_RD, &stats->mgpdc,
	    "Management Packets Dropped");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mgmt_pkts_txd",
	    CTLFLAG_RD, &stats->mgptc,
	    "Management Packets Transmitted");

	/* Packet Reception Stats */
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
	    CTLFLAG_RD, &stats->tpr,
	    "Total Packets Received");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
	    CTLFLAG_RD, &stats->gprc,
	    "Good Packets Received");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
	    CTLFLAG_RD, &stats->bprc,
	    "Broadcast Packets Received");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
	    CTLFLAG_RD, &stats->mprc,
	    "Multicast Packets Received");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
	    CTLFLAG_RD, &stats->prc64,
	    "64 byte frames received");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
	    CTLFLAG_RD, &stats->prc127,
	    "65-127 byte frames received");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
	    CTLFLAG_RD, &stats->prc255,
	    "128-255 byte frames received");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
	    CTLFLAG_RD, &stats->prc511,
	    "256-511 byte frames received");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
	    CTLFLAG_RD, &stats->prc1023,
	    "512-1023 byte frames received");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
	    CTLFLAG_RD, &stats->prc1522,
	    "1024-1522 byte frames received");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
	    CTLFLAG_RD, &stats->gorc,
-	    "Good Octets Received");
-	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_octets_recvd",
-	    CTLFLAG_RD, &stats->tor,
-	    "Total Octets Received");
+	    "Good Octets Received");
+	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_octets_recvd",
+	    CTLFLAG_RD, &stats->tor,
+	    "Total Octets Received");

	/* Packet Transmission Stats */
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
	    CTLFLAG_RD, &stats->gotc,
	    "Good Octets Transmitted");
-	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_octets_txd",
-	    CTLFLAG_RD, &stats->tot,
-	    "Total Octets Transmitted");
+	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_octets_txd",
+	    CTLFLAG_RD, &stats->tot,
+	    "Total Octets Transmitted");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
	    CTLFLAG_RD, &stats->tpt,
	    "Total Packets Transmitted");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
	    CTLFLAG_RD, &stats->gptc,
	    "Good Packets Transmitted");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
	    CTLFLAG_RD, &stats->bptc,
	    "Broadcast Packets Transmitted");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
	    CTLFLAG_RD, &stats->mptc,
	    "Multicast Packets Transmitted");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO,
"tx_frames_64", CTLFLAG_RD, &stats->ptc64, "64 byte frames transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127", CTLFLAG_RD, &stats->ptc127, "65-127 byte frames transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255", CTLFLAG_RD, &stats->ptc255, "128-255 byte frames transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511", CTLFLAG_RD, &stats->ptc511, "256-511 byte frames transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023", CTLFLAG_RD, &stats->ptc1023, "512-1023 byte frames transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522", CTLFLAG_RD, &stats->ptc1522, "1024-1522 byte frames transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tso_txd", CTLFLAG_RD, &stats->tsctc, "TSO Contexts Transmitted"); SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail", CTLFLAG_RD, &stats->tsctfc, "TSO Contexts Failed"); /* Interrupt Stats */ int_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "interrupts", CTLFLAG_RD, NULL, "Interrupt Statistics"); int_list = SYSCTL_CHILDREN(int_node); SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "asserts", CTLFLAG_RD, &stats->iac, "Interrupt Assertion Count"); SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_pkt_timer", CTLFLAG_RD, &stats->icrxptc, "Interrupt Cause Rx Pkt Timer Expire Count"); SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_abs_timer", CTLFLAG_RD, &stats->icrxatc, "Interrupt Cause Rx Abs Timer Expire Count"); SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_pkt_timer", CTLFLAG_RD, &stats->ictxptc, "Interrupt Cause Tx Pkt Timer Expire Count"); SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_abs_timer", CTLFLAG_RD, &stats->ictxatc, "Interrupt Cause Tx Abs Timer Expire Count"); SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_queue_empty", CTLFLAG_RD, &stats->ictxqec, "Interrupt Cause Tx Queue Empty Count"); SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_queue_min_thresh", CTLFLAG_RD, &stats->ictxqmtc, "Interrupt Cause Tx Queue Min Thresh Count"); SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_desc_min_thresh", CTLFLAG_RD, &stats->icrxdmtc, "Interrupt Cause Rx Desc Min Thresh Count"); SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_overrun", CTLFLAG_RD, &stats->icrxoc, "Interrupt Cause Receiver Overrun Count"); /* Host to Card Stats */ host_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "host", CTLFLAG_RD, NULL, "Host to Card Statistics"); host_list = SYSCTL_CHILDREN(host_node); SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_tx_pkt", CTLFLAG_RD, &stats->cbtmpc, "Circuit Breaker Tx Packet Count"); SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "host_tx_pkt_discard", CTLFLAG_RD, &stats->htdpmc, "Host Transmit Discarded Packets"); SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "rx_pkt", CTLFLAG_RD, &stats->rpthc, "Rx Packets To Host"); SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_rx_pkts", CTLFLAG_RD, &stats->cbrmpc, "Circuit Breaker Rx Packet Count"); SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_rx_pkt_drop", CTLFLAG_RD, &stats->cbrdpc, "Circuit Breaker Rx Dropped Count"); SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "tx_good_pkt", CTLFLAG_RD, &stats->hgptc, "Host Good Packets Tx Count"); SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_tx_pkt_drop", CTLFLAG_RD, &stats->htcbdpc, "Host Tx Circuit Breaker Dropped Count"); SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "rx_good_bytes", CTLFLAG_RD, &stats->hgorc, "Host Good Octets Received Count"); SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "tx_good_bytes", CTLFLAG_RD, &stats->hgotc, "Host Good Octets Transmit Count"); 
SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "length_errors", CTLFLAG_RD, &stats->lenerrs, "Length Errors"); SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "serdes_violation_pkt", CTLFLAG_RD, &stats->scvpc, "SerDes/SGMII Code Violation Pkt Count"); SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "header_redir_missed", CTLFLAG_RD, &stats->hrmpc, "Header Redirection Missed Packet Count"); } /********************************************************************** * * This routine provides a way to dump out the adapter eeprom, * often a useful debug/service tool. This only dumps the first * 32 words, stuff that matters is in that extent. * **********************************************************************/ static int igb_sysctl_nvm_info(SYSCTL_HANDLER_ARGS) { struct adapter *adapter; int error; int result; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); /* * This value will cause a hex dump of the * first 32 16-bit words of the EEPROM to * the screen. */ if (result == 1) { adapter = (struct adapter *)arg1; igb_print_nvm_info(adapter); } return (error); } static void igb_print_nvm_info(struct adapter *adapter) { u16 eeprom_data; int i, j, row = 0; /* Its a bit crude, but it gets the job done */ printf("\nInterface EEPROM Dump:\n"); printf("Offset\n0x0000 "); for (i = 0, j = 0; i < 32; i++, j++) { if (j == 8) { /* Make the offset block */ j = 0; ++row; printf("\n0x00%x0 ",row); } e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data); printf("%04x ", eeprom_data); } printf("\n"); } static void igb_set_sysctl_value(struct adapter *adapter, const char *name, const char *description, int *limit, int value) { *limit = value; SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev), SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)), OID_AUTO, name, CTLFLAG_RW, limit, value, description); } /* ** Set flow control using sysctl: ** Flow control values: ** 0 - off ** 1 - rx pause ** 2 - tx pause ** 3 - full */ static int igb_set_flowcntl(SYSCTL_HANDLER_ARGS) { int error; static int input = 3; /* default is full */ struct adapter *adapter = (struct adapter *) arg1; error = sysctl_handle_int(oidp, &input, 0, req); if ((error) || (req->newptr == NULL)) return (error); switch (input) { case e1000_fc_rx_pause: case e1000_fc_tx_pause: case e1000_fc_full: case e1000_fc_none: adapter->hw.fc.requested_mode = input; adapter->fc = input; break; default: /* Do nothing */ return (error); } adapter->hw.fc.current_mode = adapter->hw.fc.requested_mode; e1000_force_mac_fc(&adapter->hw); /* XXX TODO: update DROP_EN on each RX queue if appropriate */ return (error); } /* ** Manage DMA Coalesce: ** Control values: ** 0/1 - off/on ** Legal timer values are: ** 250,500,1000-10000 in thousands */ static int igb_sysctl_dmac(SYSCTL_HANDLER_ARGS) { struct adapter *adapter = (struct adapter *) arg1; int error; error = sysctl_handle_int(oidp, &adapter->dmac, 0, req); if ((error) || (req->newptr == NULL)) return (error); switch (adapter->dmac) { case 0: - /*Disabling */ + /* Disabling */ break; case 1: /* Just enable and use default */ adapter->dmac = 1000; break; case 250: case 500: case 1000: case 2000: case 3000: case 4000: case 5000: case 6000: case 7000: case 8000: case 9000: case 10000: /* Legal values - allow */ break; default: /* Do nothing, illegal value */ adapter->dmac = 0; return (EINVAL); } /* Reinit the interface */ igb_init(adapter); return (error); } /* ** Manage Energy Efficient Ethernet: ** Control values: ** 0/1 - enabled/disabled */ static int 
igb_sysctl_eee(SYSCTL_HANDLER_ARGS)
{
	struct adapter	*adapter = (struct adapter *) arg1;
	int		error, value;

	value = adapter->hw.dev_spec._82575.eee_disable;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	IGB_CORE_LOCK(adapter);
	adapter->hw.dev_spec._82575.eee_disable = (value != 0);
	igb_init_locked(adapter);
	IGB_CORE_UNLOCK(adapter);
	return (0);
}
Index: stable/10/sys/dev/e1000/if_igb.h
===================================================================
--- stable/10/sys/dev/e1000/if_igb.h	(revision 296054)
+++ stable/10/sys/dev/e1000/if_igb.h	(revision 296055)
@@ -1,571 +1,626 @@
/******************************************************************************

  Copyright (c) 2001-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
  THE POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

-#ifndef _IGB_H_DEFINED_
-#define _IGB_H_DEFINED_
+#ifndef _IF_IGB_H_
+#define _IF_IGB_H_

-/* Tunables */
+#include <sys/param.h>
+#include <sys/systm.h>
+#ifndef IGB_LEGACY_TX
+#include <sys/buf_ring.h>
+#endif
+#include <sys/bus.h>
+#include <sys/endian.h>
+#include <sys/kernel.h>
+#include <sys/kthread.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/sysctl.h>
+#include <sys/taskqueue.h>
+#include <sys/eventhandler.h>
+#include <sys/pcpu.h>
+#include <sys/smp.h>
+#include <machine/smp.h>
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <net/bpf.h>
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/if_arp.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+#ifdef RSS
+#include <net/rss_config.h>
+#include <netinet/in_rss.h>
+#endif
+
+#include <net/if_types.h>
+#include <net/if_vlan_var.h>
+
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet/tcp.h>
+#include <netinet/tcp_lro.h>
+#include <netinet/udp.h>
+
+#include <machine/in_cksum.h>
+#include <dev/led/led.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
+
+#include "e1000_api.h"
+#include "e1000_82575.h"
+
+/* Tunables */

/*
 * IGB_TXD: Maximum number of Transmit Descriptors
 *
 * This value is the number of transmit descriptors allocated by the driver.
 * Increasing this value allows the driver to queue more transmits. Each
 * descriptor is 16 bytes.
 * Since TDLEN should be a multiple of 128 bytes, the number of transmit
 * descriptors should meet the following condition.
 * (num_tx_desc * sizeof(struct e1000_tx_desc)) % 128 == 0
 * i.e., any multiple of eight descriptors, since each descriptor
 * is 16 bytes.
 */
#define IGB_MIN_TXD		256
#define IGB_DEFAULT_TXD		1024
#define IGB_MAX_TXD		4096

/*
 * IGB_RXD: Maximum number of Receive Descriptors
 *
 * This value is the number of receive descriptors allocated by the driver.
 * Increasing this value allows the driver to buffer more incoming packets.
 * Each descriptor is 16 bytes. A receive buffer is also allocated for each
 * descriptor. The maximum MTU size is 16110.
 * Since RDLEN should be a multiple of 128 bytes, the number of receive
 * descriptors should meet the following condition.
 * (num_rx_desc * sizeof(union e1000_adv_rx_desc)) % 128 == 0
 */
#define IGB_MIN_RXD		256
#define IGB_DEFAULT_RXD		1024
#define IGB_MAX_RXD		4096

/*
 * IGB_TIDV - Transmit Interrupt Delay Value
 * Valid Range: 0-65535 (0=off)
 * Default Value: 64
 *   This value delays the generation of transmit interrupts in units of
 *   1.024 microseconds. Transmit interrupt reduction can improve CPU
 *   efficiency if properly tuned for specific network traffic. If the
 *   system is reporting dropped transmits, this value may be set too high
 *   causing the driver to run out of available transmit descriptors.
 */
#define IGB_TIDV                         64

/*
 * IGB_TADV - Transmit Absolute Interrupt Delay Value
 * Valid Range: 0-65535 (0=off)
 * Default Value: 64
 *   This value, in units of 1.024 microseconds, limits the delay in which a
 *   transmit interrupt is generated. Useful only if IGB_TIDV is non-zero,
 *   this value ensures that an interrupt is generated after the initial
 *   packet is sent on the wire within the set amount of time. Proper tuning,
 *   along with IGB_TIDV, may improve traffic throughput in specific
 *   network conditions.
 */
#define IGB_TADV                         64

/*
 * IGB_RDTR - Receive Interrupt Delay Timer (Packet Timer)
 * Valid Range: 0-65535 (0=off)
 * Default Value: 0
 *   This value delays the generation of receive interrupts in units of 1.024
 *   microseconds. Receive interrupt reduction can improve CPU efficiency if
 *   properly tuned for specific network traffic. Increasing this value adds
 *   extra latency to frame reception and can end up decreasing the throughput
 *   of TCP traffic. If the system is reporting dropped receives, this value
 *   may be set too high, causing the driver to run out of available receive
 *   descriptors.
 *
 *   CAUTION: When setting IGB_RDTR to a value other than 0, adapters
 *            may hang (stop transmitting) under certain network conditions.
 *            If this occurs a WATCHDOG message is logged in the system
 *            event log. In addition, the controller is automatically reset,
 *            restoring the network connection. To eliminate the potential
 *            for the hang ensure that IGB_RDTR is set to 0.
 */
#define IGB_RDTR                         0

/*
 * Receive Interrupt Absolute Delay Timer (Not valid for 82542/82543/82544)
 * Valid Range: 0-65535 (0=off)
 * Default Value: 64
 *   This value, in units of 1.024 microseconds, limits the delay in which a
 *   receive interrupt is generated. Useful only if IGB_RDTR is non-zero,
 *   this value ensures that an interrupt is generated after the initial
 *   packet is received within the set amount of time. Proper tuning,
 *   along with IGB_RDTR, may improve traffic throughput in specific network
 *   conditions.
 */
#define IGB_RADV                         64

/*
 * This parameter controls the duration of transmit watchdog timer.
 */
#define IGB_WATCHDOG                   (10 * hz)

/*
 * This parameter controls when the driver calls the routine to reclaim
 * transmit descriptors. Cleaning earlier seems a win.
 */
#define IGB_TX_CLEANUP_THRESHOLD	(adapter->num_tx_desc / 2)

/*
 * This parameter controls whether or not autonegotiation is enabled.
/*
 * This parameter controls whether or not autonegotiation is enabled.
 * 0 - Disable autonegotiation
 * 1 - Enable autonegotiation
 */
#define DO_AUTO_NEG		1

/*
 * This parameter controls whether or not the driver will wait for
 * autonegotiation to complete.
 * 1 - Wait for autonegotiation to complete
 * 0 - Don't wait for autonegotiation to complete
 */
#define WAIT_FOR_AUTO_NEG_DEFAULT	0

/* Tunables -- End */

#define AUTONEG_ADV_DEFAULT	(ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
				 ADVERTISE_100_HALF | ADVERTISE_100_FULL | \
				 ADVERTISE_1000_FULL)

#define AUTO_ALL_MODES		0

/* PHY master/slave setting */
#define IGB_MASTER_SLAVE	e1000_ms_hw_default

/* Support AutoMediaDetect for Marvell M88 PHY in i354 */
#define IGB_MEDIA_RESET		(1 << 0)

/*
 * Miscellaneous constants
 */
-#define IGB_VENDOR_ID		0x8086
+#define IGB_INTEL_VENDOR_ID	0x8086

#define IGB_JUMBO_PBA		0x00000028
#define IGB_DEFAULT_PBA		0x00000030
#define IGB_SMARTSPEED_DOWNSHIFT	3
#define IGB_SMARTSPEED_MAX	15
#define IGB_MAX_LOOP		10

#define IGB_RX_PTHRESH		((hw->mac.type == e1000_i354) ? 12 : \
				  ((hw->mac.type <= e1000_82576) ? 16 : 8))
#define IGB_RX_HTHRESH		8
#define IGB_RX_WTHRESH		((hw->mac.type == e1000_82576 && \
				  adapter->msix_mem) ? 1 : 4)

#define IGB_TX_PTHRESH		((hw->mac.type == e1000_i354) ? 20 : 8)
#define IGB_TX_HTHRESH		1
#define IGB_TX_WTHRESH		((hw->mac.type != e1000_82575 && \
				  adapter->msix_mem) ? 1 : 16)

#define MAX_NUM_MULTICAST_ADDRESSES	128
#define PCI_ANY_ID		(~0U)
#define ETHER_ALIGN		2
#define IGB_TX_BUFFER_SIZE	((uint32_t) 1514)
#define IGB_FC_PAUSE_TIME	0x0680
#define IGB_EEPROM_APME		0x400

/* Queue minimum free for use */
#define IGB_QUEUE_THRESHOLD	(adapter->num_tx_desc / 8)
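/*
 * Illustrative sketch, not part of the original change: the
 * IGB_RX_*THRESH/IGB_TX_*THRESH macros above expand to expressions that
 * read locals named `hw` and `adapter`, so they are only usable inside
 * functions with both in scope.  The shift positions below assume the
 * 82575-family RXDCTL layout (PTHRESH in bits 4:0, HTHRESH in 13:8,
 * WTHRESH in 20:16); `rxr` stands in for a receive-ring pointer.
 */
#if 0	/* example only */
	u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxr->me));
	rxdctl |= IGB_RX_PTHRESH;
	rxdctl |= IGB_RX_HTHRESH << 8;
	rxdctl |= IGB_RX_WTHRESH << 16;
	E1000_WRITE_REG(hw, E1000_RXDCTL(rxr->me), rxdctl);
#endif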
/*
 * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN must be
 * a multiple of 128 bytes, so we align TDBA/RDBA on a 128-byte boundary. This
 * also optimizes the cache line size effect. H/W supports cache line sizes up
 * to 128.
 */
#define IGB_DBA_ALIGN		128

#define SPEED_MODE_BIT		(1<<21)	/* On PCI-E MACs only */

/* PCI Config defines */
#define IGB_MSIX_BAR		3

/* Defines for printing debug information */
#define DEBUG_INIT	0
#define DEBUG_IOCTL	0
#define DEBUG_HW	0

#define INIT_DEBUGOUT(S)		if (DEBUG_INIT)  printf(S "\n")
#define INIT_DEBUGOUT1(S, A)		if (DEBUG_INIT)  printf(S "\n", A)
#define INIT_DEBUGOUT2(S, A, B)		if (DEBUG_INIT)  printf(S "\n", A, B)
#define IOCTL_DEBUGOUT(S)		if (DEBUG_IOCTL) printf(S "\n")
#define IOCTL_DEBUGOUT1(S, A)		if (DEBUG_IOCTL) printf(S "\n", A)
#define IOCTL_DEBUGOUT2(S, A, B)	if (DEBUG_IOCTL) printf(S "\n", A, B)
#define HW_DEBUGOUT(S)			if (DEBUG_HW) printf(S "\n")
#define HW_DEBUGOUT1(S, A)		if (DEBUG_HW) printf(S "\n", A)
#define HW_DEBUGOUT2(S, A, B)		if (DEBUG_HW) printf(S "\n", A, B)

#define IGB_MAX_SCATTER		64
#define IGB_VFTA_SIZE		128
#define IGB_BR_SIZE		4096	/* ring buf size */
#define IGB_TSO_SIZE		(65535 + sizeof(struct ether_vlan_header))
#define IGB_TSO_SEG_SIZE	4096	/* Max dma segment size */
#define IGB_TXPBSIZE		20408
#define IGB_HDR_BUF		128
#define IGB_PKTTYPE_MASK	0x0000FFF0
#define IGB_DMCTLX_DCFLUSH_DIS	0x80000000	/* Disable DMA Coalesce Flush */
#define ETH_ZLEN		60
#define ETH_ADDR_LEN		6

/* Offload bits in mbuf flag */
#if __FreeBSD_version >= 800000
#define CSUM_OFFLOAD		(CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
#else
#define CSUM_OFFLOAD		(CSUM_IP|CSUM_TCP|CSUM_UDP)
#endif

/* Define the starting Interrupt rate per Queue */
#define IGB_INTS_PER_SEC	8000
#define IGB_DEFAULT_ITR		((1000000/IGB_INTS_PER_SEC) << 2)

#define IGB_LINK_ITR		2000
#define I210_LINK_DELAY		1000

/* Precision Time Sync (IEEE 1588) defines */
#define ETHERTYPE_IEEE1588	0x88F7
#define PICOSECS_PER_TICK	20833
#define TSYNC_PORT		319	/* UDP port for the protocol */

/*
 * Bus dma allocation structure used by
 * igb_dma_malloc and igb_dma_free.
 */
struct igb_dma_alloc {
	bus_addr_t		dma_paddr;
	caddr_t			dma_vaddr;
	bus_dma_tag_t		dma_tag;
	bus_dmamap_t		dma_map;
	bus_dma_segment_t	dma_seg;
	int			dma_nseg;
};
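/*
 * Illustrative sketch, not part of the original change: a typical
 * igb_dma_alloc lifecycle, assuming helpers shaped like the
 * igb_dma_malloc()/igb_dma_free() pair in if_igb.c.  The allocator
 * fills in the tag/map/address fields; the driver then touches
 * dma_vaddr from the CPU side and hands dma_paddr to the hardware.
 * The variable names are hypothetical.
 */
#if 0	/* example only */
	struct igb_dma_alloc dma;
	bus_size_t tsize = IGB_DEFAULT_TXD * sizeof(union e1000_adv_tx_desc);

	if (igb_dma_malloc(adapter, tsize, &dma, BUS_DMA_NOWAIT) != 0)
		return (ENOMEM);
	bzero(dma.dma_vaddr, tsize);			/* CPU's view */
	E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),	/* device's view */
	    (u32)(dma.dma_paddr & 0xffffffff));
	igb_dma_free(adapter, &dma);
#endif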
/*
** Driver queue struct: this is the interrupt container
** for the associated tx and rx ring.
*/
struct igb_queue {
	struct adapter		*adapter;
	u32			msix;		/* This queue's MSIX vector */
	u32			eims;		/* This queue's EIMS bit */
	u32			eitr_setting;
	struct resource		*res;
	void			*tag;
	struct tx_ring		*txr;
	struct rx_ring		*rxr;
	struct task		que_task;
	struct taskqueue	*tq;
	u64			irqs;
};

/*
 * The transmit ring, one per queue
 */
struct tx_ring {
	struct adapter		*adapter;
	struct mtx		tx_mtx;
	u32			me;
	int			watchdog_time;
	union e1000_adv_tx_desc	*tx_base;
	struct igb_tx_buf	*tx_buffers;
	struct igb_dma_alloc	txdma;
	volatile u16		tx_avail;
	u16			next_avail_desc;
	u16			next_to_clean;
	u16			num_desc;
	enum {
	    IGB_QUEUE_IDLE = 1,
	    IGB_QUEUE_WORKING = 2,
	    IGB_QUEUE_HUNG = 4,
	    IGB_QUEUE_DEPLETED = 8,
	}			queue_status;
	u32			txd_cmd;
	bus_dma_tag_t		txtag;
	char			mtx_name[16];
#ifndef IGB_LEGACY_TX
	struct buf_ring		*br;
	struct task		txq_task;
#endif
	u32			bytes;	/* used for AIM */
	u32			packets;
	/* Soft Stats */
	unsigned long		tso_tx;
	unsigned long		no_tx_map_avail;
	unsigned long		no_tx_dma_setup;
	u64			no_desc_avail;
	u64			total_packets;
};

/*
 * Receive ring: one per queue
 */
struct rx_ring {
	struct adapter		*adapter;
	u32			me;
	struct igb_dma_alloc	rxdma;
	union e1000_adv_rx_desc	*rx_base;
	struct lro_ctrl		lro;
	bool			lro_enabled;
	bool			hdr_split;
	struct mtx		rx_mtx;
	char			mtx_name[16];
	u32			next_to_refresh;
	u32			next_to_check;
	struct igb_rx_buf	*rx_buffers;
	bus_dma_tag_t		htag;	/* dma tag for rx head */
	bus_dma_tag_t		ptag;	/* dma tag for rx packet */
	/*
	 * First/last mbuf pointers, for
	 * collecting multisegment RX packets.
	 */
	struct mbuf		*fmp;
	struct mbuf		*lmp;
	u32			bytes;
	u32			packets;
	int			rdt;
	int			rdh;
	/* Soft stats */
	u64			rx_split_packets;
	u64			rx_discarded;
	u64			rx_packets;
	u64			rx_bytes;
};

struct adapter {
	struct ifnet		*ifp;
	struct e1000_hw		hw;
	struct e1000_osdep	osdep;
	struct device		*dev;
	struct cdev		*led_dev;

	struct resource		*pci_mem;
	struct resource		*msix_mem;
	int			memrid;

	/*
	 * Interrupt resources: this set is
	 * either used for legacy, or for Link
	 * when doing MSIX
	 */
	void			*tag;
	struct resource		*res;

	struct ifmedia		media;
	struct callout		timer;
	int			msix;
	int			if_flags;
	int			pause_frames;

	struct mtx		core_mtx;

	eventhandler_tag	vlan_attach;
	eventhandler_tag	vlan_detach;

	u16			num_vlans;
	u16			num_queues;

	/*
	** Shadow VFTA table, this is needed because
	** the real vlan filter table gets cleared during
	** a soft reset and the driver needs to be able
	** to repopulate it.
	*/
	u32			shadow_vfta[IGB_VFTA_SIZE];

	/* Info about the interface */
	u32			optics;
	u32			fc;		/* local flow ctrl setting */
	int			advertise;	/* link speeds */
	bool			link_active;
	u16			max_frame_size;
	u16			num_segs;
	u16			link_speed;
	bool			link_up;
	u32			linkvec;
	u16			link_duplex;
	u32			dmac;
	int			link_mask;

	/* Flags */
	u32			flags;

	/* Mbuf cluster size */
	u32			rx_mbuf_sz;

	/* Support for pluggable optics */
	bool			sfp_probe;
	struct task		link_task;	/* Link tasklet */
	struct task		mod_task;	/* SFP tasklet */
	struct task		msf_task;	/* Multispeed Fiber */
	struct taskqueue	*tq;

	/*
	** Queues:
	** This is the irq holder, it has
	** an RX/TX pair of rings associated
	** with it.
	*/
	struct igb_queue	*queues;

	/*
	 * Transmit rings:
	 * Allocated at run time, an array of rings.
	 */
	struct tx_ring		*tx_rings;
	u32			num_tx_desc;

	/*
	 * Receive rings:
	 * Allocated at run time, an array of rings.
	 */
	struct rx_ring		*rx_rings;
	u64			que_mask;
	u32			num_rx_desc;

	/* Multicast array memory */
	u8			*mta;

	/* Misc stats maintained by the driver */
	unsigned long		device_control;
	unsigned long		dropped_pkts;
	unsigned long		eint_mask;
	unsigned long		int_mask;
	unsigned long		link_irq;
	unsigned long		mbuf_defrag_failed;
	unsigned long		no_tx_dma_setup;
	unsigned long		packet_buf_alloc_rx;
	unsigned long		packet_buf_alloc_tx;
	unsigned long		rx_control;
	unsigned long		rx_overruns;
	unsigned long		watchdog_events;

	/* Used in pf and vf */
	void			*stats;

	int			enable_aim;
	int			has_manage;
	int			wol;
	int			rx_process_limit;
	int			tx_process_limit;
	u16			vf_ifp;		/* a VF interface */
	bool			in_detach;	/* Used only in igb_ioctl */
};

/* ******************************************************************************
 * vendor_info_array
 *
 * This array contains the list of Subvendor/Subdevice IDs on which the driver
 * should load.
 *
 * ******************************************************************************/
typedef struct _igb_vendor_info_t {
	unsigned int	vendor_id;
	unsigned int	device_id;
	unsigned int	subvendor_id;
	unsigned int	subdevice_id;
	unsigned int	index;
} igb_vendor_info_t;

struct igb_tx_buf {
	union e1000_adv_tx_desc	*eop;
	struct mbuf		*m_head;
	bus_dmamap_t		map;
};

struct igb_rx_buf {
	struct mbuf	*m_head;
	struct mbuf	*m_pack;
	bus_dmamap_t	hmap;	/* bus_dma map for header */
	bus_dmamap_t	pmap;	/* bus_dma map for packet */
};

/*
** Find the number of unrefreshed RX descriptors
*/
static inline u16
igb_rx_unrefreshed(struct rx_ring *rxr)
{
	struct adapter	*adapter = rxr->adapter;

	if (rxr->next_to_check > rxr->next_to_refresh)
		return (rxr->next_to_check - rxr->next_to_refresh - 1);
	else
		return ((adapter->num_rx_desc + rxr->next_to_check) -
		    rxr->next_to_refresh - 1);
}

#define IGB_CORE_LOCK_INIT(_sc, _name) \
	mtx_init(&(_sc)->core_mtx, _name, "IGB Core Lock", MTX_DEF)
#define IGB_CORE_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->core_mtx)
#define IGB_CORE_LOCK(_sc)		mtx_lock(&(_sc)->core_mtx)
#define IGB_CORE_UNLOCK(_sc)		mtx_unlock(&(_sc)->core_mtx)
#define IGB_CORE_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->core_mtx, MA_OWNED)

#define IGB_TX_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->tx_mtx)
#define IGB_TX_LOCK(_sc)		mtx_lock(&(_sc)->tx_mtx)
#define IGB_TX_UNLOCK(_sc)		mtx_unlock(&(_sc)->tx_mtx)
#define IGB_TX_TRYLOCK(_sc)		mtx_trylock(&(_sc)->tx_mtx)
#define IGB_TX_LOCK_ASSERT(_sc)		mtx_assert(&(_sc)->tx_mtx, MA_OWNED)

#define IGB_RX_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->rx_mtx)
#define IGB_RX_LOCK(_sc)		mtx_lock(&(_sc)->rx_mtx)
#define IGB_RX_UNLOCK(_sc)		mtx_unlock(&(_sc)->rx_mtx)
#define IGB_RX_LOCK_ASSERT(_sc)		mtx_assert(&(_sc)->rx_mtx, MA_OWNED)

#define UPDATE_VF_REG(reg, last, cur)			\
{							\
	u32 new = E1000_READ_REG(hw, reg);		\
	if (new < last)					\
		cur += 0x100000000LL;			\
	last = new;					\
	cur &= 0xFFFFFFFF00000000LL;			\
	cur |= new;					\
}

#if __FreeBSD_version >= 800000 && __FreeBSD_version < 800504
static __inline int
drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br)
{
#ifdef ALTQ
	if (ALTQ_IS_ENABLED(&ifp->if_snd))
		return (1);
#endif
	return (!buf_ring_empty(br));
}
#endif

-#endif /* _IGB_H_DEFINED_ */
+#endif /* _IF_IGB_H_ */
Index: stable/10
===================================================================
--- stable/10	(revision 296054)
+++ stable/10	(revision 296055)

Property changes on: stable/10
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
   Merged /head:r287465,295323