Index: head/sys/dev/axgbe/xgbe-dev.c =================================================================== --- head/sys/dev/axgbe/xgbe-dev.c (revision 368304) +++ head/sys/dev/axgbe/xgbe-dev.c (revision 368305) @@ -1,2845 +1,2845 @@ /* * AMD 10Gb Ethernet driver * * Copyright (c) 2014-2016,2020 Advanced Micro Devices, Inc. * * This file is available to you under your choice of the following two * licenses: * * License 1: GPLv2 * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or (at * your option) any later version. * * This file is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . * * This file incorporates work covered by the following copyright and * permission notice: * The Synopsys DWC ETHER XGMAC Software Driver and documentation * (hereinafter "Software") is an unsupported proprietary work of Synopsys, * Inc. unless otherwise expressly agreed to in writing between Synopsys * and you. * * The Software IS NOT an item of Licensed Software or Licensed Product * under any End User Software License Agreement or Agreement for Licensed * Product with Synopsys or any supplement thereto. 
Permission is hereby * granted, free of charge, to any person obtaining a copy of this software * annotated with this license and the Software, to deal in the Software * without restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished * to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * * License 2: Modified BSD * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Advanced Micro Devices, Inc. 
nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * This file incorporates work covered by the following copyright and * permission notice: * The Synopsys DWC ETHER XGMAC Software Driver and documentation * (hereinafter "Software") is an unsupported proprietary work of Synopsys, * Inc. unless otherwise expressly agreed to in writing between Synopsys * and you. * * The Software IS NOT an item of Licensed Software or Licensed Product * under any End User Software License Agreement or Agreement for Licensed * Product with Synopsys or any supplement thereto. Permission is hereby * granted, free of charge, to any person obtaining a copy of this software * annotated with this license and the Software, to deal in the Software * without restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished * to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. 
* * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "xgbe.h" #include "xgbe-common.h" #include static inline unsigned int xgbe_get_max_frame(struct xgbe_prv_data *pdata) { return (if_getmtu(pdata->netdev) + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); } static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata, unsigned int usec) { unsigned long rate; unsigned int ret; rate = pdata->sysclk_rate; /* * Convert the input usec value to the watchdog timer value. Each * watchdog timer value is equivalent to 256 clock cycles. * Calculate the required value as: * ( usec * ( system_clock_mhz / 10^6 ) / 256 */ ret = (usec * (rate / 1000000)) / 256; return (ret); } static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata, unsigned int riwt) { unsigned long rate; unsigned int ret; rate = pdata->sysclk_rate; /* * Convert the input watchdog timer value to the usec value. Each * watchdog timer value is equivalent to 256 clock cycles. 
* Calculate the required value as: * ( riwt * 256 ) / ( system_clock_mhz / 10^6 ) */ ret = (riwt * 256) / (rate / 1000000); return (ret); } static int xgbe_config_pbl_val(struct xgbe_prv_data *pdata) { unsigned int pblx8, pbl; unsigned int i; pblx8 = DMA_PBL_X8_DISABLE; pbl = pdata->pbl; if (pdata->pbl > 32) { pblx8 = DMA_PBL_X8_ENABLE; pbl >>= 3; } for (i = 0; i < pdata->channel_count; i++) { XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, PBLX8, pblx8); if (pdata->channel[i]->tx_ring) XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, PBL, pbl); if (pdata->channel[i]->rx_ring) XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, PBL, pbl); } return (0); } static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata) { unsigned int i; for (i = 0; i < pdata->channel_count; i++) { if (!pdata->channel[i]->tx_ring) break; XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, OSP, pdata->tx_osp_mode); } return (0); } static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val) { unsigned int i; for (i = 0; i < pdata->rx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val); return (0); } static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val) { unsigned int i; for (i = 0; i < pdata->tx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val); return (0); } static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata, unsigned int val) { unsigned int i; for (i = 0; i < pdata->rx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val); return (0); } static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata, unsigned int val) { unsigned int i; for (i = 0; i < pdata->tx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val); return (0); } static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata) { unsigned int i; for (i = 0; i < pdata->channel_count; i++) { if (!pdata->channel[i]->rx_ring) break; XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RIWT, 
RWT, pdata->rx_riwt); } return (0); } static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata) { return (0); } static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata) { unsigned int i; for (i = 0; i < pdata->channel_count; i++) { if (!pdata->channel[i]->rx_ring) break; XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, RBSZ, pdata->rx_buf_size); } } static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata) { unsigned int i; for (i = 0; i < pdata->channel_count; i++) { if (!pdata->channel[i]->tx_ring) break; axgbe_printf(0, "Enabling TSO in channel %d\n", i); XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, TSE, 1); } } static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata) { unsigned int i; for (i = 0; i < pdata->channel_count; i++) { if (!pdata->channel[i]->rx_ring) break; XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 1); } XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE); } static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type, unsigned int index, unsigned int val) { unsigned int wait; int ret = 0; mtx_lock(&pdata->rss_mutex); if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) { ret = -EBUSY; goto unlock; } XGMAC_IOWRITE(pdata, MAC_RSSDR, val); XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index); XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type); XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0); XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1); wait = 1000; while (wait--) { if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) goto unlock; DELAY(1000); } ret = -EBUSY; unlock: mtx_unlock(&pdata->rss_mutex); return (ret); } static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata) { unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(uint32_t); unsigned int *key = (unsigned int *)&pdata->rss_key; int ret; while (key_regs--) { ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE, key_regs, *key++); if (ret) return (ret); } return (0); } static int xgbe_write_rss_lookup_table(struct 
xgbe_prv_data *pdata) { unsigned int i; int ret; for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) { ret = xgbe_write_rss_reg(pdata, XGBE_RSS_LOOKUP_TABLE_TYPE, i, pdata->rss_table[i]); if (ret) return (ret); } return (0); } static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const uint8_t *key) { memcpy(pdata->rss_key, key, sizeof(pdata->rss_key)); return (xgbe_write_rss_hash_key(pdata)); } static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata, const uint32_t *table) { unsigned int i; for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]); return (xgbe_write_rss_lookup_table(pdata)); } static int xgbe_enable_rss(struct xgbe_prv_data *pdata) { int ret; if (!pdata->hw_feat.rss) return (-EOPNOTSUPP); /* Program the hash key */ ret = xgbe_write_rss_hash_key(pdata); if (ret) return (ret); /* Program the lookup table */ ret = xgbe_write_rss_lookup_table(pdata); if (ret) return (ret); /* Set the RSS options */ XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options); /* Enable RSS */ XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1); axgbe_printf(0, "RSS Enabled\n"); return (0); } static int xgbe_disable_rss(struct xgbe_prv_data *pdata) { if (!pdata->hw_feat.rss) return (-EOPNOTSUPP); XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0); axgbe_printf(0, "RSS Disabled\n"); return (0); } static void xgbe_config_rss(struct xgbe_prv_data *pdata) { int ret; if (!pdata->hw_feat.rss) return; /* Check if the interface has RSS capability */ if (pdata->enable_rss) ret = xgbe_enable_rss(pdata); else ret = xgbe_disable_rss(pdata); if (ret) axgbe_error("error configuring RSS, RSS disabled\n"); } static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata) { unsigned int max_q_count, q_count; unsigned int reg, reg_val; unsigned int i; /* Clear MTL flow control */ for (i = 0; i < pdata->rx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0); /* Clear MAC flow control */ max_q_count = 
XGMAC_MAX_FLOW_CONTROL_QUEUES; q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); reg = MAC_Q0TFCR; for (i = 0; i < q_count; i++) { reg_val = XGMAC_IOREAD(pdata, reg); XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0); XGMAC_IOWRITE(pdata, reg, reg_val); reg += MAC_QTFCR_INC; } return (0); } static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata) { unsigned int max_q_count, q_count; unsigned int reg, reg_val; unsigned int i; /* Set MTL flow control */ for (i = 0; i < pdata->rx_q_count; i++) { unsigned int ehfc = 0; if (pdata->rx_rfd[i]) { /* Flow control thresholds are established */ /* TODO - enable pfc/ets support */ ehfc = 1; } XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc); axgbe_printf(1, "flow control %s for RXq%u\n", ehfc ? "enabled" : "disabled", i); } /* Set MAC flow control */ max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); reg = MAC_Q0TFCR; for (i = 0; i < q_count; i++) { reg_val = XGMAC_IOREAD(pdata, reg); /* Enable transmit flow control */ XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1); /* Set pause time */ XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff); XGMAC_IOWRITE(pdata, reg, reg_val); reg += MAC_QTFCR_INC; } return (0); } static int xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata) { XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0); return (0); } static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata) { XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1); return (0); } static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata) { if (pdata->tx_pause) xgbe_enable_tx_flow_control(pdata); else xgbe_disable_tx_flow_control(pdata); return (0); } static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata) { if (pdata->rx_pause) xgbe_enable_rx_flow_control(pdata); else xgbe_disable_rx_flow_control(pdata); return (0); } static void xgbe_config_flow_control(struct xgbe_prv_data *pdata) { xgbe_config_tx_flow_control(pdata); 
xgbe_config_rx_flow_control(pdata); XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0); } static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata) { struct xgbe_channel *channel; unsigned int i, ver; /* Set the interrupt mode if supported */ if (pdata->channel_irq_mode) XGMAC_IOWRITE_BITS(pdata, DMA_MR, INTM, pdata->channel_irq_mode); ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER); for (i = 0; i < pdata->channel_count; i++) { channel = pdata->channel[i]; /* Clear all the interrupts which are set */ XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, XGMAC_DMA_IOREAD(channel, DMA_CH_SR)); /* Clear all interrupt enable bits */ channel->curr_ier = 0; /* Enable following interrupts * NIE - Normal Interrupt Summary Enable * AIE - Abnormal Interrupt Summary Enable * FBEE - Fatal Bus Error Enable */ if (ver < 0x21) { XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE20, 1); XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE20, 1); } else { XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE, 1); XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE, 1); } XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1); if (channel->tx_ring) { /* Enable the following Tx interrupts * TIE - Transmit Interrupt Enable (unless using * per channel interrupts in edge triggered * mode) */ if (!pdata->per_channel_irq || pdata->channel_irq_mode) XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1); } if (channel->rx_ring) { /* Enable following Rx interrupts * RBUE - Receive Buffer Unavailable Enable * RIE - Receive Interrupt Enable (unless using * per channel interrupts in edge triggered * mode) */ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1); if (!pdata->per_channel_irq || pdata->channel_irq_mode) XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1); } XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier); } } static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata) { unsigned int mtl_q_isr; unsigned int q_count, i; q_count = max(pdata->hw_feat.tx_q_cnt, 
pdata->hw_feat.rx_q_cnt);

	for (i = 0; i < q_count; i++) {
		/* Clear all the interrupts which are set */
		mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
		XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);

		/* No MTL interrupts to be enabled */
		XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
	}
}

/*
 * Enable the MAC-level interrupt sources: the timestamp interrupt
 * (MAC_IER.TSIE), every MMC receive/transmit counter interrupt, and the
 * MDIO single-command-completion interrupt.
 */
static void
xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
{
	unsigned int mac_ier = 0;

	/* Enable Timestamp interrupt */
	XGMAC_SET_BITS(mac_ier, MAC_IER, TSIE, 1);

	XGMAC_IOWRITE(pdata, MAC_IER, mac_ier);

	/* Enable all counter interrupts */
	XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff);
	XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff);

	/* Enable MDIO single command completion interrupt */
	XGMAC_IOWRITE_BITS(pdata, MAC_MDIOIER, SNGLCOMPIE, 1);
}

/*
 * Program the MAC_TCR speed-select (SS) field for the requested link speed.
 * Returns 0 on success or -EINVAL for an unsupported speed; the register is
 * only written when the current SS value differs from the requested one.
 */
static int
xgbe_set_speed(struct xgbe_prv_data *pdata, int speed)
{
	unsigned int ss;

	switch (speed) {
	case SPEED_1000:
		ss = 0x03;
		break;
	case SPEED_2500:
		ss = 0x02;
		break;
	case SPEED_10000:
		ss = 0x00;
		break;
	default:
		return (-EINVAL);
	}

	if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss)
		XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss);

	return (0);
}

/*
 * Enable hardware VLAN tag stripping for received C-TAG (0x8100) frames;
 * the stripped tag is delivered in the Rx descriptor.  Always returns 0.
 */
static int
xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
	/* Put the VLAN tag in the Rx descriptor */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);

	/* Don't check the VLAN type */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);

	/* Check only C-TAG (0x8100) packets */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);

	/* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);

	/* Enable VLAN tag stripping */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);

	axgbe_printf(0, "VLAN Stripping Enabled\n");
	return (0);
}

/* Disable hardware VLAN tag stripping (MAC_VLANTR.EVLS = 0). */
static int
xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);

	axgbe_printf(0, "VLAN Stripping Disabled\n");
	return (0);
}

/* Enable hash-table based Rx VLAN filtering (continues on the next line). */
static int
xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
	/* Enable VLAN filtering */
XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);

	/* Enable VLAN Hash Table filtering */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);

	/* Disable VLAN tag inverse matching */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);

	/* Only filter on the lower 12-bits of the VLAN tag */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);

	/* In order for the VLAN Hash Table filtering to be effective, the
	 * VLAN tag identifier in the VLAN Tag Register must not be zero.
	 * Set the VLAN tag identifier to "1" to enable the VLAN Hash Table
	 * filtering.  This implies that a VLAN tag of 1 will always pass
	 * filtering.
	 */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);

	axgbe_printf(0, "VLAN filtering Enabled\n");
	return (0);
}

/* Disable Rx VLAN filtering (MAC_PFR.VTFE = 0).  Always returns 0. */
static int
xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
	/* Disable VLAN filtering */
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);

	axgbe_printf(0, "VLAN filtering Disabled\n");
	return (0);
}

/*
 * Bit-serial little-endian CRC-32 of the (little-endian) VLAN ID.  Only
 * get_bitmask_order(VLAN_VID_MASK) bits of the input are folded in, one bit
 * per iteration, using the CRC32_POLY_LE polynomial.  The result is NOT
 * final-inverted here; the caller post-processes it (see
 * xgbe_update_vlan_hash_table below).
 */
static uint32_t
xgbe_vid_crc32_le(__le16 vid_le)
{
	uint32_t crc = ~0;
	uint32_t temp = 0;
	unsigned char *data = (unsigned char *)&vid_le;
	unsigned char data_byte = 0;
	int i, bits;

	bits = get_bitmask_order(VLAN_VID_MASK);
	for (i = 0; i < bits; i++) {

		/* Load the next input byte every 8 bits. */
		if ((i % 8) == 0)
			data_byte = data[i / 8];

		temp = ((crc & 1) ^ data_byte) & 1;
		crc >>= 1;
		data_byte >>= 1;

		if (temp)
			crc ^= CRC32_POLY_LE;
	}

	return (crc);
}

/*
 * Rebuild the 16-bit VLAN hash filter value from the set of active VLAN IDs
 * and write it to MAC_VLANHTR.VLHT.  Each active VID selects one of 16 hash
 * buckets via the top 4 bits of its bit-reversed CRC-32.
 */
static int
xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
{
	uint32_t crc;
	uint16_t vid;
	uint16_t vlan_hash_table = 0;
	__le16 vid_le = 0;

	axgbe_printf(1, "%s: Before updating VLANHTR 0x%x\n", __func__,
	    XGMAC_IOREAD(pdata, MAC_VLANHTR));

	/* Generate the VLAN Hash Table value */
	for_each_set_bit(vid, pdata->active_vlans, VLAN_NVID) {

		/* Get the CRC32 value of the VLAN ID */
		vid_le = cpu_to_le16(vid);
		crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;

		vlan_hash_table |= (1 << crc);
		axgbe_printf(1, "%s: vid 0x%x vid_le 0x%x crc 0x%x "
		    "vlan_hash_table 0x%x\n", __func__, vid, vid_le, crc,
		    vlan_hash_table);
	}

	/* Set the VLAN Hash Table filtering register */
XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);

	axgbe_printf(1, "%s: After updating VLANHTR 0x%x\n", __func__,
	    XGMAC_IOREAD(pdata, MAC_VLANHTR));

	return (0);
}

/*
 * Set or clear promiscuous mode (MAC_PFR.PR).  Because the hardware still
 * performs VLAN filtering while promiscuous, Rx VLAN filtering is disabled
 * on entry and re-enabled on exit (only if the interface has
 * IFCAP_VLAN_HWFILTER enabled).  Returns 0, including when the requested
 * state is already current.
 */
static int
xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata, unsigned int enable)
{
	unsigned int val = enable ? 1 : 0;

	/* Nothing to do if the filter bit already matches. */
	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
		return (0);

	/* NOTE(review): "promiscous" (sic) is a runtime log string, kept
	 * byte-identical. */
	axgbe_printf(1, "%s promiscous mode\n", enable? "entering" : "leaving");

	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);

	/* Hardware will still perform VLAN filtering in promiscuous mode */
	if (enable) {
		axgbe_printf(1, "Disabling rx vlan filtering\n");
		xgbe_disable_rx_vlan_filtering(pdata);
	} else {
		if ((if_getcapenable(pdata->netdev) & IFCAP_VLAN_HWFILTER)) {
			axgbe_printf(1, "Enabling rx vlan filtering\n");
			xgbe_enable_rx_vlan_filtering(pdata);
		}
	}

	return (0);
}

/*
 * Set or clear the pass-all-multicast filter bit (MAC_PFR.PM).  Returns 0,
 * including when the requested state is already current.
 */
static int
xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata, unsigned int enable)
{
	unsigned int val = enable ? 1 : 0;

	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
		return (0);

	axgbe_printf(1,"%s allmulti mode\n", enable ?
"entering" : "leaving"); XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val); return (0); } static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata, char *addr, unsigned int *mac_reg) { unsigned int mac_addr_hi, mac_addr_lo; uint8_t *mac_addr; mac_addr_lo = 0; mac_addr_hi = 0; if (addr) { mac_addr = (uint8_t *)&mac_addr_lo; mac_addr[0] = addr[0]; mac_addr[1] = addr[1]; mac_addr[2] = addr[2]; mac_addr[3] = addr[3]; mac_addr = (uint8_t *)&mac_addr_hi; mac_addr[0] = addr[4]; mac_addr[1] = addr[5]; axgbe_printf(1, "adding mac address %pM at %#x\n", addr, *mac_reg); XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1); } XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi); *mac_reg += MAC_MACA_INC; XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo); *mac_reg += MAC_MACA_INC; } static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata) { unsigned int mac_reg; unsigned int addn_macs; mac_reg = MAC_MACA1HR; addn_macs = pdata->hw_feat.addn_mac; xgbe_set_mac_reg(pdata, pdata->mac_addr, &mac_reg); addn_macs--; /* Clear remaining additional MAC address entries */ while (addn_macs--) xgbe_set_mac_reg(pdata, NULL, &mac_reg); } static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata) { /* TODO - add support to set mac hash table */ xgbe_set_mac_addn_addrs(pdata); return (0); } static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, uint8_t *addr) { unsigned int mac_addr_hi, mac_addr_lo; mac_addr_hi = (addr[5] << 8) | (addr[4] << 0); mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | (addr[0] << 0); XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi); XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo); return (0); } static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata) { unsigned int pr_mode, am_mode; pr_mode = ((pdata->netdev->if_drv_flags & IFF_PPROMISC) != 0); am_mode = ((pdata->netdev->if_drv_flags & IFF_ALLMULTI) != 0); xgbe_set_promiscuous_mode(pdata, pr_mode); xgbe_set_all_multicast_mode(pdata, am_mode); xgbe_add_mac_addresses(pdata); return (0); } static int 
xgbe_clr_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
{
	unsigned int reg;

	/* Only GPIO pins 0-15 are addressable. */
	if (gpio > 15)
		return (-EINVAL);

	reg = XGMAC_IOREAD(pdata, MAC_GPIOSR);

	/* The output bits occupy the upper half of MAC_GPIOSR; clear the
	 * bit to drive the pin low. */
	reg &= ~(1 << (gpio + 16));
	XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg);

	return (0);
}

/*
 * Drive GPIO pin 'gpio' high by setting its output bit in the upper half of
 * MAC_GPIOSR.  Returns -EINVAL for pins > 15, otherwise 0.
 */
static int
xgbe_set_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
{
	unsigned int reg;

	if (gpio > 15)
		return (-EINVAL);

	reg = XGMAC_IOREAD(pdata, MAC_GPIOSR);
	reg |= (1 << (gpio + 16));
	XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg);

	return (0);
}

/*
 * Read an XPCS MMD register through the v2 indirect window: write the
 * window index to the window-select register, then read the 16-bit value at
 * the windowed offset.  A clause-45 mmd_reg carries the full address; a
 * clause-22 one is combined with pdata->mdio_mmd.  The xpcs_lock serializes
 * the two-phase (select + access) sequence.
 */
static int
xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad, int mmd_reg)
{
	unsigned long flags;
	unsigned int mmd_address, index, offset;
	int mmd_data;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 16-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 1 bit and reading 16 bits of data.
	 */
	mmd_address <<= 1;
	index = mmd_address & ~pdata->xpcs_window_mask;
	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);

	spin_lock_irqsave(&pdata->xpcs_lock, flags);
	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
	mmd_data = XPCS16_IOREAD(pdata, offset);
	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);

	return (mmd_data);
}

/*
 * Write an XPCS MMD register through the v2 indirect window; the mirror of
 * xgbe_read_mmd_regs_v2 above (same address formation and locking).
 */
static void
xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad, int mmd_reg,
    int mmd_data)
{
	unsigned long flags;
	unsigned int mmd_address, index, offset;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying
	 * management interface uses indirect addressing to access the MMD
	 * register sets.
This requires accessing of the PCS register in two * phases, an address phase and a data phase. * * The mmio interface is based on 16-bit offsets and values. All * register offsets must therefore be adjusted by left shifting the * offset 1 bit and writing 16 bits of data. */ mmd_address <<= 1; index = mmd_address & ~pdata->xpcs_window_mask; offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); spin_lock_irqsave(&pdata->xpcs_lock, flags); XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index); XPCS16_IOWRITE(pdata, offset, mmd_data); spin_unlock_irqrestore(&pdata->xpcs_lock, flags); } static int xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad, int mmd_reg) { unsigned long flags; unsigned int mmd_address; int mmd_data; if (mmd_reg & MII_ADDR_C45) mmd_address = mmd_reg & ~MII_ADDR_C45; else mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); /* The PCS registers are accessed using mmio. The underlying APB3 * management interface uses indirect addressing to access the MMD * register sets. This requires accessing of the PCS register in two * phases, an address phase and a data phase. * * The mmio interface is based on 32-bit offsets and values. All * register offsets must therefore be adjusted by left shifting the * offset 2 bits and reading 32 bits of data. */ spin_lock_irqsave(&pdata->xpcs_lock, flags); XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8); mmd_data = XPCS32_IOREAD(pdata, (mmd_address & 0xff) << 2); spin_unlock_irqrestore(&pdata->xpcs_lock, flags); return (mmd_data); } static void xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad, int mmd_reg, int mmd_data) { unsigned int mmd_address; unsigned long flags; if (mmd_reg & MII_ADDR_C45) mmd_address = mmd_reg & ~MII_ADDR_C45; else mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); /* The PCS registers are accessed using mmio. The underlying APB3 * management interface uses indirect addressing to access the MMD * register sets. 
This requires accessing of the PCS register in two * phases, an address phase and a data phase. * * The mmio interface is based on 32-bit offsets and values. All * register offsets must therefore be adjusted by left shifting the * offset 2 bits and writing 32 bits of data. */ spin_lock_irqsave(&pdata->xpcs_lock, flags); XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8); XPCS32_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data); spin_unlock_irqrestore(&pdata->xpcs_lock, flags); } static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad, int mmd_reg) { switch (pdata->vdata->xpcs_access) { case XGBE_XPCS_ACCESS_V1: return (xgbe_read_mmd_regs_v1(pdata, prtad, mmd_reg)); case XGBE_XPCS_ACCESS_V2: default: return (xgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg)); } } static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad, int mmd_reg, int mmd_data) { switch (pdata->vdata->xpcs_access) { case XGBE_XPCS_ACCESS_V1: return (xgbe_write_mmd_regs_v1(pdata, prtad, mmd_reg, mmd_data)); case XGBE_XPCS_ACCESS_V2: default: return (xgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data)); } } static unsigned int xgbe_create_mdio_sca(int port, int reg) { unsigned int mdio_sca, da; da = (reg & MII_ADDR_C45) ? 
reg >> 16 : 0; mdio_sca = 0; XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg); XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port); XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da); return (mdio_sca); } static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, int reg, uint16_t val) { unsigned int mdio_sca, mdio_sccd; mtx_lock_spin(&pdata->mdio_mutex); mdio_sca = xgbe_create_mdio_sca(addr, reg); XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); mdio_sccd = 0; XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val); XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1); XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1); XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd); if (msleep_spin(pdata, &pdata->mdio_mutex, "mdio_xfer", hz / 8) == EWOULDBLOCK) { axgbe_error("%s: MDIO write error\n", __func__); mtx_unlock_spin(&pdata->mdio_mutex); return (-ETIMEDOUT); } mtx_unlock_spin(&pdata->mdio_mutex); return (0); } static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, int reg) { unsigned int mdio_sca, mdio_sccd; mtx_lock_spin(&pdata->mdio_mutex); mdio_sca = xgbe_create_mdio_sca(addr, reg); XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); mdio_sccd = 0; XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3); XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1); XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd); if (msleep_spin(pdata, &pdata->mdio_mutex, "mdio_xfer", hz / 8) == EWOULDBLOCK) { axgbe_error("%s: MDIO read error\n", __func__); mtx_unlock_spin(&pdata->mdio_mutex); return (-ETIMEDOUT); } mtx_unlock_spin(&pdata->mdio_mutex); return (XGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA)); } static int xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port, enum xgbe_mdio_mode mode) { unsigned int reg_val = XGMAC_IOREAD(pdata, MAC_MDIOCL22R); switch (mode) { case XGBE_MDIO_MODE_CL22: if (port > XGMAC_MAX_C22_PORT) return (-EINVAL); reg_val |= (1 << port); break; case XGBE_MDIO_MODE_CL45: break; default: return (-EINVAL); } XGMAC_IOWRITE(pdata, MAC_MDIOCL22R, 
reg_val); return (0); } static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc) { return (!XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN)); } static int xgbe_disable_rx_csum(struct xgbe_prv_data *pdata) { XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0); axgbe_printf(0, "Receive checksum offload Disabled\n"); return (0); } static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata) { XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1); axgbe_printf(0, "Receive checksum offload Enabled\n"); return (0); } static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata) { struct xgbe_ring_desc *rdesc = rdata->rdesc; /* Reset the Tx descriptor * Set buffer 1 (lo) address to zero * Set buffer 1 (hi) address to zero * Reset all other control bits (IC, TTSE, B2L & B1L) * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc) */ rdesc->desc0 = 0; rdesc->desc1 = 0; rdesc->desc2 = 0; rdesc->desc3 = 0; wmb(); } static void xgbe_tx_desc_init(struct xgbe_channel *channel) { struct xgbe_ring *ring = channel->tx_ring; struct xgbe_ring_data *rdata; int i; int start_index = ring->cur; /* Initialze all descriptors */ for (i = 0; i < ring->rdesc_count; i++) { rdata = XGBE_GET_DESC_DATA(ring, i); /* Initialize Tx descriptor */ xgbe_tx_desc_reset(rdata); } /* Update the total number of Tx descriptors */ XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1); /* Update the starting address of descriptor ring */ rdata = XGBE_GET_DESC_DATA(ring, start_index); XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI, upper_32_bits(rdata->rdata_paddr)); XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO, lower_32_bits(rdata->rdata_paddr)); } static void xgbe_rx_desc_init(struct xgbe_channel *channel) { struct xgbe_ring *ring = channel->rx_ring; struct xgbe_ring_data *rdata; unsigned int start_index = ring->cur; /* * Just set desc_count and the starting address of the desc list * here. Rest will be done as part of the txrx path. 
	 */

	/* Update the total number of Rx descriptors */
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);

	/* Update the starting address of descriptor ring */
	rdata = XGBE_GET_DESC_DATA(ring, start_index);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
	    upper_32_bits(rdata->rdata_paddr));
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
	    lower_32_bits(rdata->rdata_paddr));
}

/*
 * Parse the Rx descriptor at ring->cur and record the results in
 * ring->packet_data.  Returns 1 if the descriptor is still owned by the
 * hardware (nothing to do), 0 otherwise.  A packet may span several
 * descriptors/calls; the FIRST/LAST attributes track the span.
 */
static int
xgbe_dev_read(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct xgbe_packet_data *packet = &ring->packet_data;
	/* l34t is only assigned in the RSS branch below but is printed on
	 * the error paths, so it must start out initialized */
	unsigned int err, etlt, l34t = 0;

	axgbe_printf(1, "-->xgbe_dev_read: cur = %d\n", ring->cur);

	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	rdesc = rdata->rdesc;

	/* Check for data availability */
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
		return (1);

	/* Order the OWN check before reading the rest of the descriptor */
	rmb();

	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
		/* TODO - Timestamp Context Descriptor */
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
		    CONTEXT, 1);
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
		    CONTEXT_NEXT, 0);
		return (0);
	}

	/* Normal Descriptor, be sure Context Descriptor bit is off */
	XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0);

	/* Indicate if a Context Descriptor is next */
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA))
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
		    CONTEXT_NEXT, 1);

	/* Get the header length (only present on the first descriptor) */
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
		    FIRST, 1);
		rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
		    RX_NORMAL_DESC2, HL);
		if (rdata->rx.hdr_len)
			pdata->ext_stats.rx_split_header_packets++;
	} else
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
		    FIRST, 0);

	/* Get the RSS hash and translate the hardware L3/L4 type into the
	 * corresponding mbuf hash type */
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
		    RSS_HASH, 1);

		packet->rss_hash = le32_to_cpu(rdesc->desc1);

		l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
		switch (l34t) {
		case RX_DESC3_L34T_IPV4_TCP:
			packet->rss_hash_type = M_HASHTYPE_RSS_TCP_IPV4;
			break;
		case RX_DESC3_L34T_IPV4_UDP:
			packet->rss_hash_type = M_HASHTYPE_RSS_UDP_IPV4;
			break;
		case RX_DESC3_L34T_IPV6_TCP:
			packet->rss_hash_type = M_HASHTYPE_RSS_TCP_IPV6;
			break;
		case RX_DESC3_L34T_IPV6_UDP:
			packet->rss_hash_type = M_HASHTYPE_RSS_UDP_IPV6;
			break;
		default:
			packet->rss_hash_type = M_HASHTYPE_OPAQUE;
			break;
		}
	}

	/* Not all the data has been transferred for this packet */
	if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) {
		/* This is not the last of the data for this packet */
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
		    LAST, 0);
		return (0);
	}

	/* This is the last of the data for this packet */
	XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST, 1);

	/* Get the packet length */
	rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);

	/* Set checksum done indicator as appropriate */
	/* TODO - add tunneling support */
	XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CSUM_DONE, 1);

	/* Check for errors (only valid in last descriptor) */
	err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
	etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
	axgbe_printf(1, "%s: err=%u, etlt=%#x\n", __func__, err, etlt);

	if (!err || !etlt) {
		/* No error if err is 0 or etlt is 0 */
		if (etlt == 0x09) {
			/* ETLT 0x09 with no error: VLAN tag present;
			 * extract the outer VLAN tag from desc0 */
			XGMAC_SET_BITS(packet->attributes,
			    RX_PACKET_ATTRIBUTES, VLAN_CTAG, 1);
			packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
			    RX_NORMAL_DESC0, OVT);
			axgbe_printf(1, "vlan-ctag=%#06x\n",
			    packet->vlan_ctag);
		}
	} else {
		unsigned int tnp = XGMAC_GET_BITS(packet->attributes,
		    RX_PACKET_ATTRIBUTES, TNP);

		if ((etlt == 0x05) || (etlt == 0x06)) {
			/* Checksum error - let the stack verify it */
			axgbe_printf(1, "%s: err1 l34t %d err 0x%x etlt 0x%x\n",
			    __func__, l34t, err, etlt);
			XGMAC_SET_BITS(packet->attributes,
			    RX_PACKET_ATTRIBUTES, CSUM_DONE, 0);
			XGMAC_SET_BITS(packet->attributes,
			    RX_PACKET_ATTRIBUTES, TNPCSUM_DONE, 0);
			pdata->ext_stats.rx_csum_errors++;
		} else if (tnp && ((etlt == 0x09) || (etlt == 0x0a))) {
			/* Tunneled packet checksum error */
			axgbe_printf(1, "%s: err2 l34t %d err 0x%x etlt 0x%x\n",
			    __func__, l34t, err, etlt);
			XGMAC_SET_BITS(packet->attributes,
			    RX_PACKET_ATTRIBUTES, CSUM_DONE, 0);
			XGMAC_SET_BITS(packet->attributes,
			    RX_PACKET_ATTRIBUTES, TNPCSUM_DONE, 0);
			pdata->ext_stats.rx_vxlan_csum_errors++;
		} else {
			/* Any other error: dump debug state and mark the
			 * packet as a frame error */
			axgbe_printf(1, "%s: tnp %d l34t %d err 0x%x etlt 0x%x\n",
			    __func__, tnp, l34t, err, etlt);
			axgbe_printf(1, "%s: Channel: %d SR 0x%x DSR 0x%x \n",
			    __func__, channel->queue_index,
			    XGMAC_DMA_IOREAD(channel, DMA_CH_SR),
			    XGMAC_DMA_IOREAD(channel, DMA_CH_DSR));
			axgbe_printf(1, "%s: ring cur %d dirty %d\n",
			    __func__, ring->cur, ring->dirty);
			axgbe_printf(1, "%s: Desc 0x%08x-0x%08x-0x%08x-0x%08x\n",
			    __func__, rdesc->desc0, rdesc->desc1,
			    rdesc->desc2, rdesc->desc3);
			XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS,
			    FRAME, 1);
		}
	}

	axgbe_printf(1, "<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n",
	    channel->name, ring->cur & (ring->rdesc_count - 1), ring->cur);

	return (0);
}

static int
xgbe_is_context_desc(struct xgbe_ring_desc *rdesc)
{
	/* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
	return (XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT));
}

static int
xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
{
	/* Rx and Tx share LD bit, so check TDES3.LD bit */
	return (XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD));
}

/*
 * Enable the DMA channel interrupt(s) selected by 'int_id' by updating
 * the cached enable mask and writing it to the channel's DMA_CH_IER.
 * Returns 0, or -1 for an unknown interrupt id.
 */
static int
xgbe_enable_int(struct xgbe_channel *channel, enum xgbe_int int_id)
{
	struct xgbe_prv_data *pdata = channel->pdata;

	axgbe_printf(1, "enable_int: DMA_CH_IER read - 0x%x\n",
	    channel->curr_ier);

	switch (int_id) {
	case XGMAC_INT_DMA_CH_SR_TI:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_TPS:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_TBU:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_RI:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_RBU:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_RPS:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_TI_RI:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1);
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_FBE:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1);
		break;
	case XGMAC_INT_DMA_ALL:
		/* Restore the set saved by a previous "disable all" */
		channel->curr_ier |= channel->saved_ier;
		break;
	default:
		return (-1);
	}

	XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);

	axgbe_printf(1, "enable_int: DMA_CH_IER write - 0x%x\n",
	    channel->curr_ier);

	return (0);
}

/*
 * Disable the DMA channel interrupt(s) selected by 'int_id'.  For
 * XGMAC_INT_DMA_ALL the current enable mask is saved first so that a
 * later enable-all can restore it.  Returns 0, or -1 for an unknown id.
 */
static int
xgbe_disable_int(struct xgbe_channel *channel, enum xgbe_int int_id)
{
	struct xgbe_prv_data *pdata = channel->pdata;

	axgbe_printf(1, "disable_int: DMA_CH_IER read - 0x%x\n",
	    channel->curr_ier);

	switch (int_id) {
	case XGMAC_INT_DMA_CH_SR_TI:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_TPS:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_TBU:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_RI:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_RBU:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_RPS:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_TI_RI:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0);
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_FBE:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 0);
		break;
	case XGMAC_INT_DMA_ALL:
		/* Remember the current set so enable-all can restore it */
		channel->saved_ier = channel->curr_ier;
		channel->curr_ier = 0;
		break;
	default:
		return (-1);
	}

	XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);

	axgbe_printf(1, "disable_int: DMA_CH_IER write - 0x%x\n",
	    channel->curr_ier);

	return (0);
}

/*
 * Issue a DMA software reset and poll (up to ~1 second) for the
 * self-clearing SWR bit.  Returns 0 on success, -EBUSY on timeout.
 */
static int
__xgbe_exit(struct xgbe_prv_data *pdata)
{
	unsigned int count = 2000;

	/* Issue a software reset */
	XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
	DELAY(10);

	/* Poll Until Poll Condition */
	while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
		DELAY(500);

	if (!count)
		return (-EBUSY);

	return (0);
}

static int
xgbe_exit(struct xgbe_prv_data *pdata)
{
	int ret;

	/* To guard against possible incorrectly generated interrupts,
	 * issue the software reset twice.
	 */
	ret = __xgbe_exit(pdata);
	if (ret) {
		axgbe_error("%s: exit error %d\n", __func__, ret);
		return (ret);
	}

	return (__xgbe_exit(pdata));
}

/*
 * Flush every MTL Tx queue and wait for each FTQ bit to self-clear.
 * Skipped on hardware versions below 2.1.  Returns 0 or -EBUSY.
 */
static int
xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
{
	unsigned int i, count;

	if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
		return (0);

	/* Kick off the flush on all queues first, then poll each one */
	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);

	/* Poll Until Poll Condition */
	for (i = 0; i < pdata->tx_q_count; i++) {
		count = 2000;
		while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i,
		    MTL_Q_TQOMR, FTQ))
			DELAY(500);

		if (!count)
			return (-EBUSY);
	}

	return (0);
}

/*
 * Program the DMA system bus mode (addressing, burst length, AXI
 * outstanding-request limits) and optional descriptor prefetch
 * thresholds from the per-variant data.
 */
static void
xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
{
	unsigned int sbmr;

	sbmr = XGMAC_IOREAD(pdata, DMA_SBMR);

	/* Set enhanced addressing mode */
	XGMAC_SET_BITS(sbmr, DMA_SBMR, EAME, 1);

	/* Set the System Bus mode */
	XGMAC_SET_BITS(sbmr, DMA_SBMR, UNDEF, 1);
	XGMAC_SET_BITS(sbmr, DMA_SBMR, BLEN, pdata->blen >> 2);
	XGMAC_SET_BITS(sbmr, DMA_SBMR, AAL, pdata->aal);
	XGMAC_SET_BITS(sbmr, DMA_SBMR, RD_OSR_LMT, pdata->rd_osr_limit - 1);
	XGMAC_SET_BITS(sbmr, DMA_SBMR, WR_OSR_LMT, pdata->wr_osr_limit - 1);

	XGMAC_IOWRITE(pdata, DMA_SBMR, sbmr);

	/* Set descriptor fetching threshold */
	if (pdata->vdata->tx_desc_prefetch)
		XGMAC_IOWRITE_BITS(pdata, DMA_TXEDMACR, TDPS,
		    pdata->vdata->tx_desc_prefetch);

	if (pdata->vdata->rx_desc_prefetch)
		XGMAC_IOWRITE_BITS(pdata, DMA_RXEDMACR, RDPS,
		    pdata->vdata->rx_desc_prefetch);
}

/* Program the AXI DMA cache/coherency attribute registers */
static void
xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE(pdata, DMA_AXIARCR, pdata->arcr);
	XGMAC_IOWRITE(pdata, DMA_AXIAWCR, pdata->awcr);
	if (pdata->awarcr)
		XGMAC_IOWRITE(pdata, DMA_AXIAWARCR, pdata->awarcr);
}

/* Select the MTL scheduling algorithms: WRR for Tx, strict-prio for Rx */
static void
xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Set Tx to weighted round robin scheduling algorithm */
	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);

	/* Set Tx traffic classes to use WRR algorithm with equal weights */
	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
		    MTL_TSA_ETS);
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
	}

	/* Set Rx to strict priority algorithm */
	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
}

/*
 * Compute the Rx flow-control activate (RFA) and deactivate (RFD)
 * thresholds for one queue from its fifo size relative to the aligned
 * maximum frame size.  Results are stored, register-encoded, in
 * pdata->rx_rfa[]/rx_rfd[]; they are written to hardware later by
 * xgbe_config_flow_control_threshold().
 */
static void
xgbe_queue_flow_control_threshold(struct xgbe_prv_data *pdata,
    unsigned int queue, unsigned int q_fifo_size)
{
	unsigned int frame_fifo_size;
	unsigned int rfa, rfd;

	frame_fifo_size = XGMAC_FLOW_CONTROL_ALIGN(xgbe_get_max_frame(pdata));
	axgbe_printf(1, "%s: queue %d q_fifo_size %d frame_fifo_size 0x%x\n",
	    __func__, queue, q_fifo_size, frame_fifo_size);

	/* TODO - add pfc/ets related support */

	/* This path deals with just maximum frame sizes which are
	 * limited to a jumbo frame of 9,000 (plus headers, etc.)
	 * so we can never exceed the maximum allowable RFA/RFD
	 * values.
	 */
	if (q_fifo_size <= 2048) {
		/* rx_rfd to zero to signal no flow control */
		pdata->rx_rfa[queue] = 0;
		pdata->rx_rfd[queue] = 0;
		return;
	}

	if (q_fifo_size <= 4096) {
		/* Between 2048 and 4096 */
		pdata->rx_rfa[queue] = 0;	/* Full - 1024 bytes */
		pdata->rx_rfd[queue] = 1;	/* Full - 1536 bytes */
		return;
	}

	if (q_fifo_size <= frame_fifo_size) {
		/* Between 4096 and max-frame */
		pdata->rx_rfa[queue] = 2;	/* Full - 2048 bytes */
		pdata->rx_rfd[queue] = 5;	/* Full - 3584 bytes */
		return;
	}

	if (q_fifo_size <= (frame_fifo_size * 3)) {
		/* Between max-frame and 3 max-frames,
		 * trigger if we get just over a frame of data and
		 * resume when we have just under half a frame left.
		 */
		rfa = q_fifo_size - frame_fifo_size;
		rfd = rfa + (frame_fifo_size / 2);
	} else {
		/* Above 3 max-frames - trigger when just over
		 * 2 frames of space available
		 */
		rfa = frame_fifo_size * 2;
		rfa += XGMAC_FLOW_CONTROL_UNIT;
		rfd = rfa + frame_fifo_size;
	}

	pdata->rx_rfa[queue] = XGMAC_FLOW_CONTROL_VALUE(rfa);
	pdata->rx_rfd[queue] = XGMAC_FLOW_CONTROL_VALUE(rfd);
	axgbe_printf(1, "%s: forced queue %d rfa 0x%x rfd 0x%x\n", __func__,
	    queue, pdata->rx_rfa[queue], pdata->rx_rfd[queue]);
}

/*
 * Derive the per-queue flow-control thresholds from the register-encoded
 * fifo allocation array.
 */
static void
xgbe_calculate_flow_control_threshold(struct xgbe_prv_data *pdata,
    unsigned int *fifo)
{
	unsigned int q_fifo_size;
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++) {
		/* Decode the fifo setting back into a byte count */
		q_fifo_size = (fifo[i] + 1) * XGMAC_FIFO_UNIT;

		axgbe_printf(1, "%s: fifo[%d] - 0x%x q_fifo_size 0x%x\n",
		    __func__, i, fifo[i], q_fifo_size);
		xgbe_queue_flow_control_threshold(pdata, i, q_fifo_size);
	}
}

/* Write the computed RFA/RFD thresholds to each Rx queue's MTL register */
static void
xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++) {
		axgbe_printf(1, "%s: queue %d rfa %d rfd %d\n", __func__, i,
		    pdata->rx_rfa[i], pdata->rx_rfd[i]);

		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA,
		    pdata->rx_rfa[i]);
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD,
		    pdata->rx_rfd[i]);

		axgbe_printf(1, "%s: MTL_Q_RQFCR 0x%x\n", __func__,
		    XGMAC_MTL_IOREAD(pdata, i,
		    MTL_Q_RQFCR));
	}
}

static unsigned int
xgbe_get_tx_fifo_size(struct xgbe_prv_data *pdata)
{
	/* The configured value may not be the actual amount of fifo RAM */
	return (min_t(unsigned int, pdata->tx_max_fifo_size,
	    pdata->hw_feat.tx_fifo_size));
}

static unsigned int
xgbe_get_rx_fifo_size(struct xgbe_prv_data *pdata)
{
	/* The configured value may not be the actual amount of fifo RAM */
	return (min_t(unsigned int, pdata->rx_max_fifo_size,
	    pdata->hw_feat.rx_fifo_size));
}

/*
 * Split 'fifo_size' evenly across 'queue_count' queues, storing each
 * share in register encoding (units of XGMAC_FIFO_UNIT, zero-based).
 */
static void
xgbe_calculate_equal_fifo(unsigned int fifo_size, unsigned int queue_count,
    unsigned int *fifo)
{
	unsigned int q_fifo_size;
	unsigned int p_fifo;
	unsigned int i;

	q_fifo_size = fifo_size / queue_count;

	/* Calculate the fifo setting by dividing the queue's fifo size
	 * by the fifo allocation increment (with 0 representing the
	 * base allocation increment so decrement the result by 1).
	 */
	p_fifo = q_fifo_size / XGMAC_FIFO_UNIT;
	if (p_fifo)
		p_fifo--;

	/* Distribute the fifo equally amongst the queues */
	for (i = 0; i < queue_count; i++)
		fifo[i] = p_fifo;
}

/*
 * Give each queue above IEEE_8021QAZ_MAX_TCS the minimum fifo
 * allocation and return the fifo space left over for the others.
 */
static unsigned int
xgbe_set_nonprio_fifos(unsigned int fifo_size, unsigned int queue_count,
    unsigned int *fifo)
{
	unsigned int i;

	MPASS(powerof2(XGMAC_FIFO_MIN_ALLOC));

	if (queue_count <= IEEE_8021QAZ_MAX_TCS)
		return (fifo_size);

	/* Rx queues 9 and up are for specialized packets,
	 * such as PTP or DCB control packets, etc. and
	 * don't require a large fifo
	 */
	for (i = IEEE_8021QAZ_MAX_TCS; i < queue_count; i++) {
		fifo[i] = (XGMAC_FIFO_MIN_ALLOC / XGMAC_FIFO_UNIT) - 1;
		fifo_size -= XGMAC_FIFO_MIN_ALLOC;
	}

	return (fifo_size);
}

/* Divide the usable Tx fifo evenly across the Tx queues (TQS fields) */
static void
xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
{
	unsigned int fifo_size;
	unsigned int fifo[XGBE_MAX_QUEUES];
	unsigned int i;

	fifo_size = xgbe_get_tx_fifo_size(pdata);
	axgbe_printf(1, "%s: fifo_size 0x%x\n", __func__, fifo_size);

	xgbe_calculate_equal_fifo(fifo_size, pdata->tx_q_count, fifo);

	for (i = 0; i < pdata->tx_q_count; i++) {
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo[i]);
		axgbe_printf(1, "Tx q %d FIFO Size 0x%x\n", i,
		    XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TQOMR));
	}

	axgbe_printf(1, "%d Tx hardware queues, %d byte fifo per queue\n",
	    pdata->tx_q_count, ((fifo[0] + 1) * XGMAC_FIFO_UNIT));
}

/*
 * Size the Rx fifos: reserve the minimum for non-priority queues, split
 * the remainder across the VLAN priority queues, then derive and
 * program the flow-control thresholds.
 */
static void
xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
{
	unsigned int fifo_size;
	unsigned int fifo[XGBE_MAX_QUEUES];
	unsigned int prio_queues;
	unsigned int i;

	/* TODO - add pfc/ets related support */

	/* Clear any DCB related fifo/queue information */
	fifo_size = xgbe_get_rx_fifo_size(pdata);
	prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
	axgbe_printf(1, "%s: fifo_size 0x%x rx_q_cnt %d prio %d\n", __func__,
	    fifo_size, pdata->rx_q_count, prio_queues);

	/* Assign a minimum fifo to the non-VLAN priority queues */
	fifo_size = xgbe_set_nonprio_fifos(fifo_size, pdata->rx_q_count, fifo);

	xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo);

	for (i = 0; i < pdata->rx_q_count; i++) {
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo[i]);
		axgbe_printf(1, "Rx q %d FIFO Size 0x%x\n", i,
		    XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQOMR));
	}

	xgbe_calculate_flow_control_threshold(pdata, fifo);
	xgbe_config_flow_control_threshold(pdata);

	axgbe_printf(1, "%u Rx hardware queues, %u byte fifo/queue\n",
	    pdata->rx_q_count, ((fifo[0] + 1) * XGMAC_FIFO_UNIT));
}

/*
 * Map Tx queues to traffic classes and the 8 VLAN priorities to the Rx
 * priority queues, then select dynamic MTL-Rx-queue-to-DMA-channel
 * mapping.
 */
static void
xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
{
	unsigned int qptc,
	    qptc_extra, queue;
	unsigned int prio_queues;
	unsigned int ppq, ppq_extra, prio;
	unsigned int mask;
	unsigned int i, j, reg, reg_val;

	/* Map the MTL Tx Queues to Traffic Classes
	 * Note: Tx Queues >= Traffic Classes
	 */
	qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
	qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;

	for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
		for (j = 0; j < qptc; j++) {
			axgbe_printf(1, "TXq%u mapped to TC%u\n", queue, i);
			XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
			    Q2TCMAP, i);
			pdata->q2tc_map[queue++] = i;
		}

		/* Spread any remainder queues over the first classes */
		if (i < qptc_extra) {
			axgbe_printf(1, "TXq%u mapped to TC%u\n", queue, i);
			XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
			    Q2TCMAP, i);
			pdata->q2tc_map[queue++] = i;
		}
	}

	/* Map the 8 VLAN priority values to available MTL Rx queues */
	prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
	ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
	ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;

	reg = MAC_RQC2R;
	reg_val = 0;
	for (i = 0, prio = 0; i < prio_queues;) {
		mask = 0;
		for (j = 0; j < ppq; j++) {
			axgbe_printf(1, "PRIO%u mapped to RXq%u\n", prio, i);
			mask |= (1 << prio);
			pdata->prio2q_map[prio++] = i;
		}

		if (i < ppq_extra) {
			axgbe_printf(1, "PRIO%u mapped to RXq%u\n", prio, i);
			mask |= (1 << prio);
			pdata->prio2q_map[prio++] = i;
		}

		/* One byte of priority mask per queue in the register */
		reg_val |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3));

		/* Only write once the register is full (or at the end) */
		if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues))
			continue;

		XGMAC_IOWRITE(pdata, reg, reg_val);
		reg += MAC_RQC2_INC;
		reg_val = 0;
	}

	/* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
	reg = MTL_RQDCM0R;
	reg_val = 0;
	for (i = 0; i < pdata->rx_q_count;) {
		reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));

		/* Only write once the register is full (or at the end) */
		if ((i % MTL_RQDCM_Q_PER_REG) && (i != pdata->rx_q_count))
			continue;

		XGMAC_IOWRITE(pdata, reg, reg_val);
		reg += MTL_RQDCM_INC;
		reg_val = 0;
	}
}

/*
 * Program the station MAC address and, when the hardware has a hash
 * table, enable hash plus perfect filtering for unicast and multicast.
 */
static void
xgbe_config_mac_address(struct xgbe_prv_data *pdata)
{
	xgbe_set_mac_address(pdata, IF_LLADDR(pdata->netdev));

	/* Filtering is done using perfect filtering and hash filtering */
	if (pdata->hw_feat.hash_table_size) {
		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1);
	}
}

/* Enable jumbo frame handling whenever the MTU exceeds the standard size */
static void
xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
{
	unsigned int val;

	val = (if_getmtu(pdata->netdev) > XGMAC_STD_PACKET_MTU) ? 1 : 0;

	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
}

/* Re-apply the currently negotiated PHY speed to the MAC */
static void
xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
{
	xgbe_set_speed(pdata, pdata->phy_speed);
}

/* Mirror the interface's RXCSUM capability into the hardware */
static void
xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
{
	if ((if_getcapenable(pdata->netdev) & IFCAP_RXCSUM))
		xgbe_enable_rx_csum(pdata);
	else
		xgbe_disable_rx_csum(pdata);
}

/*
 * Configure VLAN offloads: Tx tag insertion source, the VLAN hash
 * table, and the Rx filtering/stripping state taken from the interface
 * capabilities.
 */
static void
xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
{
	/* Indicate that VLAN Tx CTAGs come from context descriptors */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);

	/* Set the current VLAN Hash Table register value */
	xgbe_update_vlan_hash_table(pdata);

	if ((if_getcapenable(pdata->netdev) & IFCAP_VLAN_HWFILTER)) {
		axgbe_printf(1, "Enabling rx vlan filtering\n");
		xgbe_enable_rx_vlan_filtering(pdata);
	} else {
		axgbe_printf(1, "Disabling rx vlan filtering\n");
		xgbe_disable_rx_vlan_filtering(pdata);
	}

	if ((if_getcapenable(pdata->netdev) & IFCAP_VLAN_HWTAGGING)) {
		axgbe_printf(1, "Enabling rx vlan stripping\n");
		xgbe_enable_rx_vlan_stripping(pdata);
	} else {
		axgbe_printf(1, "Disabling rx vlan stripping\n");
		xgbe_disable_rx_vlan_stripping(pdata);
	}
}

/*
 * Read an MMC counter.  Depending on the hardware variant some counters
 * are 64-bit (low register followed by the high half at reg_lo + 4);
 * decide per register and variant whether to read the high word.
 */
static uint64_t
xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
{
	bool read_hi;
	uint64_t val;

	if (pdata->vdata->mmc_64bit) {
		switch (reg_lo) {
		/* These registers are always 32 bit */
		case MMC_RXRUNTERROR:
		case MMC_RXJABBERERROR:
		case MMC_RXUNDERSIZE_G:
		case MMC_RXOVERSIZE_G:
		case MMC_RXWATCHDOGERROR:
			read_hi = false;
			break;

		default:
			read_hi = true;
		}
	} else {
		switch (reg_lo) {
		/* These registers are always 64 bit */
		case MMC_TXOCTETCOUNT_GB_LO:
		case MMC_TXOCTETCOUNT_G_LO:
		case
		    MMC_RXOCTETCOUNT_GB_LO:
		case MMC_RXOCTETCOUNT_G_LO:
			read_hi = true;
			break;

		default:
			read_hi = false;
		}
	}

	val = XGMAC_IOREAD(pdata, reg_lo);

	if (read_hi)
		val |= ((uint64_t)XGMAC_IOREAD(pdata, reg_lo + 4) << 32);

	return (val);
}

/*
 * Service a Tx MMC interrupt: fold every hardware counter flagged in
 * MMC_TISR into the software statistics.  The counters are configured
 * reset-on-read (see xgbe_config_mmc()), so each read drains them.
 */
static void
xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
{
	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
	unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
		stats->txoctetcount_gb +=
		    xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
		stats->txframecount_gb +=
		    xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
		stats->txbroadcastframes_g +=
		    xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
		stats->txmulticastframes_g +=
		    xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
		stats->tx64octets_gb +=
		    xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
		stats->tx65to127octets_gb +=
		    xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
		stats->tx128to255octets_gb +=
		    xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
		stats->tx256to511octets_gb +=
		    xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
		stats->tx512to1023octets_gb +=
		    xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
		stats->tx1024tomaxoctets_gb +=
		    xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
		stats->txunicastframes_gb +=
		    xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
		stats->txmulticastframes_gb +=
		    xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);

	/* NOTE(review): the GB counter is accumulated into the _g field
	 * here (matches the upstream driver) while xgbe_read_mmc_stats()
	 * uses txbroadcastframes_gb - confirm which is intended */
	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
		stats->txbroadcastframes_g +=
		    xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
		stats->txunderflowerror +=
		    xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
		stats->txoctetcount_g +=
		    xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
		stats->txframecount_g +=
		    xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
		stats->txpauseframes +=
		    xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
		stats->txvlanframes_g +=
		    xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
}

/*
 * Service an Rx MMC interrupt: fold every hardware counter flagged in
 * MMC_RISR into the software statistics.
 */
static void
xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
{
	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
	unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
		stats->rxframecount_gb +=
		    xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
		stats->rxoctetcount_gb +=
		    xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
		stats->rxoctetcount_g +=
		    xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
		stats->rxbroadcastframes_g +=
		    xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
		stats->rxmulticastframes_g +=
		    xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
		stats->rxcrcerror +=
		    xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
		stats->rxrunterror +=
		    xgbe_mmc_read(pdata, MMC_RXRUNTERROR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
		stats->rxjabbererror +=
		    xgbe_mmc_read(pdata, MMC_RXJABBERERROR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
		stats->rxundersize_g +=
		    xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
		stats->rxoversize_g +=
		    xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
		stats->rx64octets_gb +=
		    xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
		stats->rx65to127octets_gb +=
		    xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
		stats->rx128to255octets_gb +=
		    xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
		stats->rx256to511octets_gb +=
		    xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
		stats->rx512to1023octets_gb +=
		    xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
		stats->rx1024tomaxoctets_gb +=
		    xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
		stats->rxunicastframes_g +=
		    xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
		stats->rxlengtherror +=
		    xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
		stats->rxoutofrangetype +=
		    xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
		stats->rxpauseframes +=
		    xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
		stats->rxfifooverflow +=
		    xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
		stats->rxvlanframes_gb +=
		    xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
		stats->rxwatchdogerror +=
		    xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
}

/*
 * Drain every MMC counter into the software statistics.  The counters
 * are frozen (MMC_CR.MCF) for the duration so the reads are coherent.
 */
static void
xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
{
	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* Freeze counters */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);

	stats->txoctetcount_gb +=
	    xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);

	stats->txframecount_gb +=
	    xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);

	stats->txbroadcastframes_g +=
	    xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);

	stats->txmulticastframes_g +=
	    xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);

	stats->tx64octets_gb +=
	    xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);

	stats->tx65to127octets_gb +=
	    xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);

	stats->tx128to255octets_gb +=
	    xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);

	stats->tx256to511octets_gb +=
	    xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);

	stats->tx512to1023octets_gb +=
	    xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);

	stats->tx1024tomaxoctets_gb +=
	    xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);

	stats->txunicastframes_gb +=
	    xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);

	stats->txmulticastframes_gb +=
	    xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);

	stats->txbroadcastframes_gb +=
	    xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);

	stats->txunderflowerror +=
	    xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);

	stats->txoctetcount_g +=
	    xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);

	stats->txframecount_g +=
	    xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);

	stats->txpauseframes +=
	    xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);

	stats->txvlanframes_g +=
	    xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);

	stats->rxframecount_gb +=
	    xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);

	stats->rxoctetcount_gb +=
	    xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);

	stats->rxoctetcount_g +=
	    xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);

	stats->rxbroadcastframes_g +=
	    xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);

	stats->rxmulticastframes_g +=
	    xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);

	stats->rxcrcerror +=
	    xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);

	stats->rxrunterror +=
	    xgbe_mmc_read(pdata, MMC_RXRUNTERROR);

	stats->rxjabbererror +=
	    xgbe_mmc_read(pdata, MMC_RXJABBERERROR);

	stats->rxundersize_g +=
	    xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);

	stats->rxoversize_g +=
	    xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);

	stats->rx64octets_gb +=
	    xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);

	stats->rx65to127octets_gb +=
	    xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);

	stats->rx128to255octets_gb +=
	    xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);

	stats->rx256to511octets_gb +=
	    xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);

	stats->rx512to1023octets_gb +=
	    xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);

	stats->rx1024tomaxoctets_gb +=
	    xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);

	stats->rxunicastframes_g +=
	    xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);

	stats->rxlengtherror +=
	    xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);

	stats->rxoutofrangetype +=
	    xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);

	stats->rxpauseframes +=
	    xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);

	stats->rxfifooverflow +=
	    xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);

	stats->rxvlanframes_gb +=
	    xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);

	stats->rxwatchdogerror +=
	    xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);

	/* Un-freeze counters */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
}

/* Configure the MMC block: counters reset on read, then reset them all */
static void
xgbe_config_mmc(struct xgbe_prv_data *pdata)
{
	/* Set counters to reset on read */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1);

	/* Reset the counters */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
}

/*
 * Wait (bounded) for an MTL Tx queue to drain its fifo before the Tx
 * path is stopped.  Used on hardware newer than 2.0.
 */
static void
xgbe_txq_prepare_tx_stop(struct xgbe_prv_data *pdata, unsigned int queue)
{
	unsigned int tx_status;
	unsigned long tx_timeout;

	/* The Tx engine cannot be stopped if it is actively processing
	 * packets. Wait for the Tx queue to empty the Tx fifo.  Don't
	 * wait forever though...
	 */
	tx_timeout = ticks + (XGBE_DMA_STOP_TIMEOUT * hz);
	while (ticks < tx_timeout) {
		tx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR);
		if ((XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) &&
		    (XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0))
			break;

		DELAY(500);
	}

	if (ticks >= tx_timeout)
		axgbe_printf(1, "timed out waiting for Tx queue %u to empty\n",
		    queue);
}

/*
 * Wait (bounded) for a Tx DMA channel to reach the stopped or suspended
 * state before it is disabled.  Hardware newer than 2.0 uses the
 * queue-based variant above instead.
 */
static void
xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata, unsigned int queue)
{
	unsigned int tx_dsr, tx_pos, tx_qidx;
	unsigned int tx_status;
	unsigned long tx_timeout;

	if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20)
		return (xgbe_txq_prepare_tx_stop(pdata, queue));

	/* Calculate the status register to read and the position within */
	if (queue < DMA_DSRX_FIRST_QUEUE) {
		tx_dsr = DMA_DSR0;
		tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START;
	} else {
		tx_qidx = queue - DMA_DSRX_FIRST_QUEUE;

		tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
		tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
		    DMA_DSRX_TPS_START;
	}

	/* The Tx engine cannot be stopped if it is actively processing
	 * descriptors. Wait for the Tx engine to enter the stopped or
	 * suspended state.  Don't wait forever though...
	 */
	tx_timeout = ticks + (XGBE_DMA_STOP_TIMEOUT * hz);
	while (ticks < tx_timeout) {
		tx_status = XGMAC_IOREAD(pdata, tx_dsr);
		tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
		if ((tx_status == DMA_TPS_STOPPED) ||
		    (tx_status == DMA_TPS_SUSPENDED))
			break;

		DELAY(500);
	}

	if (ticks >= tx_timeout)
		axgbe_printf(1,
		    "timed out waiting for Tx DMA channel %u to stop\n",
		    queue);
}

/* Enable Tx: DMA channels first, then the MTL queues, then the MAC */
static void
xgbe_enable_tx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Enable each Tx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1);
	}

	/* Enable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
		    MTL_Q_ENABLED);

	/* Enable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}

/* Disable Tx in reverse order, draining the queues first */
static void
xgbe_disable_tx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Prepare for Tx DMA channel stop */
	for (i = 0; i < pdata->tx_q_count; i++)
		xgbe_prepare_tx_stop(pdata, i);

	/* Disable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);

	/* Disable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);

	/* Disable each Tx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0);
	}
}

/*
 * Wait (bounded) for an MTL Rx queue to drain its fifo before the Rx
 * path is stopped.
 */
static void
xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata, unsigned int queue)
{
	unsigned int rx_status;
	unsigned long rx_timeout;

	/* The Rx engine cannot be stopped if it is actively processing
	 * packets. Wait for the Rx queue to empty the Rx fifo.  Don't
	 * wait forever though...
	 */
	rx_timeout = ticks + (XGBE_DMA_STOP_TIMEOUT * hz);
	while (ticks < rx_timeout) {
		rx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
		if ((XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) &&
		    (XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0))
			break;

		DELAY(500);
	}

	if (ticks >= rx_timeout)
		axgbe_printf(1, "timed out waiting for Rx queue %d to empty\n",
		    queue);
}

/* Enable Rx: DMA channels first, then the MTL queues, then the MAC */
static void
xgbe_enable_rx(struct xgbe_prv_data *pdata)
{
	unsigned int reg_val, i;

	/* Enable each Rx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1);
	}

	/* Enable each Rx queue: 0x02 per two-bit field (presumably the
	 * DCB-enable encoding - confirm against the XGMAC databook) */
	reg_val = 0;
	for (i = 0; i < pdata->rx_q_count; i++)
		reg_val |= (0x02 << (i << 1));
	XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);

	/* Enable MAC Rx */
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
}

/* Disable Rx in reverse order, draining the queues first */
static void
xgbe_disable_rx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Disable MAC Rx */
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);

	/* Prepare for Rx DMA channel stop */
	for (i = 0; i < pdata->rx_q_count; i++)
		xgbe_prepare_rx_stop(pdata, i);

	/* Disable each Rx queue */
	XGMAC_IOWRITE(pdata, MAC_RQC0R, 0);

	/* Disable each Rx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0);
	}
}

/* Power-up path: restart the Tx DMA channels and the MAC transmitter */
static void
xgbe_powerup_tx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Enable each Tx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1);
	}

	/* Enable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}

/* Power-down path: drain and stop the Tx DMA channels and the MAC */
static void
xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Prepare for Tx DMA channel stop */
	for (i = 0; i < pdata->tx_q_count; i++)
		xgbe_prepare_tx_stop(pdata, i);

	/* Disable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);

	/* Disable each Tx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0);
	}
}

/* Power-up path: restart the Rx DMA channels */
static void
xgbe_powerup_rx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Enable each Rx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1);
	}
}

/* Power-down path: stop the Rx DMA channels */
static void
xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Disable each Rx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0);
	}
}

/*
 * One-time hardware bring-up: flush the Tx queues, then configure the
 * DMA, MTL and MAC layers in order.
 */
static int
xgbe_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	int ret;

	/* Flush Tx queues */
	ret = xgbe_flush_tx_queues(pdata);
	if (ret) {
		axgbe_error("error flushing TX queues\n");
		return (ret);
	}

	/*
	 * Initialize DMA related features
	 */
	xgbe_config_dma_bus(pdata);
	xgbe_config_dma_cache(pdata);
	xgbe_config_osp_mode(pdata);
	xgbe_config_pbl_val(pdata);
	xgbe_config_rx_coalesce(pdata);
	xgbe_config_tx_coalesce(pdata);
	xgbe_config_rx_buffer_size(pdata);
	xgbe_config_tso_mode(pdata);
	xgbe_config_sph_mode(pdata);
	xgbe_config_rss(pdata);
	desc_if->wrapper_tx_desc_init(pdata);
	desc_if->wrapper_rx_desc_init(pdata);
	xgbe_enable_dma_interrupts(pdata);

	/*
	 * Initialize MTL related features
	 */
	xgbe_config_mtl_mode(pdata);
	xgbe_config_queue_mapping(pdata);
	xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
	xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
	xgbe_config_tx_threshold(pdata, pdata->tx_threshold);
	xgbe_config_rx_threshold(pdata, pdata->rx_threshold);
	xgbe_config_tx_fifo_size(pdata);
	xgbe_config_rx_fifo_size(pdata);
	/*TODO: Error Packet and undersized
good Packet forwarding enable (FEP and FUP) */ xgbe_enable_mtl_interrupts(pdata); /* * Initialize MAC related features */ xgbe_config_mac_address(pdata); xgbe_config_rx_mode(pdata); xgbe_config_jumbo_enable(pdata); xgbe_config_flow_control(pdata); xgbe_config_mac_speed(pdata); xgbe_config_checksum_offload(pdata); xgbe_config_vlan_support(pdata); xgbe_config_mmc(pdata); xgbe_enable_mac_interrupts(pdata); return (0); } void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if) { hw_if->tx_complete = xgbe_tx_complete; hw_if->set_mac_address = xgbe_set_mac_address; hw_if->config_rx_mode = xgbe_config_rx_mode; hw_if->enable_rx_csum = xgbe_enable_rx_csum; hw_if->disable_rx_csum = xgbe_disable_rx_csum; hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping; hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping; hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering; hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering; hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table; hw_if->read_mmd_regs = xgbe_read_mmd_regs; hw_if->write_mmd_regs = xgbe_write_mmd_regs; hw_if->set_speed = xgbe_set_speed; hw_if->set_ext_mii_mode = xgbe_set_ext_mii_mode; hw_if->read_ext_mii_regs = xgbe_read_ext_mii_regs; hw_if->write_ext_mii_regs = xgbe_write_ext_mii_regs; hw_if->set_gpio = xgbe_set_gpio; hw_if->clr_gpio = xgbe_clr_gpio; hw_if->enable_tx = xgbe_enable_tx; hw_if->disable_tx = xgbe_disable_tx; hw_if->enable_rx = xgbe_enable_rx; hw_if->disable_rx = xgbe_disable_rx; hw_if->powerup_tx = xgbe_powerup_tx; hw_if->powerdown_tx = xgbe_powerdown_tx; hw_if->powerup_rx = xgbe_powerup_rx; hw_if->powerdown_rx = xgbe_powerdown_rx; hw_if->dev_read = xgbe_dev_read; hw_if->enable_int = xgbe_enable_int; hw_if->disable_int = xgbe_disable_int; hw_if->init = xgbe_init; hw_if->exit = xgbe_exit; /* Descriptor related Sequences have to be initialized here */ hw_if->tx_desc_init = xgbe_tx_desc_init; hw_if->rx_desc_init = xgbe_rx_desc_init; hw_if->tx_desc_reset = 
xgbe_tx_desc_reset; hw_if->is_last_desc = xgbe_is_last_desc; hw_if->is_context_desc = xgbe_is_context_desc; /* For FLOW ctrl */ hw_if->config_tx_flow_control = xgbe_config_tx_flow_control; hw_if->config_rx_flow_control = xgbe_config_rx_flow_control; /* For RX coalescing */ hw_if->config_rx_coalesce = xgbe_config_rx_coalesce; hw_if->config_tx_coalesce = xgbe_config_tx_coalesce; hw_if->usec_to_riwt = xgbe_usec_to_riwt; hw_if->riwt_to_usec = xgbe_riwt_to_usec; /* For RX and TX threshold config */ hw_if->config_rx_threshold = xgbe_config_rx_threshold; hw_if->config_tx_threshold = xgbe_config_tx_threshold; /* For RX and TX Store and Forward Mode config */ hw_if->config_rsf_mode = xgbe_config_rsf_mode; hw_if->config_tsf_mode = xgbe_config_tsf_mode; /* For TX DMA Operating on Second Frame config */ hw_if->config_osp_mode = xgbe_config_osp_mode; /* For MMC statistics support */ hw_if->tx_mmc_int = xgbe_tx_mmc_int; hw_if->rx_mmc_int = xgbe_rx_mmc_int; hw_if->read_mmc_stats = xgbe_read_mmc_stats; /* For Receive Side Scaling */ hw_if->enable_rss = xgbe_enable_rss; hw_if->disable_rss = xgbe_disable_rss; hw_if->set_rss_hash_key = xgbe_set_rss_hash_key; hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table; } Index: head/sys/dev/axgbe/xgbe-mdio.c =================================================================== --- head/sys/dev/axgbe/xgbe-mdio.c (revision 368304) +++ head/sys/dev/axgbe/xgbe-mdio.c (revision 368305) @@ -1,1634 +1,1634 @@ /* * AMD 10Gb Ethernet driver * * Copyright (c) 2014-2016,2020 Advanced Micro Devices, Inc. * * This file is available to you under your choice of the following two * licenses: * * License 1: GPLv2 * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or (at * your option) any later version. 
* * This file is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . * * This file incorporates work covered by the following copyright and * permission notice: * The Synopsys DWC ETHER XGMAC Software Driver and documentation * (hereinafter "Software") is an unsupported proprietary work of Synopsys, * Inc. unless otherwise expressly agreed to in writing between Synopsys * and you. * * The Software IS NOT an item of Licensed Software or Licensed Product * under any End User Software License Agreement or Agreement for Licensed * Product with Synopsys or any supplement thereto. Permission is hereby * granted, free of charge, to any person obtaining a copy of this software * annotated with this license and the Software, to deal in the Software * without restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished * to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. 
IN NO EVENT SHALL SYNOPSYS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * * License 2: Modified BSD * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Advanced Micro Devices, Inc. nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * This file incorporates work covered by the following copyright and * permission notice: * The Synopsys DWC ETHER XGMAC Software Driver and documentation * (hereinafter "Software") is an unsupported proprietary work of Synopsys, * Inc. unless otherwise expressly agreed to in writing between Synopsys * and you. * * The Software IS NOT an item of Licensed Software or Licensed Product * under any End User Software License Agreement or Agreement for Licensed * Product with Synopsys or any supplement thereto. Permission is hereby * granted, free of charge, to any person obtaining a copy of this software * annotated with this license and the Software, to deal in the Software * without restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished * to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "xgbe.h" #include "xgbe-common.h" static void xgbe_an_state_machine(struct xgbe_prv_data *pdata); static void xgbe_an37_clear_interrupts(struct xgbe_prv_data *pdata) { int reg; reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT); reg &= ~XGBE_AN_CL37_INT_MASK; XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT, reg); } static void xgbe_an37_disable_interrupts(struct xgbe_prv_data *pdata) { int reg; reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL); reg &= ~XGBE_AN_CL37_INT_MASK; XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg); reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL); reg &= ~XGBE_PCS_CL37_BP; XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL, reg); } static void xgbe_an37_enable_interrupts(struct xgbe_prv_data *pdata) { int reg; reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL); reg |= XGBE_PCS_CL37_BP; XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL, reg); reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL); reg |= XGBE_AN_CL37_INT_MASK; XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg); } static void xgbe_an73_clear_interrupts(struct xgbe_prv_data *pdata) { XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0); } static void xgbe_an73_disable_interrupts(struct xgbe_prv_data *pdata) { XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0); } static void xgbe_an73_enable_interrupts(struct xgbe_prv_data *pdata) { XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, XGBE_AN_CL73_INT_MASK); } static void xgbe_an_enable_interrupts(struct xgbe_prv_data *pdata) { switch (pdata->an_mode) { case XGBE_AN_MODE_CL73: case XGBE_AN_MODE_CL73_REDRV: xgbe_an73_enable_interrupts(pdata); break; case XGBE_AN_MODE_CL37: case XGBE_AN_MODE_CL37_SGMII: xgbe_an37_enable_interrupts(pdata); break; default: break; } } static void xgbe_an_clear_interrupts_all(struct xgbe_prv_data *pdata) { xgbe_an73_clear_interrupts(pdata); xgbe_an37_clear_interrupts(pdata); } static void 
xgbe_kr_mode(struct xgbe_prv_data *pdata) { /* Set MAC to 10G speed */ pdata->hw_if.set_speed(pdata, SPEED_10000); /* Call PHY implementation support to complete rate change */ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_KR); } static void xgbe_kx_2500_mode(struct xgbe_prv_data *pdata) { /* Set MAC to 2.5G speed */ pdata->hw_if.set_speed(pdata, SPEED_2500); /* Call PHY implementation support to complete rate change */ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_KX_2500); } static void xgbe_kx_1000_mode(struct xgbe_prv_data *pdata) { /* Set MAC to 1G speed */ pdata->hw_if.set_speed(pdata, SPEED_1000); /* Call PHY implementation support to complete rate change */ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_KX_1000); } static void xgbe_sfi_mode(struct xgbe_prv_data *pdata) { /* If a KR re-driver is present, change to KR mode instead */ if (pdata->kr_redrv) return (xgbe_kr_mode(pdata)); /* Set MAC to 10G speed */ pdata->hw_if.set_speed(pdata, SPEED_10000); /* Call PHY implementation support to complete rate change */ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_SFI); } static void xgbe_x_mode(struct xgbe_prv_data *pdata) { /* Set MAC to 1G speed */ pdata->hw_if.set_speed(pdata, SPEED_1000); /* Call PHY implementation support to complete rate change */ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_X); } static void xgbe_sgmii_1000_mode(struct xgbe_prv_data *pdata) { /* Set MAC to 1G speed */ pdata->hw_if.set_speed(pdata, SPEED_1000); /* Call PHY implementation support to complete rate change */ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_SGMII_1000); } static void xgbe_sgmii_100_mode(struct xgbe_prv_data *pdata) { /* Set MAC to 1G speed */ pdata->hw_if.set_speed(pdata, SPEED_1000); /* Call PHY implementation support to complete rate change */ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_SGMII_100); } static enum xgbe_mode xgbe_cur_mode(struct xgbe_prv_data *pdata) { return (pdata->phy_if.phy_impl.cur_mode(pdata)); } static bool 
xgbe_in_kr_mode(struct xgbe_prv_data *pdata) { return (xgbe_cur_mode(pdata) == XGBE_MODE_KR); } static void xgbe_change_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { switch (mode) { case XGBE_MODE_KX_1000: xgbe_kx_1000_mode(pdata); break; case XGBE_MODE_KX_2500: xgbe_kx_2500_mode(pdata); break; case XGBE_MODE_KR: xgbe_kr_mode(pdata); break; case XGBE_MODE_SGMII_100: xgbe_sgmii_100_mode(pdata); break; case XGBE_MODE_SGMII_1000: xgbe_sgmii_1000_mode(pdata); break; case XGBE_MODE_X: xgbe_x_mode(pdata); break; case XGBE_MODE_SFI: xgbe_sfi_mode(pdata); break; case XGBE_MODE_UNKNOWN: break; default: axgbe_error("invalid operation mode requested (%u)\n", mode); } } static void xgbe_switch_mode(struct xgbe_prv_data *pdata) { xgbe_change_mode(pdata, pdata->phy_if.phy_impl.switch_mode(pdata)); } static bool xgbe_set_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { if (mode == xgbe_cur_mode(pdata)) return (false); xgbe_change_mode(pdata, mode); return (true); } static bool xgbe_use_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { return (pdata->phy_if.phy_impl.use_mode(pdata, mode)); } static void xgbe_an37_set(struct xgbe_prv_data *pdata, bool enable, bool restart) { unsigned int reg; reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_CTRL1); reg &= ~MDIO_VEND2_CTRL1_AN_ENABLE; if (enable) reg |= MDIO_VEND2_CTRL1_AN_ENABLE; if (restart) reg |= MDIO_VEND2_CTRL1_AN_RESTART; XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_CTRL1, reg); } static void xgbe_an37_restart(struct xgbe_prv_data *pdata) { xgbe_an37_enable_interrupts(pdata); xgbe_an37_set(pdata, true, true); } static void xgbe_an37_disable(struct xgbe_prv_data *pdata) { xgbe_an37_set(pdata, false, false); xgbe_an37_disable_interrupts(pdata); } static void xgbe_an73_set(struct xgbe_prv_data *pdata, bool enable, bool restart) { unsigned int reg; /* Disable KR training for now */ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL); reg &= ~XGBE_KR_TRAINING_ENABLE; XMDIO_WRITE(pdata, 
MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); /* Update AN settings */ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1); reg &= ~MDIO_AN_CTRL1_ENABLE; if (enable) reg |= MDIO_AN_CTRL1_ENABLE; if (restart) reg |= MDIO_AN_CTRL1_RESTART; XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_CTRL1, reg); } static void xgbe_an73_restart(struct xgbe_prv_data *pdata) { xgbe_an73_enable_interrupts(pdata); xgbe_an73_set(pdata, true, true); } static void xgbe_an73_disable(struct xgbe_prv_data *pdata) { xgbe_an73_set(pdata, false, false); xgbe_an73_disable_interrupts(pdata); pdata->an_start = 0; } static void xgbe_an_restart(struct xgbe_prv_data *pdata) { if (pdata->phy_if.phy_impl.an_pre) pdata->phy_if.phy_impl.an_pre(pdata); switch (pdata->an_mode) { case XGBE_AN_MODE_CL73: case XGBE_AN_MODE_CL73_REDRV: xgbe_an73_restart(pdata); break; case XGBE_AN_MODE_CL37: case XGBE_AN_MODE_CL37_SGMII: xgbe_an37_restart(pdata); break; default: break; } } static void xgbe_an_disable(struct xgbe_prv_data *pdata) { if (pdata->phy_if.phy_impl.an_post) pdata->phy_if.phy_impl.an_post(pdata); switch (pdata->an_mode) { case XGBE_AN_MODE_CL73: case XGBE_AN_MODE_CL73_REDRV: xgbe_an73_disable(pdata); break; case XGBE_AN_MODE_CL37: case XGBE_AN_MODE_CL37_SGMII: xgbe_an37_disable(pdata); break; default: break; } } static void xgbe_an_disable_all(struct xgbe_prv_data *pdata) { xgbe_an73_disable(pdata); xgbe_an37_disable(pdata); } static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata, enum xgbe_rx *state) { unsigned int ad_reg, lp_reg, reg; *state = XGBE_RX_COMPLETE; /* If we're not in KR mode then we're done */ if (!xgbe_in_kr_mode(pdata)) return (XGBE_AN_PAGE_RECEIVED); /* Enable/Disable FEC */ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2); reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL); reg &= ~(MDIO_PMA_10GBR_FECABLE_ABLE | MDIO_PMA_10GBR_FECABLE_ERRABLE); if ((ad_reg & 0xc000) && (lp_reg & 0xc000)) reg |= 
pdata->fec_ability; XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL, reg); /* Start KR training */ if (pdata->phy_if.phy_impl.kr_training_pre) pdata->phy_if.phy_impl.kr_training_pre(pdata); /* Start KR training */ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL); reg |= XGBE_KR_TRAINING_ENABLE; reg |= XGBE_KR_TRAINING_START; XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); if (pdata->phy_if.phy_impl.kr_training_post) pdata->phy_if.phy_impl.kr_training_post(pdata); return (XGBE_AN_PAGE_RECEIVED); } static enum xgbe_an xgbe_an73_tx_xnp(struct xgbe_prv_data *pdata, enum xgbe_rx *state) { uint16_t msg; *state = XGBE_RX_XNP; msg = XGBE_XNP_MCF_NULL_MESSAGE; msg |= XGBE_XNP_MP_FORMATTED; XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP + 2, 0); XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0); XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP, msg); return (XGBE_AN_PAGE_RECEIVED); } static enum xgbe_an xgbe_an73_rx_bpa(struct xgbe_prv_data *pdata, enum xgbe_rx *state) { unsigned int link_support; unsigned int reg, ad_reg, lp_reg; /* Read Base Ability register 2 first */ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1); /* Check for a supported mode, otherwise restart in a different one */ link_support = xgbe_in_kr_mode(pdata) ? 0x80 : 0x20; if (!(reg & link_support)) return (XGBE_AN_INCOMPAT_LINK); /* Check Extended Next Page support */ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE); lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA); return (((ad_reg & XGBE_XNP_NP_EXCHANGE) || (lp_reg & XGBE_XNP_NP_EXCHANGE)) ? 
xgbe_an73_tx_xnp(pdata, state) : xgbe_an73_tx_training(pdata, state)); } static enum xgbe_an xgbe_an73_rx_xnp(struct xgbe_prv_data *pdata, enum xgbe_rx *state) { unsigned int ad_reg, lp_reg; /* Check Extended Next Page support */ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_XNP); lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPX); return (((ad_reg & XGBE_XNP_NP_EXCHANGE) || (lp_reg & XGBE_XNP_NP_EXCHANGE)) ? xgbe_an73_tx_xnp(pdata, state) : xgbe_an73_tx_training(pdata, state)); } static enum xgbe_an xgbe_an73_page_received(struct xgbe_prv_data *pdata) { enum xgbe_rx *state; unsigned long an_timeout; enum xgbe_an ret; if (!pdata->an_start) { pdata->an_start = ticks; } else { an_timeout = pdata->an_start + ((uint64_t)XGBE_AN_MS_TIMEOUT * (uint64_t)hz) / 1000ull; if ((int)(ticks - an_timeout) > 0) { /* Auto-negotiation timed out, reset state */ pdata->kr_state = XGBE_RX_BPA; pdata->kx_state = XGBE_RX_BPA; pdata->an_start = ticks; axgbe_printf(2, "CL73 AN timed out, resetting state\n"); } } state = xgbe_in_kr_mode(pdata) ? 
&pdata->kr_state : &pdata->kx_state; switch (*state) { case XGBE_RX_BPA: ret = xgbe_an73_rx_bpa(pdata, state); break; case XGBE_RX_XNP: ret = xgbe_an73_rx_xnp(pdata, state); break; default: ret = XGBE_AN_ERROR; } return (ret); } static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata) { /* Be sure we aren't looping trying to negotiate */ if (xgbe_in_kr_mode(pdata)) { pdata->kr_state = XGBE_RX_ERROR; if (!(XGBE_ADV(&pdata->phy, 1000baseKX_Full)) && !(XGBE_ADV(&pdata->phy, 2500baseX_Full))) return (XGBE_AN_NO_LINK); if (pdata->kx_state != XGBE_RX_BPA) return (XGBE_AN_NO_LINK); } else { pdata->kx_state = XGBE_RX_ERROR; if (!(XGBE_ADV(&pdata->phy, 10000baseKR_Full))) return (XGBE_AN_NO_LINK); if (pdata->kr_state != XGBE_RX_BPA) return (XGBE_AN_NO_LINK); } xgbe_an_disable(pdata); xgbe_switch_mode(pdata); xgbe_an_restart(pdata); return (XGBE_AN_INCOMPAT_LINK); } static void xgbe_an37_isr(struct xgbe_prv_data *pdata) { unsigned int reg; /* Disable AN interrupts */ xgbe_an37_disable_interrupts(pdata); /* Save the interrupt(s) that fired */ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT); pdata->an_int = reg & XGBE_AN_CL37_INT_MASK; pdata->an_status = reg & ~XGBE_AN_CL37_INT_MASK; if (pdata->an_int) { /* Clear the interrupt(s) that fired and process them */ reg &= ~XGBE_AN_CL37_INT_MASK; XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT, reg); xgbe_an_state_machine(pdata); } else { /* Enable AN interrupts */ xgbe_an37_enable_interrupts(pdata); /* Reissue interrupt if status is not clear */ if (pdata->vdata->irq_reissue_support) XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 3); } } static void xgbe_an73_isr(struct xgbe_prv_data *pdata) { /* Disable AN interrupts */ xgbe_an73_disable_interrupts(pdata); /* Save the interrupt(s) that fired */ pdata->an_int = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INT); if (pdata->an_int) { /* Clear the interrupt(s) that fired and process them */ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, ~pdata->an_int); 
xgbe_an_state_machine(pdata); } else { /* Enable AN interrupts */ xgbe_an73_enable_interrupts(pdata); /* Reissue interrupt if status is not clear */ if (pdata->vdata->irq_reissue_support) XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 3); } } static void xgbe_an_isr_task(unsigned long data) { struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data; axgbe_printf(2, "AN interrupt received\n"); switch (pdata->an_mode) { case XGBE_AN_MODE_CL73: case XGBE_AN_MODE_CL73_REDRV: xgbe_an73_isr(pdata); break; case XGBE_AN_MODE_CL37: case XGBE_AN_MODE_CL37_SGMII: xgbe_an37_isr(pdata); break; default: break; } } static void xgbe_an_combined_isr(struct xgbe_prv_data *pdata) { xgbe_an_isr_task((unsigned long)pdata); } static const char * xgbe_state_as_string(enum xgbe_an state) { switch (state) { case XGBE_AN_READY: return ("Ready"); case XGBE_AN_PAGE_RECEIVED: return ("Page-Received"); case XGBE_AN_INCOMPAT_LINK: return ("Incompatible-Link"); case XGBE_AN_COMPLETE: return ("Complete"); case XGBE_AN_NO_LINK: return ("No-Link"); case XGBE_AN_ERROR: return ("Error"); default: return ("Undefined"); } } static void xgbe_an37_state_machine(struct xgbe_prv_data *pdata) { enum xgbe_an cur_state = pdata->an_state; if (!pdata->an_int) return; if (pdata->an_int & XGBE_AN_CL37_INT_CMPLT) { pdata->an_state = XGBE_AN_COMPLETE; pdata->an_int &= ~XGBE_AN_CL37_INT_CMPLT; /* If SGMII is enabled, check the link status */ if ((pdata->an_mode == XGBE_AN_MODE_CL37_SGMII) && !(pdata->an_status & XGBE_SGMII_AN_LINK_STATUS)) pdata->an_state = XGBE_AN_NO_LINK; } axgbe_printf(2, "%s: CL37 AN %s\n", __func__, xgbe_state_as_string(pdata->an_state)); cur_state = pdata->an_state; switch (pdata->an_state) { case XGBE_AN_READY: break; case XGBE_AN_COMPLETE: axgbe_printf(2, "Auto negotiation successful\n"); break; case XGBE_AN_NO_LINK: break; default: pdata->an_state = XGBE_AN_ERROR; } if (pdata->an_state == XGBE_AN_ERROR) { axgbe_printf(2, "error during auto-negotiation, state=%u\n", cur_state); pdata->an_int = 
0; xgbe_an37_clear_interrupts(pdata); } if (pdata->an_state >= XGBE_AN_COMPLETE) { pdata->an_result = pdata->an_state; pdata->an_state = XGBE_AN_READY; if (pdata->phy_if.phy_impl.an_post) pdata->phy_if.phy_impl.an_post(pdata); axgbe_printf(2, "CL37 AN result: %s\n", xgbe_state_as_string(pdata->an_result)); } axgbe_printf(2, "%s: an_state %d an_int %d an_mode %d an_status %d\n", __func__, pdata->an_state, pdata->an_int, pdata->an_mode, pdata->an_status); xgbe_an37_enable_interrupts(pdata); } static void xgbe_an73_state_machine(struct xgbe_prv_data *pdata) { enum xgbe_an cur_state = pdata->an_state; if (!pdata->an_int) goto out; next_int: if (pdata->an_int & XGBE_AN_CL73_PG_RCV) { pdata->an_state = XGBE_AN_PAGE_RECEIVED; pdata->an_int &= ~XGBE_AN_CL73_PG_RCV; } else if (pdata->an_int & XGBE_AN_CL73_INC_LINK) { pdata->an_state = XGBE_AN_INCOMPAT_LINK; pdata->an_int &= ~XGBE_AN_CL73_INC_LINK; } else if (pdata->an_int & XGBE_AN_CL73_INT_CMPLT) { pdata->an_state = XGBE_AN_COMPLETE; pdata->an_int &= ~XGBE_AN_CL73_INT_CMPLT; } else { pdata->an_state = XGBE_AN_ERROR; } again: axgbe_printf(2, "CL73 AN %s\n", xgbe_state_as_string(pdata->an_state)); cur_state = pdata->an_state; switch (pdata->an_state) { case XGBE_AN_READY: pdata->an_supported = 0; break; case XGBE_AN_PAGE_RECEIVED: pdata->an_state = xgbe_an73_page_received(pdata); pdata->an_supported++; break; case XGBE_AN_INCOMPAT_LINK: pdata->an_supported = 0; pdata->parallel_detect = 0; pdata->an_state = xgbe_an73_incompat_link(pdata); break; case XGBE_AN_COMPLETE: pdata->parallel_detect = pdata->an_supported ? 0 : 1; axgbe_printf(2, "%s successful\n", pdata->an_supported ? 
"Auto negotiation" : "Parallel detection"); break; case XGBE_AN_NO_LINK: break; default: pdata->an_state = XGBE_AN_ERROR; } if (pdata->an_state == XGBE_AN_NO_LINK) { pdata->an_int = 0; xgbe_an73_clear_interrupts(pdata); } else if (pdata->an_state == XGBE_AN_ERROR) { axgbe_printf(2, "error during auto-negotiation, state=%u\n", cur_state); pdata->an_int = 0; xgbe_an73_clear_interrupts(pdata); } if (pdata->an_state >= XGBE_AN_COMPLETE) { pdata->an_result = pdata->an_state; pdata->an_state = XGBE_AN_READY; pdata->kr_state = XGBE_RX_BPA; pdata->kx_state = XGBE_RX_BPA; pdata->an_start = 0; if (pdata->phy_if.phy_impl.an_post) pdata->phy_if.phy_impl.an_post(pdata); axgbe_printf(2, "CL73 AN result: %s\n", xgbe_state_as_string(pdata->an_result)); } if (cur_state != pdata->an_state) goto again; if (pdata->an_int) goto next_int; out: /* Enable AN interrupts on the way out */ xgbe_an73_enable_interrupts(pdata); } static void xgbe_an_state_machine(struct xgbe_prv_data *pdata) { sx_xlock(&pdata->an_mutex); switch (pdata->an_mode) { case XGBE_AN_MODE_CL73: case XGBE_AN_MODE_CL73_REDRV: xgbe_an73_state_machine(pdata); break; case XGBE_AN_MODE_CL37: case XGBE_AN_MODE_CL37_SGMII: xgbe_an37_state_machine(pdata); break; default: break; } /* Reissue interrupt if status is not clear */ if (pdata->vdata->irq_reissue_support) XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 3); sx_xunlock(&pdata->an_mutex); } static void xgbe_an37_init(struct xgbe_prv_data *pdata) { struct xgbe_phy local_phy; unsigned int reg; pdata->phy_if.phy_impl.an_advertising(pdata, &local_phy); axgbe_printf(2, "%s: advertising 0x%x\n", __func__, local_phy.advertising); /* Set up Advertisement register */ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_ADVERTISE); if (XGBE_ADV(&local_phy, Pause)) reg |= 0x100; else reg &= ~0x100; if (XGBE_ADV(&local_phy, Asym_Pause)) reg |= 0x80; else reg &= ~0x80; /* Full duplex, but not half */ reg |= XGBE_AN_CL37_FD_MASK; reg &= ~XGBE_AN_CL37_HD_MASK; axgbe_printf(2, "%s: Writing 
reg: 0x%x\n", __func__, reg); XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_ADVERTISE, reg); /* Set up the Control register */ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL); axgbe_printf(2, "%s: AN_ADVERTISE reg 0x%x an_mode %d\n", __func__, reg, pdata->an_mode); reg &= ~XGBE_AN_CL37_TX_CONFIG_MASK; reg &= ~XGBE_AN_CL37_PCS_MODE_MASK; switch (pdata->an_mode) { case XGBE_AN_MODE_CL37: reg |= XGBE_AN_CL37_PCS_MODE_BASEX; break; case XGBE_AN_MODE_CL37_SGMII: reg |= XGBE_AN_CL37_PCS_MODE_SGMII; break; default: break; } reg |= XGBE_AN_CL37_MII_CTRL_8BIT; axgbe_printf(2, "%s: Writing reg: 0x%x\n", __func__, reg); XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg); axgbe_printf(2, "CL37 AN (%s) initialized\n", (pdata->an_mode == XGBE_AN_MODE_CL37) ? "BaseX" : "SGMII"); } static void xgbe_an73_init(struct xgbe_prv_data *pdata) { /* * This local_phy is needed because phy-v2 alters the * advertising flag variable. so phy-v1 an_advertising is just copying */ struct xgbe_phy local_phy; unsigned int reg; pdata->phy_if.phy_impl.an_advertising(pdata, &local_phy); /* Set up Advertisement register 3 first */ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); if (XGBE_ADV(&local_phy, 10000baseR_FEC)) reg |= 0xc000; else reg &= ~0xc000; XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, reg); /* Set up Advertisement register 2 next */ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1); if (XGBE_ADV(&local_phy, 10000baseKR_Full)) reg |= 0x80; else reg &= ~0x80; if (XGBE_ADV(&local_phy, 1000baseKX_Full) || XGBE_ADV(&local_phy, 2500baseX_Full)) reg |= 0x20; else reg &= ~0x20; XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, reg); /* Set up Advertisement register 1 last */ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE); if (XGBE_ADV(&local_phy, Pause)) reg |= 0x400; else reg &= ~0x400; if (XGBE_ADV(&local_phy, Asym_Pause)) reg |= 0x800; else reg &= ~0x800; /* We don't intend to perform XNP */ reg &= ~XGBE_XNP_NP_EXCHANGE; 
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);

    axgbe_printf(2, "CL73 AN initialized\n");
}

/*
 * (Re)program the AN advertisement registers for the current AN mode as
 * reported by the PHY implementation.
 */
static void
xgbe_an_init(struct xgbe_prv_data *pdata)
{
    /* Set up advertisement registers based on current settings */
    pdata->an_mode = pdata->phy_if.phy_impl.an_mode(pdata);
    axgbe_printf(2, "%s: setting up an_mode %d\n", __func__,
        pdata->an_mode);

    switch (pdata->an_mode) {
    case XGBE_AN_MODE_CL73:
    case XGBE_AN_MODE_CL73_REDRV:
        xgbe_an73_init(pdata);
        break;
    case XGBE_AN_MODE_CL37:
    case XGBE_AN_MODE_CL37_SGMII:
        xgbe_an37_init(pdata);
        break;
    default:
        break;
    }
}

/* Human-readable flow-control state for log messages. */
static const char *
xgbe_phy_fc_string(struct xgbe_prv_data *pdata)
{
    if (pdata->tx_pause && pdata->rx_pause)
        return ("rx/tx");
    else if (pdata->rx_pause)
        return ("rx");
    else if (pdata->tx_pause)
        return ("tx");
    else
        return ("off");
}

/* Human-readable speed for log messages. */
static const char *
xgbe_phy_speed_string(int speed)
{
    switch (speed) {
    case SPEED_100:
        return ("100Mbps");
    case SPEED_1000:
        return ("1Gbps");
    case SPEED_2500:
        return ("2.5Gbps");
    case SPEED_10000:
        return ("10Gbps");
    case SPEED_UNKNOWN:
        return ("Unknown");
    default:
        return ("Unsupported");
    }
}

/* Log the current link state (always printed: level 0). */
static void
xgbe_phy_print_status(struct xgbe_prv_data *pdata)
{
    if (pdata->phy.link)
        axgbe_printf(0, "Link is UP - %s/%s - flow control %s\n",
            xgbe_phy_speed_string(pdata->phy.speed),
            pdata->phy.duplex == DUPLEX_FULL ?
"Full" : "Half", xgbe_phy_fc_string(pdata)); else axgbe_printf(0, "Link is DOWN\n"); } static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata) { int new_state = 0; axgbe_printf(1, "link %d/%d tx %d/%d rx %d/%d speed %d/%d autoneg %d/%d\n", pdata->phy_link, pdata->phy.link, pdata->tx_pause, pdata->phy.tx_pause, pdata->rx_pause, pdata->phy.rx_pause, pdata->phy_speed, pdata->phy.speed, pdata->pause_autoneg, pdata->phy.pause_autoneg); if (pdata->phy.link) { /* Flow control support */ pdata->pause_autoneg = pdata->phy.pause_autoneg; if (pdata->tx_pause != pdata->phy.tx_pause) { new_state = 1; axgbe_printf(2, "tx pause %d/%d\n", pdata->tx_pause, pdata->phy.tx_pause); pdata->tx_pause = pdata->phy.tx_pause; pdata->hw_if.config_tx_flow_control(pdata); } if (pdata->rx_pause != pdata->phy.rx_pause) { new_state = 1; axgbe_printf(2, "rx pause %d/%d\n", pdata->rx_pause, pdata->phy.rx_pause); pdata->rx_pause = pdata->phy.rx_pause; pdata->hw_if.config_rx_flow_control(pdata); } /* Speed support */ if (pdata->phy_speed != pdata->phy.speed) { new_state = 1; pdata->phy_speed = pdata->phy.speed; } if (pdata->phy_link != pdata->phy.link) { new_state = 1; pdata->phy_link = pdata->phy.link; } } else if (pdata->phy_link) { new_state = 1; pdata->phy_link = 0; pdata->phy_speed = SPEED_UNKNOWN; } axgbe_printf(2, "phy_link %d Link %d new_state %d\n", pdata->phy_link, pdata->phy.link, new_state); if (new_state) xgbe_phy_print_status(pdata); } static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed) { return (pdata->phy_if.phy_impl.valid_speed(pdata, speed)); } static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata) { enum xgbe_mode mode; axgbe_printf(2, "fixed PHY configuration\n"); /* Disable auto-negotiation */ xgbe_an_disable(pdata); /* Set specified mode for specified speed */ mode = pdata->phy_if.phy_impl.get_mode(pdata, pdata->phy.speed); switch (mode) { case XGBE_MODE_KX_1000: case XGBE_MODE_KX_2500: case XGBE_MODE_KR: case XGBE_MODE_SGMII_100: case 
XGBE_MODE_SGMII_1000: case XGBE_MODE_X: case XGBE_MODE_SFI: break; case XGBE_MODE_UNKNOWN: default: return (-EINVAL); } /* Validate duplex mode */ if (pdata->phy.duplex != DUPLEX_FULL) return (-EINVAL); xgbe_set_mode(pdata, mode); return (0); } static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata, bool set_mode) { int ret; - unsigned int reg; + unsigned int reg = 0; sx_xlock(&pdata->an_mutex); set_bit(XGBE_LINK_INIT, &pdata->dev_state); pdata->link_check = ticks; ret = pdata->phy_if.phy_impl.an_config(pdata); if (ret) { axgbe_error("%s: an_config fail %d\n", __func__, ret); goto out; } if (pdata->phy.autoneg != AUTONEG_ENABLE) { ret = xgbe_phy_config_fixed(pdata); if (ret || !pdata->kr_redrv) { if (ret) axgbe_error("%s: fix conf fail %d\n", __func__, ret); goto out; } axgbe_printf(2, "AN redriver support\n"); } else axgbe_printf(2, "AN PHY configuration\n"); /* Disable auto-negotiation interrupt */ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0); reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK); axgbe_printf(2, "%s: set_mode %d AN int reg value 0x%x\n", __func__, set_mode, reg); /* Clear any auto-negotitation interrupts */ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0); /* Start auto-negotiation in a supported mode */ if (set_mode) { /* Start auto-negotiation in a supported mode */ if (xgbe_use_mode(pdata, XGBE_MODE_KR)) { xgbe_set_mode(pdata, XGBE_MODE_KR); } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_2500)) { xgbe_set_mode(pdata, XGBE_MODE_KX_2500); } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_1000)) { xgbe_set_mode(pdata, XGBE_MODE_KX_1000); } else if (xgbe_use_mode(pdata, XGBE_MODE_SFI)) { xgbe_set_mode(pdata, XGBE_MODE_SFI); } else if (xgbe_use_mode(pdata, XGBE_MODE_X)) { xgbe_set_mode(pdata, XGBE_MODE_X); } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_1000)) { xgbe_set_mode(pdata, XGBE_MODE_SGMII_1000); } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_100)) { xgbe_set_mode(pdata, XGBE_MODE_SGMII_100); } else { XMDIO_WRITE(pdata, 
MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07); ret = -EINVAL; goto out; } } /* Disable and stop any in progress auto-negotiation */ xgbe_an_disable_all(pdata); /* Clear any auto-negotitation interrupts */ xgbe_an_clear_interrupts_all(pdata); pdata->an_result = XGBE_AN_READY; pdata->an_state = XGBE_AN_READY; pdata->kr_state = XGBE_RX_BPA; pdata->kx_state = XGBE_RX_BPA; /* Re-enable auto-negotiation interrupt */ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07); reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK); /* Set up advertisement registers based on current settings */ xgbe_an_init(pdata); /* Enable and start auto-negotiation */ xgbe_an_restart(pdata); out: if (ret) { axgbe_printf(0, "%s: set_mode %d AN int reg value 0x%x ret value %d\n", __func__, set_mode, reg, ret); set_bit(XGBE_LINK_ERR, &pdata->dev_state); } else clear_bit(XGBE_LINK_ERR, &pdata->dev_state); sx_unlock(&pdata->an_mutex); return (ret); } static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata) { return (__xgbe_phy_config_aneg(pdata, true)); } static int xgbe_phy_reconfig_aneg(struct xgbe_prv_data *pdata) { return (__xgbe_phy_config_aneg(pdata, false)); } static bool xgbe_phy_aneg_done(struct xgbe_prv_data *pdata) { return (pdata->an_result == XGBE_AN_COMPLETE); } static void xgbe_check_link_timeout(struct xgbe_prv_data *pdata) { unsigned long link_timeout; link_timeout = pdata->link_check + (XGBE_LINK_TIMEOUT * hz); if ((int)(ticks - link_timeout) > 0) { axgbe_printf(2, "AN link timeout\n"); xgbe_phy_config_aneg(pdata); } } static enum xgbe_mode xgbe_phy_status_aneg(struct xgbe_prv_data *pdata) { return (pdata->phy_if.phy_impl.an_outcome(pdata)); } static void xgbe_phy_status_result(struct xgbe_prv_data *pdata) { enum xgbe_mode mode; XGBE_ZERO_LP_ADV(&pdata->phy); if ((pdata->phy.autoneg != AUTONEG_ENABLE) || pdata->parallel_detect) mode = xgbe_cur_mode(pdata); else mode = xgbe_phy_status_aneg(pdata); axgbe_printf(3, "%s: xgbe mode %d\n", __func__, mode); switch (mode) { case 
XGBE_MODE_SGMII_100:
        pdata->phy.speed = SPEED_100;
        break;
    case XGBE_MODE_X:
    case XGBE_MODE_KX_1000:
    case XGBE_MODE_SGMII_1000:
        pdata->phy.speed = SPEED_1000;
        break;
    case XGBE_MODE_KX_2500:
        pdata->phy.speed = SPEED_2500;
        break;
    case XGBE_MODE_KR:
    case XGBE_MODE_SFI:
        pdata->phy.speed = SPEED_10000;
        break;
    case XGBE_MODE_UNKNOWN:
    default:
        axgbe_printf(1, "%s: unknown mode\n", __func__);
        pdata->phy.speed = SPEED_UNKNOWN;
    }

    pdata->phy.duplex = DUPLEX_FULL;

    axgbe_printf(2, "%s: speed %d duplex %d\n", __func__,
        pdata->phy.speed, pdata->phy.duplex);

    /* NOTE(review): xgbe_set_mode's return apparently signals "mode was
     * changed"; AN is retried only when the PHY impl set an_again. */
    if (xgbe_set_mode(pdata, mode) && pdata->an_again)
        xgbe_phy_reconfig_aneg(pdata);
}

/*
 * Periodic link-status poll: reads the (latched-low) link state, restarts
 * AN when requested by the PHY implementation, enforces the link timeout,
 * and finally pushes the result into the driver via adjust_link.
 */
static void
xgbe_phy_status(struct xgbe_prv_data *pdata)
{
    bool link_aneg;
    int an_restart;

    if (test_bit(XGBE_LINK_ERR, &pdata->dev_state)) {
        axgbe_error("%s: LINK_ERR\n", __func__);
        pdata->phy.link = 0;
        goto adjust_link;
    }

    link_aneg = (pdata->phy.autoneg == AUTONEG_ENABLE);
    axgbe_printf(3, "link_aneg - %d\n", link_aneg);

    /* Get the link status. Link status is latched low, so read
     * once to clear and then read again to get current state
     */
    pdata->phy.link = pdata->phy_if.phy_impl.link_status(pdata,
        &an_restart);

    axgbe_printf(1, "link_status returned Link:%d an_restart:%d aneg:%d\n",
        pdata->phy.link, an_restart, link_aneg);

    if (an_restart) {
        xgbe_phy_config_aneg(pdata);
        return;
    }

    if (pdata->phy.link) {
        axgbe_printf(2, "Link Active\n");
        if (link_aneg && !xgbe_phy_aneg_done(pdata)) {
            axgbe_printf(1, "phy_link set check timeout\n");
            xgbe_check_link_timeout(pdata);
            return;
        }

        axgbe_printf(2, "%s: Link write phy_status result\n", __func__);
        xgbe_phy_status_result(pdata);

        if (test_bit(XGBE_LINK_INIT, &pdata->dev_state))
            clear_bit(XGBE_LINK_INIT, &pdata->dev_state);
    } else {
        axgbe_printf(2, "Link Deactive\n");
        if (test_bit(XGBE_LINK_INIT, &pdata->dev_state)) {
            axgbe_printf(1, "phy_link not set check timeout\n");
            xgbe_check_link_timeout(pdata);

            if (link_aneg) {
                axgbe_printf(2, "link_aneg case\n");
                return;
            }
        }

        xgbe_phy_status_result(pdata);
    }

adjust_link:
axgbe_printf(2, "%s: Link %d\n", __func__, pdata->phy.link); xgbe_phy_adjust_link(pdata); } static void xgbe_phy_stop(struct xgbe_prv_data *pdata) { axgbe_printf(2, "stopping PHY\n"); if (!pdata->phy_started) return; /* Indicate the PHY is down */ pdata->phy_started = 0; /* Disable auto-negotiation */ xgbe_an_disable_all(pdata); pdata->phy_if.phy_impl.stop(pdata); pdata->phy.link = 0; xgbe_phy_adjust_link(pdata); } static int xgbe_phy_start(struct xgbe_prv_data *pdata) { int ret; DBGPR("-->xgbe_phy_start\n"); ret = pdata->phy_if.phy_impl.start(pdata); if (ret) { axgbe_error("%s: impl start ret %d\n", __func__, ret); return (ret); } /* Set initial mode - call the mode setting routines * directly to insure we are properly configured */ if (xgbe_use_mode(pdata, XGBE_MODE_KR)) { axgbe_printf(2, "%s: KR\n", __func__); xgbe_kr_mode(pdata); } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_2500)) { axgbe_printf(2, "%s: KX 2500\n", __func__); xgbe_kx_2500_mode(pdata); } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_1000)) { axgbe_printf(2, "%s: KX 1000\n", __func__); xgbe_kx_1000_mode(pdata); } else if (xgbe_use_mode(pdata, XGBE_MODE_SFI)) { axgbe_printf(2, "%s: SFI\n", __func__); xgbe_sfi_mode(pdata); } else if (xgbe_use_mode(pdata, XGBE_MODE_X)) { axgbe_printf(2, "%s: X\n", __func__); xgbe_x_mode(pdata); } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_1000)) { axgbe_printf(2, "%s: SGMII 1000\n", __func__); xgbe_sgmii_1000_mode(pdata); } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_100)) { axgbe_printf(2, "%s: SGMII 100\n", __func__); xgbe_sgmii_100_mode(pdata); } else { axgbe_error("%s: invalid mode\n", __func__); ret = -EINVAL; goto err_stop; } /* Indicate the PHY is up and running */ pdata->phy_started = 1; /* Set up advertisement registers based on current settings */ xgbe_an_init(pdata); /* Enable auto-negotiation interrupts */ xgbe_an_enable_interrupts(pdata); ret = xgbe_phy_config_aneg(pdata); if (ret) axgbe_error("%s: phy_config_aneg %d\n", __func__, ret); return (ret); 
err_stop:
    pdata->phy_if.phy_impl.stop(pdata);

    return (ret);
}

/* Reset the PHY implementation and quiesce auto-negotiation. */
static int
xgbe_phy_reset(struct xgbe_prv_data *pdata)
{
    int ret;

    ret = pdata->phy_if.phy_impl.reset(pdata);
    if (ret) {
        axgbe_error("%s: impl phy reset %d\n", __func__, ret);
        return (ret);
    }

    /* Disable auto-negotiation for now */
    xgbe_an_disable_all(pdata);

    /* Clear auto-negotiation interrupts */
    xgbe_an_clear_interrupts_all(pdata);

    return (0);
}

/* Highest speed present in the advertising mask (fixed-link fallback). */
static int
xgbe_phy_best_advertised_speed(struct xgbe_prv_data *pdata)
{
    if (XGBE_ADV(&pdata->phy, 10000baseKR_Full))
        return (SPEED_10000);
    else if (XGBE_ADV(&pdata->phy, 10000baseT_Full))
        return (SPEED_10000);
    else if (XGBE_ADV(&pdata->phy, 2500baseX_Full))
        return (SPEED_2500);
    else if (XGBE_ADV(&pdata->phy, 2500baseT_Full))
        return (SPEED_2500);
    else if (XGBE_ADV(&pdata->phy, 1000baseKX_Full))
        return (SPEED_1000);
    else if (XGBE_ADV(&pdata->phy, 1000baseT_Full))
        return (SPEED_1000);
    else if (XGBE_ADV(&pdata->phy, 100baseT_Full))
        return (SPEED_100);

    return (SPEED_UNKNOWN);
}

/* Tear down the PHY implementation. */
static void
xgbe_phy_exit(struct xgbe_prv_data *pdata)
{
    pdata->phy_if.phy_impl.exit(pdata);
}

/*
 * One-time PHY init: create the AN lock, probe FEC ability, initialize the
 * PHY implementation and seed phy.advertising/autoneg/speed/duplex from
 * the supported-modes mask.  Returns 0 or a negative errno from impl init.
 */
static int
xgbe_phy_init(struct xgbe_prv_data *pdata)
{
    int ret = 0;

    DBGPR("-->xgbe_phy_init\n");

    sx_init(&pdata->an_mutex, "axgbe AN lock");
    pdata->mdio_mmd = MDIO_MMD_PCS;

    /* Initialize supported features */
    pdata->fec_ability = XMDIO_READ(pdata, MDIO_MMD_PMAPMD,
        MDIO_PMA_10GBR_FECABLE);
    pdata->fec_ability &= (MDIO_PMA_10GBR_FECABLE_ABLE |
        MDIO_PMA_10GBR_FECABLE_ERRABLE);

    /* Setup the phy (including supported features) */
    ret = pdata->phy_if.phy_impl.init(pdata);
    if (ret)
        return (ret);

    /* Copy supported link modes to advertising link modes */
    XGBE_LM_COPY(&pdata->phy, advertising, &pdata->phy, supported);

    pdata->phy.address = 0;

    if (XGBE_ADV(&pdata->phy, Autoneg)) {
        pdata->phy.autoneg = AUTONEG_ENABLE;
        pdata->phy.speed = SPEED_UNKNOWN;
        pdata->phy.duplex = DUPLEX_UNKNOWN;
    } else {
        pdata->phy.autoneg = AUTONEG_DISABLE;
        pdata->phy.speed = xgbe_phy_best_advertised_speed(pdata);
        pdata->phy.duplex =
DUPLEX_FULL;
    }

    pdata->phy.link = 0;

    pdata->phy.pause_autoneg = pdata->pause_autoneg;
    pdata->phy.tx_pause = pdata->tx_pause;
    pdata->phy.rx_pause = pdata->rx_pause;

    /* Fix up Flow Control advertising */
    XGBE_CLR_ADV(&pdata->phy, Pause);
    XGBE_CLR_ADV(&pdata->phy, Asym_Pause);

    if (pdata->rx_pause) {
        XGBE_SET_ADV(&pdata->phy, Pause);
        XGBE_SET_ADV(&pdata->phy, Asym_Pause);
    }

    /* tx-only toggles Asym_Pause relative to the rx setting above. */
    if (pdata->tx_pause) {
        if (XGBE_ADV(&pdata->phy, Asym_Pause))
            XGBE_CLR_ADV(&pdata->phy, Asym_Pause);
        else
            XGBE_SET_ADV(&pdata->phy, Asym_Pause);
    }

    return (0);
}

/* Populate the generic PHY interface with the MDIO-layer entry points. */
void
xgbe_init_function_ptrs_phy(struct xgbe_phy_if *phy_if)
{
    phy_if->phy_init = xgbe_phy_init;
    phy_if->phy_exit = xgbe_phy_exit;

    phy_if->phy_reset = xgbe_phy_reset;
    phy_if->phy_start = xgbe_phy_start;
    phy_if->phy_stop = xgbe_phy_stop;

    phy_if->phy_status = xgbe_phy_status;
    phy_if->phy_config_aneg = xgbe_phy_config_aneg;

    phy_if->phy_valid_speed = xgbe_phy_valid_speed;
    phy_if->an_isr = xgbe_an_combined_isr;
}
Index: head/sys/dev/axgbe/xgbe-phy-v2.c =================================================================== --- head/sys/dev/axgbe/xgbe-phy-v2.c (revision 368304) +++ head/sys/dev/axgbe/xgbe-phy-v2.c (revision 368305) @@ -1,3771 +1,3771 @@ /* * AMD 10Gb Ethernet driver * * Copyright (c) 2020 Advanced Micro Devices, Inc. * * This file is available to you under your choice of the following two * licenses: * * License 1: GPLv2 * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or (at * your option) any later version. * * This file is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see .
* * This file incorporates work covered by the following copyright and * permission notice: * The Synopsys DWC ETHER XGMAC Software Driver and documentation * (hereinafter "Software") is an unsupported proprietary work of Synopsys, * Inc. unless otherwise expressly agreed to in writing between Synopsys * and you. * * The Software IS NOT an item of Licensed Software or Licensed Product * under any End User Software License Agreement or Agreement for Licensed * Product with Synopsys or any supplement thereto. Permission is hereby * granted, free of charge, to any person obtaining a copy of this software * annotated with this license and the Software, to deal in the Software * without restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished * to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
* * * License 2: Modified BSD * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Advanced Micro Devices, Inc. nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * This file incorporates work covered by the following copyright and * permission notice: * The Synopsys DWC ETHER XGMAC Software Driver and documentation * (hereinafter "Software") is an unsupported proprietary work of Synopsys, * Inc. unless otherwise expressly agreed to in writing between Synopsys * and you. * * The Software IS NOT an item of Licensed Software or Licensed Product * under any End User Software License Agreement or Agreement for Licensed * Product with Synopsys or any supplement thereto. 
Permission is hereby * granted, free of charge, to any person obtaining a copy of this software * annotated with this license and the Software, to deal in the Software * without restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished * to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include "xgbe.h" #include "xgbe-common.h" struct mtx xgbe_phy_comm_lock; #define XGBE_PHY_PORT_SPEED_100 BIT(0) #define XGBE_PHY_PORT_SPEED_1000 BIT(1) #define XGBE_PHY_PORT_SPEED_2500 BIT(2) #define XGBE_PHY_PORT_SPEED_10000 BIT(3) #define XGBE_MUTEX_RELEASE 0x80000000 #define XGBE_SFP_DIRECT 7 #define GPIO_MASK_WIDTH 4 /* I2C target addresses */ #define XGBE_SFP_SERIAL_ID_ADDRESS 0x50 #define XGBE_SFP_DIAG_INFO_ADDRESS 0x51 #define XGBE_SFP_PHY_ADDRESS 0x56 #define XGBE_GPIO_ADDRESS_PCA9555 0x20 /* SFP sideband signal indicators */ #define XGBE_GPIO_NO_TX_FAULT BIT(0) #define XGBE_GPIO_NO_RATE_SELECT BIT(1) #define XGBE_GPIO_NO_MOD_ABSENT BIT(2) #define XGBE_GPIO_NO_RX_LOS BIT(3) /* Rate-change complete wait/retry count */ #define XGBE_RATECHANGE_COUNT 500 /* CDR delay values for KR support (in usec) */ #define XGBE_CDR_DELAY_INIT 10000 #define XGBE_CDR_DELAY_INC 10000 #define XGBE_CDR_DELAY_MAX 100000 /* RRC frequency during link status check */ #define XGBE_RRC_FREQUENCY 10 enum xgbe_port_mode { XGBE_PORT_MODE_RSVD = 0, XGBE_PORT_MODE_BACKPLANE, XGBE_PORT_MODE_BACKPLANE_2500, XGBE_PORT_MODE_1000BASE_T, XGBE_PORT_MODE_1000BASE_X, XGBE_PORT_MODE_NBASE_T, XGBE_PORT_MODE_10GBASE_T, XGBE_PORT_MODE_10GBASE_R, XGBE_PORT_MODE_SFP, XGBE_PORT_MODE_MAX, }; enum xgbe_conn_type { XGBE_CONN_TYPE_NONE = 0, XGBE_CONN_TYPE_SFP, XGBE_CONN_TYPE_MDIO, XGBE_CONN_TYPE_RSVD1, XGBE_CONN_TYPE_BACKPLANE, XGBE_CONN_TYPE_MAX, }; /* SFP/SFP+ related definitions */ enum xgbe_sfp_comm { XGBE_SFP_COMM_DIRECT = 0, XGBE_SFP_COMM_PCA9545, }; enum xgbe_sfp_cable { XGBE_SFP_CABLE_UNKNOWN = 0, XGBE_SFP_CABLE_ACTIVE, XGBE_SFP_CABLE_PASSIVE, }; enum xgbe_sfp_base { XGBE_SFP_BASE_UNKNOWN = 0, XGBE_SFP_BASE_1000_T, XGBE_SFP_BASE_1000_SX, XGBE_SFP_BASE_1000_LX, XGBE_SFP_BASE_1000_CX, XGBE_SFP_BASE_10000_SR, XGBE_SFP_BASE_10000_LR, XGBE_SFP_BASE_10000_LRM, XGBE_SFP_BASE_10000_ER, XGBE_SFP_BASE_10000_CR, }; enum xgbe_sfp_speed { XGBE_SFP_SPEED_UNKNOWN = 0, 
    XGBE_SFP_SPEED_100_1000,
    XGBE_SFP_SPEED_1000,
    XGBE_SFP_SPEED_10000,
};

/* SFP Serial ID Base ID values relative to an offset of 0 */
#define XGBE_SFP_BASE_ID 0
#define XGBE_SFP_ID_SFP 0x03

#define XGBE_SFP_BASE_EXT_ID 1
#define XGBE_SFP_EXT_ID_SFP 0x04

#define XGBE_SFP_BASE_10GBE_CC 3
#define XGBE_SFP_BASE_10GBE_CC_SR BIT(4)
#define XGBE_SFP_BASE_10GBE_CC_LR BIT(5)
#define XGBE_SFP_BASE_10GBE_CC_LRM BIT(6)
#define XGBE_SFP_BASE_10GBE_CC_ER BIT(7)

#define XGBE_SFP_BASE_1GBE_CC 6
#define XGBE_SFP_BASE_1GBE_CC_SX BIT(0)
#define XGBE_SFP_BASE_1GBE_CC_LX BIT(1)
#define XGBE_SFP_BASE_1GBE_CC_CX BIT(2)
#define XGBE_SFP_BASE_1GBE_CC_T BIT(3)

#define XGBE_SFP_BASE_CABLE 8
#define XGBE_SFP_BASE_CABLE_PASSIVE BIT(2)
#define XGBE_SFP_BASE_CABLE_ACTIVE BIT(3)

/* Nominal bit-rate byte (units of 100 Mb/s per SFF-8472). */
#define XGBE_SFP_BASE_BR 12
#define XGBE_SFP_BASE_BR_1GBE_MIN 0x0a
#define XGBE_SFP_BASE_BR_1GBE_MAX 0x0d
#define XGBE_SFP_BASE_BR_10GBE_MIN 0x64
#define XGBE_SFP_BASE_BR_10GBE_MAX 0x68

#define XGBE_SFP_BASE_CU_CABLE_LEN 18

#define XGBE_SFP_BASE_VENDOR_NAME 20
#define XGBE_SFP_BASE_VENDOR_NAME_LEN 16
#define XGBE_SFP_BASE_VENDOR_PN 40
#define XGBE_SFP_BASE_VENDOR_PN_LEN 16
#define XGBE_SFP_BASE_VENDOR_REV 56
#define XGBE_SFP_BASE_VENDOR_REV_LEN 4

#define XGBE_SFP_BASE_CC 63

/* SFP Serial ID Extended ID values relative to an offset of 64 */
#define XGBE_SFP_BASE_VENDOR_SN 4
#define XGBE_SFP_BASE_VENDOR_SN_LEN 16

#define XGBE_SFP_EXTD_OPT1 1
#define XGBE_SFP_EXTD_OPT1_RX_LOS BIT(1)
#define XGBE_SFP_EXTD_OPT1_TX_FAULT BIT(3)

#define XGBE_SFP_EXTD_DIAG 28
#define XGBE_SFP_EXTD_DIAG_ADDR_CHANGE BIT(2)

#define XGBE_SFP_EXTD_SFF_8472 30

#define XGBE_SFP_EXTD_CC 31

/* Raw copy of the SFP serial-ID EEPROM (SFF-8472 A0h layout). */
struct xgbe_sfp_eeprom {
    uint8_t base[64];
    uint8_t extd[32];
    uint8_t vendor[32];
};

/* Diagnostics usable only when SFF-8472 is implemented without the
 * address-change requirement. */
#define XGBE_SFP_DIAGS_SUPPORTED(_x) \
    ((_x)->extd[XGBE_SFP_EXTD_SFF_8472] && \
    !((_x)->extd[XGBE_SFP_EXTD_DIAG] & XGBE_SFP_EXTD_DIAG_ADDR_CHANGE))

#define XGBE_SFP_EEPROM_BASE_LEN 256
#define XGBE_SFP_EEPROM_DIAG_LEN 256
#define XGBE_SFP_EEPROM_MAX (XGBE_SFP_EEPROM_BASE_LEN + \
XGBE_SFP_EEPROM_DIAG_LEN)

/* Vendor/part strings identifying the Bel-Fuse 1000Base-T SFP quirk. */
#define XGBE_BEL_FUSE_VENDOR "BEL-FUSE        "
#define XGBE_BEL_FUSE_PARTNO "1GBT-SFP06      "

/* Scratch buffers for NUL-terminating fixed-width EEPROM strings. */
struct xgbe_sfp_ascii {
    union {
        char vendor[XGBE_SFP_BASE_VENDOR_NAME_LEN + 1];
        char partno[XGBE_SFP_BASE_VENDOR_PN_LEN + 1];
        char rev[XGBE_SFP_BASE_VENDOR_REV_LEN + 1];
        char serno[XGBE_SFP_BASE_VENDOR_SN_LEN + 1];
    } u;
};

/* MDIO PHY reset types */
enum xgbe_mdio_reset {
    XGBE_MDIO_RESET_NONE = 0,
    XGBE_MDIO_RESET_I2C_GPIO,
    XGBE_MDIO_RESET_INT_GPIO,
    XGBE_MDIO_RESET_MAX,
};

/* Re-driver related definitions */
enum xgbe_phy_redrv_if {
    XGBE_PHY_REDRV_IF_MDIO = 0,
    XGBE_PHY_REDRV_IF_I2C,
    XGBE_PHY_REDRV_IF_MAX,
};

enum xgbe_phy_redrv_model {
    XGBE_PHY_REDRV_MODEL_4223 = 0,
    XGBE_PHY_REDRV_MODEL_4227,
    XGBE_PHY_REDRV_MODEL_MAX,
};

enum xgbe_phy_redrv_mode {
    XGBE_PHY_REDRV_MODE_CX = 5,
    XGBE_PHY_REDRV_MODE_SR = 9,
};

#define XGBE_PHY_REDRV_MODE_REG 0x12b0

/* PHY related configuration information */
struct xgbe_phy_data {
    enum xgbe_port_mode port_mode;

    unsigned int port_id;

    unsigned int port_speeds;

    enum xgbe_conn_type conn_type;

    enum xgbe_mode cur_mode;
    enum xgbe_mode start_mode;

    unsigned int rrc_count;

    unsigned int mdio_addr;

    /* SFP Support */
    enum xgbe_sfp_comm sfp_comm;
    unsigned int sfp_mux_address;
    unsigned int sfp_mux_channel;

    unsigned int sfp_gpio_address;
    unsigned int sfp_gpio_mask;
    unsigned int sfp_gpio_inputs;
    unsigned int sfp_gpio_rx_los;
    unsigned int sfp_gpio_tx_fault;
    unsigned int sfp_gpio_mod_absent;
    unsigned int sfp_gpio_rate_select;

    unsigned int sfp_rx_los;
    unsigned int sfp_tx_fault;
    unsigned int sfp_mod_absent;
    unsigned int sfp_changed;
    unsigned int sfp_phy_avail;
    unsigned int sfp_cable_len;
    enum xgbe_sfp_base sfp_base;
    enum xgbe_sfp_cable sfp_cable;
    enum xgbe_sfp_speed sfp_speed;
    struct xgbe_sfp_eeprom sfp_eeprom;

    /* External PHY support */
    enum xgbe_mdio_mode phydev_mode;
    uint32_t phy_id;
    int phydev;
    enum xgbe_mdio_reset mdio_reset;
    unsigned int mdio_reset_addr;
    unsigned int mdio_reset_gpio;

    /* Re-driver support */
    unsigned int redrv;
    unsigned int
redrv_if;
    unsigned int redrv_addr;
    unsigned int redrv_lane;
    unsigned int redrv_model;

    /* KR AN support */
    unsigned int phy_cdr_notrack;
    unsigned int phy_cdr_delay;

    uint8_t port_sfp_inputs;
};

static enum xgbe_an_mode xgbe_phy_an_mode(struct xgbe_prv_data *pdata);

/* Thin wrapper over the I2C interface's transfer entry point. */
static int
xgbe_phy_i2c_xfer(struct xgbe_prv_data *pdata, struct xgbe_i2c_op *i2c_op)
{
    return (pdata->i2c_if.i2c_xfer(pdata, i2c_op));
}

/*
 * Write a re-driver register over I2C: 2-byte register (high byte carries
 * the read/write indicator), big-endian 16-bit value, 1-byte checksum;
 * then read back one status byte (0xff == accepted).  Each I2C step is
 * retried once on -EAGAIN.
 */
static int
xgbe_phy_redrv_write(struct xgbe_prv_data *pdata, unsigned int reg,
    unsigned int val)
{
    struct xgbe_phy_data *phy_data = pdata->phy_data;
    struct xgbe_i2c_op i2c_op;
    __be16 *redrv_val;
    uint8_t redrv_data[5], csum;
    unsigned int i, retry;
    int ret;

    /* High byte of register contains read/write indicator */
    redrv_data[0] = ((reg >> 8) & 0xff) << 1;
    redrv_data[1] = reg & 0xff;
    redrv_val = (__be16 *)&redrv_data[2];
    *redrv_val = cpu_to_be16(val);

    /* Calculate 1 byte checksum */
    csum = 0;
    for (i = 0; i < 4; i++) {
        csum += redrv_data[i];
        if (redrv_data[i] > csum)
            csum++;
    }
    redrv_data[4] = ~csum;

    retry = 1;
again1:
    i2c_op.cmd = XGBE_I2C_CMD_WRITE;
    i2c_op.target = phy_data->redrv_addr;
    i2c_op.len = sizeof(redrv_data);
    i2c_op.buf = redrv_data;
    ret = xgbe_phy_i2c_xfer(pdata, &i2c_op);
    if (ret) {
        if ((ret == -EAGAIN) && retry--)
            goto again1;
        return (ret);
    }

    retry = 1;
again2:
    i2c_op.cmd = XGBE_I2C_CMD_READ;
    i2c_op.target = phy_data->redrv_addr;
    i2c_op.len = 1;
    i2c_op.buf = redrv_data;
    ret = xgbe_phy_i2c_xfer(pdata, &i2c_op);
    if (ret) {
        if ((ret == -EAGAIN) && retry--)
            goto again2;
        return (ret);
    }

    if (redrv_data[0] != 0xff) {
        axgbe_error("Redriver write checksum error\n");
        ret = -EIO;
    }

    return (ret);
}

/* Raw I2C write of val_len bytes to the given target (one -EAGAIN retry). */
static int
xgbe_phy_i2c_write(struct xgbe_prv_data *pdata, unsigned int target,
    void *val, unsigned int val_len)
{
    struct xgbe_i2c_op i2c_op;
    int retry, ret;

    retry = 1;
again:
    /* Write the specfied register */
    i2c_op.cmd = XGBE_I2C_CMD_WRITE;
    i2c_op.target = target;
    i2c_op.len = val_len;
    i2c_op.buf = val;
    ret = xgbe_phy_i2c_xfer(pdata, &i2c_op);
    if ((ret == -EAGAIN) && retry--)
        goto again;

    return (ret);
}

/*
 * I2C register read: write the register pointer (reg_len bytes), then
 * read val_len bytes back.  Each phase is retried once on -EAGAIN.
 */
static int
xgbe_phy_i2c_read(struct xgbe_prv_data *pdata, unsigned int target,
    void *reg, unsigned int reg_len, void *val, unsigned int val_len)
{
    struct xgbe_i2c_op i2c_op;
    int retry, ret;

    axgbe_printf(3, "%s: target 0x%x reg_len %d val_len %d\n", __func__,
        target, reg_len, val_len);
    retry = 1;
again1:
    /* Set the specified register to read */
    i2c_op.cmd = XGBE_I2C_CMD_WRITE;
    i2c_op.target = target;
    i2c_op.len = reg_len;
    i2c_op.buf = reg;
    ret = xgbe_phy_i2c_xfer(pdata, &i2c_op);
    axgbe_printf(3, "%s: ret1 %d retry %d\n", __func__, ret, retry);
    if (ret) {
        if ((ret == -EAGAIN) && retry--)
            goto again1;
        return (ret);
    }

    retry = 1;
again2:
    /* Read the specfied register */
    i2c_op.cmd = XGBE_I2C_CMD_READ;
    i2c_op.target = target;
    i2c_op.len = val_len;
    i2c_op.buf = val;
    ret = xgbe_phy_i2c_xfer(pdata, &i2c_op);
    axgbe_printf(3, "%s: ret2 %d retry %d\n", __func__, ret, retry);
    if ((ret == -EAGAIN) && retry--)
        goto again2;

    return (ret);
}

/* Deselect all channels of the PCA9545 SFP mux (no-op for direct comm). */
static int
xgbe_phy_sfp_put_mux(struct xgbe_prv_data *pdata)
{
    struct xgbe_phy_data *phy_data = pdata->phy_data;
    struct xgbe_i2c_op i2c_op;
    uint8_t mux_channel;

    if (phy_data->sfp_comm == XGBE_SFP_COMM_DIRECT)
        return (0);

    /* Select no mux channels */
    mux_channel = 0;
    i2c_op.cmd = XGBE_I2C_CMD_WRITE;
    i2c_op.target = phy_data->sfp_mux_address;
    i2c_op.len = sizeof(mux_channel);
    i2c_op.buf = &mux_channel;

    return (xgbe_phy_i2c_xfer(pdata, &i2c_op));
}

/* Select this port's channel on the PCA9545 SFP mux (no-op for direct). */
static int
xgbe_phy_sfp_get_mux(struct xgbe_prv_data *pdata)
{
    struct xgbe_phy_data *phy_data = pdata->phy_data;
    struct xgbe_i2c_op i2c_op;
    uint8_t mux_channel;

    if (phy_data->sfp_comm == XGBE_SFP_COMM_DIRECT)
        return (0);

    /* Select desired mux channel */
    mux_channel = 1 << phy_data->sfp_mux_channel;
    i2c_op.cmd = XGBE_I2C_CMD_WRITE;
    i2c_op.target = phy_data->sfp_mux_address;
    i2c_op.len = sizeof(mux_channel);
    i2c_op.buf = &mux_channel;

    return (xgbe_phy_i2c_xfer(pdata, &i2c_op));
}

/* Release the software side of the shared-bus ownership. */
static void
xgbe_phy_put_comm_ownership(struct xgbe_prv_data *pdata)
{
    mtx_unlock(&xgbe_phy_comm_lock);
}

/*
 * Acquire exclusive use of the shared I2C/MDIO/GPIO buses: take the
 * software lock, then spin (up to ~5s) for both hardware mutex registers.
 * Returns 0 with the software lock held, or -ETIMEDOUT with it released.
 */
static int
xgbe_phy_get_comm_ownership(struct xgbe_prv_data *pdata)
{
    struct xgbe_phy_data *phy_data = pdata->phy_data;
    unsigned long timeout;
    unsigned int mutex_id;

    /* The I2C and MDIO/GPIO bus is multiplexed between multiple devices,
     * the driver needs to take the software mutex and then the hardware
     * mutexes before being able to use the busses.
     */
    mtx_lock(&xgbe_phy_comm_lock);

    /* Clear the mutexes */
    XP_IOWRITE(pdata, XP_I2C_MUTEX, XGBE_MUTEX_RELEASE);
    XP_IOWRITE(pdata, XP_MDIO_MUTEX, XGBE_MUTEX_RELEASE);

    /* Mutex formats are the same for I2C and MDIO/GPIO */
    mutex_id = 0;
    XP_SET_BITS(mutex_id, XP_I2C_MUTEX, ID, phy_data->port_id);
    XP_SET_BITS(mutex_id, XP_I2C_MUTEX, ACTIVE, 1);

    /* NOTE(review): 'ticks < timeout' is not wrap-safe; the usual idiom
     * is a signed difference comparison — confirm against style(9). */
    timeout = ticks + (5 * hz);
    while (ticks < timeout) {
        /* Must be all zeroes in order to obtain the mutex */
        if (XP_IOREAD(pdata, XP_I2C_MUTEX) ||
            XP_IOREAD(pdata, XP_MDIO_MUTEX)) {
            DELAY(200);
            continue;
        }

        /* Obtain the mutex */
        XP_IOWRITE(pdata, XP_I2C_MUTEX, mutex_id);
        XP_IOWRITE(pdata, XP_MDIO_MUTEX, mutex_id);

        return (0);
    }

    mtx_unlock(&xgbe_phy_comm_lock);

    axgbe_error("unable to obtain hardware mutexes\n");

    return (-ETIMEDOUT);
}

/* MII write via MDIO; verifies CL22/CL45 addressing matches phydev_mode. */
static int
xgbe_phy_mdio_mii_write(struct xgbe_prv_data *pdata, int addr, int reg,
    uint16_t val)
{
    struct xgbe_phy_data *phy_data = pdata->phy_data;

    if (reg & MII_ADDR_C45) {
        if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL45)
            return (-ENOTSUP);
    } else {
        if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL22)
            return (-ENOTSUP);
    }

    return (pdata->hw_if.write_ext_mii_regs(pdata, addr, reg, val));
}

/* MII write to the SFP's embedded PHY over I2C (reg + big-endian value). */
static int
xgbe_phy_i2c_mii_write(struct xgbe_prv_data *pdata, int reg, uint16_t val)
{
    __be16 *mii_val;
    uint8_t mii_data[3];
    int ret;

    ret = xgbe_phy_sfp_get_mux(pdata);
    if (ret)
        return (ret);

    mii_data[0] = reg & 0xff;
    mii_val = (__be16 *)&mii_data[1];
    *mii_val = cpu_to_be16(val);

    ret = xgbe_phy_i2c_write(pdata, XGBE_SFP_PHY_ADDRESS,
        mii_data, sizeof(mii_data));

    xgbe_phy_sfp_put_mux(pdata);

    return (ret);
}

/* Public MII write: takes bus ownership and routes by connection type. */
int
xgbe_phy_mii_write(struct
xgbe_prv_data *pdata, int addr, int reg, uint16_t val) { struct xgbe_phy_data *phy_data = pdata->phy_data; int ret; axgbe_printf(3, "%s: addr %d reg %d val %#x\n", __func__, addr, reg, val); ret = xgbe_phy_get_comm_ownership(pdata); if (ret) return (ret); if (phy_data->conn_type == XGBE_CONN_TYPE_SFP) ret = xgbe_phy_i2c_mii_write(pdata, reg, val); else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO) ret = xgbe_phy_mdio_mii_write(pdata, addr, reg, val); else ret = -ENOTSUP; xgbe_phy_put_comm_ownership(pdata); return (ret); } static int xgbe_phy_mdio_mii_read(struct xgbe_prv_data *pdata, int addr, int reg) { struct xgbe_phy_data *phy_data = pdata->phy_data; if (reg & MII_ADDR_C45) { if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL45) return (-ENOTSUP); } else { if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL22) return (-ENOTSUP); } return (pdata->hw_if.read_ext_mii_regs(pdata, addr, reg)); } static int xgbe_phy_i2c_mii_read(struct xgbe_prv_data *pdata, int reg) { __be16 mii_val; uint8_t mii_reg; int ret; ret = xgbe_phy_sfp_get_mux(pdata); if (ret) return (ret); mii_reg = reg; ret = xgbe_phy_i2c_read(pdata, XGBE_SFP_PHY_ADDRESS, &mii_reg, sizeof(mii_reg), &mii_val, sizeof(mii_val)); if (!ret) ret = be16_to_cpu(mii_val); xgbe_phy_sfp_put_mux(pdata); return (ret); } int xgbe_phy_mii_read(struct xgbe_prv_data *pdata, int addr, int reg) { struct xgbe_phy_data *phy_data = pdata->phy_data; int ret; axgbe_printf(3, "%s: addr %d reg %d\n", __func__, addr, reg); ret = xgbe_phy_get_comm_ownership(pdata); if (ret) return (ret); if (phy_data->conn_type == XGBE_CONN_TYPE_SFP) ret = xgbe_phy_i2c_mii_read(pdata, reg); else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO) ret = xgbe_phy_mdio_mii_read(pdata, addr, reg); else ret = -ENOTSUP; xgbe_phy_put_comm_ownership(pdata); return (ret); } static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; if (!phy_data->sfp_mod_absent && !phy_data->sfp_changed) return; 
XGBE_ZERO_SUP(&pdata->phy); if (phy_data->sfp_mod_absent) { pdata->phy.speed = SPEED_UNKNOWN; pdata->phy.duplex = DUPLEX_UNKNOWN; pdata->phy.autoneg = AUTONEG_ENABLE; pdata->phy.pause_autoneg = AUTONEG_ENABLE; XGBE_SET_SUP(&pdata->phy, Autoneg); XGBE_SET_SUP(&pdata->phy, Pause); XGBE_SET_SUP(&pdata->phy, Asym_Pause); XGBE_SET_SUP(&pdata->phy, TP); XGBE_SET_SUP(&pdata->phy, FIBRE); XGBE_LM_COPY(&pdata->phy, advertising, &pdata->phy, supported); return; } switch (phy_data->sfp_base) { case XGBE_SFP_BASE_1000_T: case XGBE_SFP_BASE_1000_SX: case XGBE_SFP_BASE_1000_LX: case XGBE_SFP_BASE_1000_CX: pdata->phy.speed = SPEED_UNKNOWN; pdata->phy.duplex = DUPLEX_UNKNOWN; pdata->phy.autoneg = AUTONEG_ENABLE; pdata->phy.pause_autoneg = AUTONEG_ENABLE; XGBE_SET_SUP(&pdata->phy, Autoneg); XGBE_SET_SUP(&pdata->phy, Pause); XGBE_SET_SUP(&pdata->phy, Asym_Pause); if (phy_data->sfp_base == XGBE_SFP_BASE_1000_T) { if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) XGBE_SET_SUP(&pdata->phy, 100baseT_Full); if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) XGBE_SET_SUP(&pdata->phy, 1000baseT_Full); } else { if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) XGBE_SET_SUP(&pdata->phy, 1000baseX_Full); } break; case XGBE_SFP_BASE_10000_SR: case XGBE_SFP_BASE_10000_LR: case XGBE_SFP_BASE_10000_LRM: case XGBE_SFP_BASE_10000_ER: case XGBE_SFP_BASE_10000_CR: pdata->phy.speed = SPEED_10000; pdata->phy.duplex = DUPLEX_FULL; pdata->phy.autoneg = AUTONEG_DISABLE; pdata->phy.pause_autoneg = AUTONEG_DISABLE; if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) { switch (phy_data->sfp_base) { case XGBE_SFP_BASE_10000_SR: XGBE_SET_SUP(&pdata->phy, 10000baseSR_Full); break; case XGBE_SFP_BASE_10000_LR: XGBE_SET_SUP(&pdata->phy, 10000baseLR_Full); break; case XGBE_SFP_BASE_10000_LRM: XGBE_SET_SUP(&pdata->phy, 10000baseLRM_Full); break; case XGBE_SFP_BASE_10000_ER: XGBE_SET_SUP(&pdata->phy, 10000baseER_Full); break; case XGBE_SFP_BASE_10000_CR: XGBE_SET_SUP(&pdata->phy, 10000baseCR_Full); 
break; default: break; } } break; default: pdata->phy.speed = SPEED_UNKNOWN; pdata->phy.duplex = DUPLEX_UNKNOWN; pdata->phy.autoneg = AUTONEG_DISABLE; pdata->phy.pause_autoneg = AUTONEG_DISABLE; break; } switch (phy_data->sfp_base) { case XGBE_SFP_BASE_1000_T: case XGBE_SFP_BASE_1000_CX: case XGBE_SFP_BASE_10000_CR: XGBE_SET_SUP(&pdata->phy, TP); break; default: XGBE_SET_SUP(&pdata->phy, FIBRE); break; } XGBE_LM_COPY(&pdata->phy, advertising, &pdata->phy, supported); axgbe_printf(1, "%s: link speed %d spf_base 0x%x pause_autoneg %d " "advert 0x%x support 0x%x\n", __func__, pdata->phy.speed, phy_data->sfp_base, pdata->phy.pause_autoneg, pdata->phy.advertising, pdata->phy.supported); } static bool xgbe_phy_sfp_bit_rate(struct xgbe_sfp_eeprom *sfp_eeprom, enum xgbe_sfp_speed sfp_speed) { uint8_t *sfp_base, min, max; sfp_base = sfp_eeprom->base; switch (sfp_speed) { case XGBE_SFP_SPEED_1000: min = XGBE_SFP_BASE_BR_1GBE_MIN; max = XGBE_SFP_BASE_BR_1GBE_MAX; break; case XGBE_SFP_SPEED_10000: min = XGBE_SFP_BASE_BR_10GBE_MIN; max = XGBE_SFP_BASE_BR_10GBE_MAX; break; default: return (false); } return ((sfp_base[XGBE_SFP_BASE_BR] >= min) && (sfp_base[XGBE_SFP_BASE_BR] <= max)); } static void xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; if (phy_data->phydev) phy_data->phydev = 0; } static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; unsigned int phy_id = phy_data->phy_id; if (phy_data->port_mode != XGBE_PORT_MODE_SFP) return (false); if ((phy_id & 0xfffffff0) != 0x01ff0cc0) return (false); /* Enable Base-T AN */ xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x16, 0x0001); xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x00, 0x9140); xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x16, 0x0000); /* Enable SGMII at 100Base-T/1000Base-T Full Duplex */ xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x1b, 0x9084); xgbe_phy_mii_write(pdata, 
phy_data->mdio_addr, 0x09, 0x0e00); xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x00, 0x8140); xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x04, 0x0d01); xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x00, 0x9140); axgbe_printf(3, "Finisar PHY quirk in place\n"); return (true); } static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom; unsigned int phy_id = phy_data->phy_id; int reg; if (phy_data->port_mode != XGBE_PORT_MODE_SFP) return (false); if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME], XGBE_BEL_FUSE_VENDOR, XGBE_SFP_BASE_VENDOR_NAME_LEN)) return (false); /* For Bel-Fuse, use the extra AN flag */ pdata->an_again = 1; if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_PN], XGBE_BEL_FUSE_PARTNO, XGBE_SFP_BASE_VENDOR_PN_LEN)) return (false); if ((phy_id & 0xfffffff0) != 0x03625d10) return (false); /* Disable RGMII mode */ xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x18, 0x7007); reg = xgbe_phy_mii_read(pdata, phy_data->mdio_addr, 0x18); xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x18, reg & ~0x0080); /* Enable fiber register bank */ xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x1c, 0x7c00); reg = xgbe_phy_mii_read(pdata, phy_data->mdio_addr, 0x1c); reg &= 0x03ff; reg &= ~0x0001; xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x1c, 0x8000 | 0x7c00 | reg | 0x0001); /* Power down SerDes */ reg = xgbe_phy_mii_read(pdata, phy_data->mdio_addr, 0x00); xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x00, reg | 0x00800); /* Configure SGMII-to-Copper mode */ xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x1c, 0x7c00); reg = xgbe_phy_mii_read(pdata, phy_data->mdio_addr, 0x1c); reg &= 0x03ff; reg &= ~0x0006; xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x1c, 0x8000 | 0x7c00 | reg | 0x0004); /* Power up SerDes */ reg = xgbe_phy_mii_read(pdata, phy_data->mdio_addr, 0x00); xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x00, 
reg & ~0x00800); /* Enable copper register bank */ xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x1c, 0x7c00); reg = xgbe_phy_mii_read(pdata, phy_data->mdio_addr, 0x1c); reg &= 0x03ff; reg &= ~0x0001; xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x1c, 0x8000 | 0x7c00 | reg); /* Power up SerDes */ reg = xgbe_phy_mii_read(pdata, phy_data->mdio_addr, 0x00); xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x00, reg & ~0x00800); axgbe_printf(3, "BelFuse PHY quirk in place\n"); return (true); } static void xgbe_phy_external_phy_quirks(struct xgbe_prv_data *pdata) { if (xgbe_phy_belfuse_phy_quirks(pdata)) return; if (xgbe_phy_finisar_phy_quirks(pdata)) return; } static int xgbe_get_phy_id(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; uint32_t oui, model, phy_id1, phy_id2; int phy_reg; phy_reg = xgbe_phy_mii_read(pdata, phy_data->mdio_addr, 0x02); if (phy_reg < 0) return (-EIO); phy_id1 = (phy_reg & 0xffff); phy_data->phy_id = (phy_reg & 0xffff) << 16; phy_reg = xgbe_phy_mii_read(pdata, phy_data->mdio_addr, 0x03); if (phy_reg < 0) return (-EIO); phy_id2 = (phy_reg & 0xffff); phy_data->phy_id |= (phy_reg & 0xffff); oui = MII_OUI(phy_id1, phy_id2); model = MII_MODEL(phy_id2); axgbe_printf(2, "%s: phy_id1: 0x%x phy_id2: 0x%x oui: %#x model %#x\n", __func__, phy_id1, phy_id2, oui, model); return (0); } static int xgbe_phy_start_aneg(struct xgbe_prv_data *pdata) { uint16_t ctl = 0; int changed = 0; int ret; if (AUTONEG_ENABLE != pdata->phy.autoneg) { if (SPEED_1000 == pdata->phy.speed) ctl |= BMCR_SPEED1; else if (SPEED_100 == pdata->phy.speed) ctl |= BMCR_SPEED100; if (DUPLEX_FULL == pdata->phy.duplex) ctl |= BMCR_FDX; ret = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_BMCR); if (ret) return (ret); ret = xgbe_phy_mii_write(pdata, pdata->mdio_addr, MII_BMCR, (ret & ~(~(BMCR_LOOP | BMCR_ISO | BMCR_PDOWN))) | ctl); } ctl = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_BMCR); if (ctl < 0) return (ctl); if (!(ctl & BMCR_AUTOEN) || (ctl & 
BMCR_ISO)) changed = 1; if (changed > 0) { ret = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_BMCR); if (ret) return (ret); ret = xgbe_phy_mii_write(pdata, pdata->mdio_addr, MII_BMCR, (ret & ~(BMCR_ISO)) | (BMCR_AUTOEN | BMCR_STARTNEG)); } return (0); } static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; int ret; axgbe_printf(2, "%s: phydev %d phydev_mode %d sfp_phy_avail %d phy_id " "0x%08x\n", __func__, phy_data->phydev, phy_data->phydev_mode, phy_data->sfp_phy_avail, phy_data->phy_id); /* If we already have a PHY, just return */ if (phy_data->phydev) { axgbe_printf(3, "%s: phy present already\n", __func__); return (0); } /* Clear the extra AN flag */ pdata->an_again = 0; /* Check for the use of an external PHY */ if (phy_data->phydev_mode == XGBE_MDIO_MODE_NONE) { axgbe_printf(3, "%s: phydev_mode %d\n", __func__, phy_data->phydev_mode); return (0); } /* For SFP, only use an external PHY if available */ if ((phy_data->port_mode == XGBE_PORT_MODE_SFP) && !phy_data->sfp_phy_avail) { axgbe_printf(3, "%s: port_mode %d avail %d\n", __func__, phy_data->port_mode, phy_data->sfp_phy_avail); return (0); } /* Set the proper MDIO mode for the PHY */ ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->mdio_addr, phy_data->phydev_mode); if (ret) { axgbe_error("mdio port/clause not compatible (%u/%u) ret %d\n", phy_data->mdio_addr, phy_data->phydev_mode, ret); return (ret); } ret = xgbe_get_phy_id(pdata); if (ret) return (ret); axgbe_printf(2, "Get phy_id 0x%08x\n", phy_data->phy_id); phy_data->phydev = 1; xgbe_phy_external_phy_quirks(pdata); xgbe_phy_start_aneg(pdata); return (0); } static void xgbe_phy_sfp_external_phy(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; int ret; axgbe_printf(3, "%s: sfp_changed: 0x%x\n", __func__, phy_data->sfp_changed); if (!phy_data->sfp_changed) return; phy_data->sfp_phy_avail = 0; if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T) return; /* 
Check access to the PHY by reading CTRL1 */
	ret = xgbe_phy_i2c_mii_read(pdata, MII_BMCR);
	if (ret < 0) {
		axgbe_error("%s: ext phy fail %d\n", __func__, ret);
		return;
	}

	/* Successfully accessed the PHY */
	phy_data->sfp_phy_avail = 1;
	axgbe_printf(3, "Successfully accessed External PHY\n");
}

/*
 * Return true when the SFP reports loss of RX signal.  Requires the
 * module to advertise RX_LOS support in its EEPROM options and the board
 * to route the RX_LOS GPIO; otherwise reports false.
 */
static bool
xgbe_phy_check_sfp_rx_los(struct xgbe_phy_data *phy_data)
{
	uint8_t *sfp_extd = phy_data->sfp_eeprom.extd;

	if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_RX_LOS))
		return (false);

	if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS)
		return (false);

	if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_rx_los))
		return (true);

	return (false);
}

/*
 * Return true when the SFP reports a transmit fault, subject to the same
 * module-capability and board-GPIO checks as RX_LOS.
 */
static bool
xgbe_phy_check_sfp_tx_fault(struct xgbe_phy_data *phy_data)
{
	uint8_t *sfp_extd = phy_data->sfp_eeprom.extd;

	if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_TX_FAULT))
		return (false);

	if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT)
		return (false);

	if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_tx_fault))
		return (true);

	return (false);
}

/* Return true when the MOD_ABS GPIO indicates no SFP module is inserted. */
static bool
xgbe_phy_check_sfp_mod_absent(struct xgbe_phy_data *phy_data)
{
	if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT)
		return (false);

	if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_mod_absent))
		return (true);

	return (false);
}

/*
 * Parse the SFP serial-ID EEPROM: validate the ID bytes, latch the
 * transceiver signal state, then classify the cable and transceiver
 * type/speed.
 */
static void
xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;
	struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
	uint8_t *sfp_base;

	sfp_base = sfp_eeprom->base;

	if (sfp_base[XGBE_SFP_BASE_ID] != XGBE_SFP_ID_SFP) {
		axgbe_error("base id %d\n", sfp_base[XGBE_SFP_BASE_ID]);
		return;
	}

	if (sfp_base[XGBE_SFP_BASE_EXT_ID] != XGBE_SFP_EXT_ID_SFP) {
		axgbe_error("base id %d\n", sfp_base[XGBE_SFP_BASE_EXT_ID]);
		return;
	}

	/* Update transceiver signals (eeprom extd/options) */
	phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data);
	phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data);

	/* Assume ACTIVE cable unless told it is PASSIVE */
	if
(sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_PASSIVE) { phy_data->sfp_cable = XGBE_SFP_CABLE_PASSIVE; phy_data->sfp_cable_len = sfp_base[XGBE_SFP_BASE_CU_CABLE_LEN]; } else phy_data->sfp_cable = XGBE_SFP_CABLE_ACTIVE; /* Determine the type of SFP */ if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR) phy_data->sfp_base = XGBE_SFP_BASE_10000_SR; else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_LR) phy_data->sfp_base = XGBE_SFP_BASE_10000_LR; else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_LRM) phy_data->sfp_base = XGBE_SFP_BASE_10000_LRM; else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_ER) phy_data->sfp_base = XGBE_SFP_BASE_10000_ER; else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_SX) phy_data->sfp_base = XGBE_SFP_BASE_1000_SX; else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_LX) phy_data->sfp_base = XGBE_SFP_BASE_1000_LX; else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_CX) phy_data->sfp_base = XGBE_SFP_BASE_1000_CX; else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_T) phy_data->sfp_base = XGBE_SFP_BASE_1000_T; else if ((phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE) && xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000)) phy_data->sfp_base = XGBE_SFP_BASE_10000_CR; switch (phy_data->sfp_base) { case XGBE_SFP_BASE_1000_T: phy_data->sfp_speed = XGBE_SFP_SPEED_100_1000; break; case XGBE_SFP_BASE_1000_SX: case XGBE_SFP_BASE_1000_LX: case XGBE_SFP_BASE_1000_CX: phy_data->sfp_speed = XGBE_SFP_SPEED_1000; break; case XGBE_SFP_BASE_10000_SR: case XGBE_SFP_BASE_10000_LR: case XGBE_SFP_BASE_10000_LRM: case XGBE_SFP_BASE_10000_ER: case XGBE_SFP_BASE_10000_CR: phy_data->sfp_speed = XGBE_SFP_SPEED_10000; break; default: break; } axgbe_printf(3, "%s: sfp_base: 0x%x sfp_speed: 0x%x sfp_cable: 0x%x " "rx_los 0x%x tx_fault 0x%x\n", __func__, phy_data->sfp_base, phy_data->sfp_speed, phy_data->sfp_cable, phy_data->sfp_rx_los, 
phy_data->sfp_tx_fault); } static void xgbe_phy_sfp_eeprom_info(struct xgbe_prv_data *pdata, struct xgbe_sfp_eeprom *sfp_eeprom) { struct xgbe_sfp_ascii sfp_ascii; char *sfp_data = (char *)&sfp_ascii; axgbe_printf(3, "SFP detected:\n"); memcpy(sfp_data, &sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME], XGBE_SFP_BASE_VENDOR_NAME_LEN); sfp_data[XGBE_SFP_BASE_VENDOR_NAME_LEN] = '\0'; axgbe_printf(3, " vendor: %s\n", sfp_data); memcpy(sfp_data, &sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_PN], XGBE_SFP_BASE_VENDOR_PN_LEN); sfp_data[XGBE_SFP_BASE_VENDOR_PN_LEN] = '\0'; axgbe_printf(3, " part number: %s\n", sfp_data); memcpy(sfp_data, &sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_REV], XGBE_SFP_BASE_VENDOR_REV_LEN); sfp_data[XGBE_SFP_BASE_VENDOR_REV_LEN] = '\0'; axgbe_printf(3, " revision level: %s\n", sfp_data); memcpy(sfp_data, &sfp_eeprom->extd[XGBE_SFP_BASE_VENDOR_SN], XGBE_SFP_BASE_VENDOR_SN_LEN); sfp_data[XGBE_SFP_BASE_VENDOR_SN_LEN] = '\0'; axgbe_printf(3, " serial number: %s\n", sfp_data); } static bool xgbe_phy_sfp_verify_eeprom(uint8_t cc_in, uint8_t *buf, unsigned int len) { uint8_t cc; for (cc = 0; len; buf++, len--) cc += *buf; return ((cc == cc_in) ? 
true : false); } static void dump_sfp_eeprom(struct xgbe_prv_data *pdata, uint8_t *sfp_base) { axgbe_printf(3, "sfp_base[XGBE_SFP_BASE_ID] : 0x%04x\n", sfp_base[XGBE_SFP_BASE_ID]); axgbe_printf(3, "sfp_base[XGBE_SFP_BASE_EXT_ID] : 0x%04x\n", sfp_base[XGBE_SFP_BASE_EXT_ID]); axgbe_printf(3, "sfp_base[XGBE_SFP_BASE_CABLE] : 0x%04x\n", sfp_base[XGBE_SFP_BASE_CABLE]); } static int xgbe_phy_sfp_read_eeprom(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; struct xgbe_sfp_eeprom sfp_eeprom, *eeprom; uint8_t eeprom_addr, *base; int ret; ret = xgbe_phy_sfp_get_mux(pdata); if (ret) { axgbe_error("I2C error setting SFP MUX\n"); return (ret); } /* Read the SFP serial ID eeprom */ eeprom_addr = 0; ret = xgbe_phy_i2c_read(pdata, XGBE_SFP_SERIAL_ID_ADDRESS, &eeprom_addr, sizeof(eeprom_addr), &sfp_eeprom, sizeof(sfp_eeprom)); eeprom = &sfp_eeprom; base = eeprom->base; dump_sfp_eeprom(pdata, base); if (ret) { axgbe_error("I2C error reading SFP EEPROM\n"); goto put; } /* Validate the contents read */ if (!xgbe_phy_sfp_verify_eeprom(sfp_eeprom.base[XGBE_SFP_BASE_CC], sfp_eeprom.base, sizeof(sfp_eeprom.base) - 1)) { axgbe_error("verify eeprom base failed\n"); ret = -EINVAL; goto put; } if (!xgbe_phy_sfp_verify_eeprom(sfp_eeprom.extd[XGBE_SFP_EXTD_CC], sfp_eeprom.extd, sizeof(sfp_eeprom.extd) - 1)) { axgbe_error("verify eeprom extd failed\n"); ret = -EINVAL; goto put; } /* Check for an added or changed SFP */ if (memcmp(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom))) { phy_data->sfp_changed = 1; xgbe_phy_sfp_eeprom_info(pdata, &sfp_eeprom); memcpy(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom)); xgbe_phy_free_phy_device(pdata); } else phy_data->sfp_changed = 0; put: xgbe_phy_sfp_put_mux(pdata); return (ret); } static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; uint8_t gpio_reg, gpio_ports[2]; int ret, prev_sfp_inputs = phy_data->port_sfp_inputs; int shift = GPIO_MASK_WIDTH * 
(3 - phy_data->port_id);

	/* Read the input port registers */
	axgbe_printf(3, "%s: befor sfp_mod:%d sfp_gpio_address:0x%x\n",
	    __func__, phy_data->sfp_mod_absent, phy_data->sfp_gpio_address);

	gpio_reg = 0;
	ret = xgbe_phy_i2c_read(pdata, phy_data->sfp_gpio_address, &gpio_reg,
	    sizeof(gpio_reg), gpio_ports, sizeof(gpio_ports));
	if (ret) {
		axgbe_error("%s: I2C error reading SFP GPIO addr:0x%x\n",
		    __func__, phy_data->sfp_gpio_address);
		return;
	}

	/* Combine the two GPIO port bytes into a 16-bit input snapshot */
	phy_data->sfp_gpio_inputs = (gpio_ports[1] << 8) | gpio_ports[0];
	/* Extract this port's 4-bit slice of the shared GPIO inputs */
	phy_data->port_sfp_inputs = (phy_data->sfp_gpio_inputs >> shift) & 0x0F;

	if (prev_sfp_inputs != phy_data->port_sfp_inputs)
		axgbe_printf(0, "%s: port_sfp_inputs: 0x%0x\n", __func__,
		    phy_data->port_sfp_inputs);

	phy_data->sfp_mod_absent = xgbe_phy_check_sfp_mod_absent(phy_data);

	axgbe_printf(3, "%s: after sfp_mod:%d sfp_gpio_inputs:0x%x\n",
	    __func__, phy_data->sfp_mod_absent, phy_data->sfp_gpio_inputs);
}

/* Mark the SFP module as removed and forget its PHY and EEPROM state. */
static void
xgbe_phy_sfp_mod_absent(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;

	xgbe_phy_free_phy_device(pdata);

	phy_data->sfp_mod_absent = 1;
	phy_data->sfp_phy_avail = 0;
	memset(&phy_data->sfp_eeprom, 0, sizeof(phy_data->sfp_eeprom));
}

/* Reset cached SFP signal state and classification to unknown/absent. */
static void
xgbe_phy_sfp_reset(struct xgbe_phy_data *phy_data)
{
	phy_data->sfp_rx_los = 0;
	phy_data->sfp_tx_fault = 0;
	phy_data->sfp_mod_absent = 1;

	phy_data->sfp_base = XGBE_SFP_BASE_UNKNOWN;
	phy_data->sfp_cable = XGBE_SFP_CABLE_UNKNOWN;
	phy_data->sfp_speed = XGBE_SFP_SPEED_UNKNOWN;
}

/*
 * Full SFP detection pass: sample the module GPIO signals under bus
 * ownership and, when a module is present, (re)read its EEPROM and probe
 * for an external PHY.
 */
static void
xgbe_phy_sfp_detect(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;
	int ret, prev_sfp_state = phy_data->sfp_mod_absent;

	/* Reset the SFP signals and info */
	xgbe_phy_sfp_reset(phy_data);

	ret = xgbe_phy_get_comm_ownership(pdata);
	if (ret)
		return;

	/* Read the SFP signals and check for module presence */
	xgbe_phy_sfp_signals(pdata);
	if (phy_data->sfp_mod_absent) {
		/* Log only on a present -> absent transition */
		if (prev_sfp_state != phy_data->sfp_mod_absent)
			axgbe_error("%s: mod absent\n", __func__);
xgbe_phy_sfp_mod_absent(pdata); goto put; } ret = xgbe_phy_sfp_read_eeprom(pdata); if (ret) { /* Treat any error as if there isn't an SFP plugged in */ axgbe_error("%s: eeprom read failed\n", __func__); xgbe_phy_sfp_reset(phy_data); xgbe_phy_sfp_mod_absent(pdata); goto put; } xgbe_phy_sfp_parse_eeprom(pdata); xgbe_phy_sfp_external_phy(pdata); put: xgbe_phy_sfp_phy_settings(pdata); axgbe_printf(3, "%s: phy speed: 0x%x duplex: 0x%x autoneg: 0x%x " "pause_autoneg: 0x%x\n", __func__, pdata->phy.speed, pdata->phy.duplex, pdata->phy.autoneg, pdata->phy.pause_autoneg); xgbe_phy_put_comm_ownership(pdata); } static int xgbe_phy_module_eeprom(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; uint8_t eeprom_addr, eeprom_data[XGBE_SFP_EEPROM_MAX]; struct xgbe_sfp_eeprom *sfp_eeprom; int ret; if (phy_data->port_mode != XGBE_PORT_MODE_SFP) { ret = -ENXIO; goto done; } if (phy_data->sfp_mod_absent) { ret = -EIO; goto done; } ret = xgbe_phy_get_comm_ownership(pdata); if (ret) { ret = -EIO; goto done; } ret = xgbe_phy_sfp_get_mux(pdata); if (ret) { axgbe_error("I2C error setting SFP MUX\n"); ret = -EIO; goto put_own; } /* Read the SFP serial ID eeprom */ eeprom_addr = 0; ret = xgbe_phy_i2c_read(pdata, XGBE_SFP_SERIAL_ID_ADDRESS, &eeprom_addr, sizeof(eeprom_addr), eeprom_data, XGBE_SFP_EEPROM_BASE_LEN); if (ret) { axgbe_error("I2C error reading SFP EEPROM\n"); ret = -EIO; goto put_mux; } sfp_eeprom = (struct xgbe_sfp_eeprom *)eeprom_data; if (XGBE_SFP_DIAGS_SUPPORTED(sfp_eeprom)) { /* Read the SFP diagnostic eeprom */ eeprom_addr = 0; ret = xgbe_phy_i2c_read(pdata, XGBE_SFP_DIAG_INFO_ADDRESS, &eeprom_addr, sizeof(eeprom_addr), eeprom_data + XGBE_SFP_EEPROM_BASE_LEN, XGBE_SFP_EEPROM_DIAG_LEN); if (ret) { axgbe_error("I2C error reading SFP DIAGS\n"); ret = -EIO; goto put_mux; } } put_mux: xgbe_phy_sfp_put_mux(pdata); put_own: xgbe_phy_put_comm_ownership(pdata); done: return (ret); } static int xgbe_phy_module_info(struct xgbe_prv_data *pdata) { struct 
xgbe_phy_data *phy_data = pdata->phy_data;

	if (phy_data->port_mode != XGBE_PORT_MODE_SFP)
		return (-ENXIO);

	if (phy_data->sfp_mod_absent)
		return (-EIO);

	return (0);
}

/*
 * Derive link-partner pause advertisement from the external PHY state.
 * tx/rx pause are cleared here and resolved later from the AN outcome.
 */
static void
xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;

	pdata->phy.tx_pause = 0;
	pdata->phy.rx_pause = 0;

	if (!phy_data->phydev)
		return;

	if (pdata->phy.pause)
		XGBE_SET_LP_ADV(&pdata->phy, Pause);

	if (pdata->phy.asym_pause)
		XGBE_SET_LP_ADV(&pdata->phy, Asym_Pause);

	axgbe_printf(1, "%s: pause tx/rx %d/%d\n", __func__,
	    pdata->phy.tx_pause, pdata->phy.rx_pause);
}

/*
 * Decode the result of Clause 37 SGMII auto-negotiation into an
 * operating mode.  Half duplex is not supported, so those outcomes map
 * to XGBE_MODE_UNKNOWN.
 */
static enum xgbe_mode
xgbe_phy_an37_sgmii_outcome(struct xgbe_prv_data *pdata)
{
	enum xgbe_mode mode;

	XGBE_SET_LP_ADV(&pdata->phy, Autoneg);
	XGBE_SET_LP_ADV(&pdata->phy, TP);

	axgbe_printf(1, "%s: pause_autoneg %d\n", __func__,
	    pdata->phy.pause_autoneg);

	/* Use external PHY to determine flow control */
	if (pdata->phy.pause_autoneg)
		xgbe_phy_phydev_flowctrl(pdata);

	switch (pdata->an_status & XGBE_SGMII_AN_LINK_SPEED) {
	case XGBE_SGMII_AN_LINK_SPEED_100:
		if (pdata->an_status & XGBE_SGMII_AN_LINK_DUPLEX) {
			XGBE_SET_LP_ADV(&pdata->phy, 100baseT_Full);
			mode = XGBE_MODE_SGMII_100;
		} else {
			/* Half-duplex not supported */
			XGBE_SET_LP_ADV(&pdata->phy, 100baseT_Half);
			mode = XGBE_MODE_UNKNOWN;
		}
		break;
	case XGBE_SGMII_AN_LINK_SPEED_1000:
		if (pdata->an_status & XGBE_SGMII_AN_LINK_DUPLEX) {
			XGBE_SET_LP_ADV(&pdata->phy, 1000baseT_Full);
			mode = XGBE_MODE_SGMII_1000;
		} else {
			/* Half-duplex not supported */
			XGBE_SET_LP_ADV(&pdata->phy, 1000baseT_Half);
			mode = XGBE_MODE_UNKNOWN;
		}
		break;
	default:
		mode = XGBE_MODE_UNKNOWN;
	}

	return (mode);
}

/*
 * Decode the result of Clause 37 (1000Base-X) auto-negotiation into an
 * operating mode, resolving pause settings from the advertisement and
 * link-partner ability registers.
 */
static enum xgbe_mode
xgbe_phy_an37_outcome(struct xgbe_prv_data *pdata)
{
	enum xgbe_mode mode;
	unsigned int ad_reg, lp_reg;

	XGBE_SET_LP_ADV(&pdata->phy, Autoneg);
	XGBE_SET_LP_ADV(&pdata->phy, FIBRE);

	/* Compare Advertisement and Link Partner register */
	ad_reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_ADVERTISE);
	lp_reg = XMDIO_READ(pdata, MDIO_MMD_VEND2,
MDIO_VEND2_AN_LP_ABILITY); if (lp_reg & 0x100) XGBE_SET_LP_ADV(&pdata->phy, Pause); if (lp_reg & 0x80) XGBE_SET_LP_ADV(&pdata->phy, Asym_Pause); axgbe_printf(1, "%s: pause_autoneg %d ad_reg 0x%x lp_reg 0x%x\n", __func__, pdata->phy.pause_autoneg, ad_reg, lp_reg); if (pdata->phy.pause_autoneg) { /* Set flow control based on auto-negotiation result */ pdata->phy.tx_pause = 0; pdata->phy.rx_pause = 0; if (ad_reg & lp_reg & 0x100) { pdata->phy.tx_pause = 1; pdata->phy.rx_pause = 1; } else if (ad_reg & lp_reg & 0x80) { if (ad_reg & 0x100) pdata->phy.rx_pause = 1; else if (lp_reg & 0x100) pdata->phy.tx_pause = 1; } } axgbe_printf(1, "%s: pause tx/rx %d/%d\n", __func__, pdata->phy.tx_pause, pdata->phy.rx_pause); if (lp_reg & 0x20) XGBE_SET_LP_ADV(&pdata->phy, 1000baseX_Full); /* Half duplex is not supported */ ad_reg &= lp_reg; mode = (ad_reg & 0x20) ? XGBE_MODE_X : XGBE_MODE_UNKNOWN; return (mode); } static enum xgbe_mode xgbe_phy_an73_redrv_outcome(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; enum xgbe_mode mode; unsigned int ad_reg, lp_reg; XGBE_SET_LP_ADV(&pdata->phy, Autoneg); XGBE_SET_LP_ADV(&pdata->phy, Backplane); axgbe_printf(1, "%s: pause_autoneg %d\n", __func__, pdata->phy.pause_autoneg); /* Use external PHY to determine flow control */ if (pdata->phy.pause_autoneg) xgbe_phy_phydev_flowctrl(pdata); /* Compare Advertisement and Link Partner register 2 */ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1); lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1); if (lp_reg & 0x80) XGBE_SET_LP_ADV(&pdata->phy, 10000baseKR_Full); if (lp_reg & 0x20) XGBE_SET_LP_ADV(&pdata->phy, 1000baseKX_Full); ad_reg &= lp_reg; if (ad_reg & 0x80) { switch (phy_data->port_mode) { case XGBE_PORT_MODE_BACKPLANE: mode = XGBE_MODE_KR; break; default: mode = XGBE_MODE_SFI; break; } } else if (ad_reg & 0x20) { switch (phy_data->port_mode) { case XGBE_PORT_MODE_BACKPLANE: mode = XGBE_MODE_KX_1000; break; case XGBE_PORT_MODE_1000BASE_X: mode 
= XGBE_MODE_X; break; case XGBE_PORT_MODE_SFP: switch (phy_data->sfp_base) { case XGBE_SFP_BASE_1000_T: if ((phy_data->phydev) && (pdata->phy.speed == SPEED_100)) mode = XGBE_MODE_SGMII_100; else mode = XGBE_MODE_SGMII_1000; break; case XGBE_SFP_BASE_1000_SX: case XGBE_SFP_BASE_1000_LX: case XGBE_SFP_BASE_1000_CX: default: mode = XGBE_MODE_X; break; } break; default: if ((phy_data->phydev) && (pdata->phy.speed == SPEED_100)) mode = XGBE_MODE_SGMII_100; else mode = XGBE_MODE_SGMII_1000; break; } } else { mode = XGBE_MODE_UNKNOWN; } /* Compare Advertisement and Link Partner register 3 */ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2); if (lp_reg & 0xc000) XGBE_SET_LP_ADV(&pdata->phy, 10000baseR_FEC); return (mode); } static enum xgbe_mode xgbe_phy_an73_outcome(struct xgbe_prv_data *pdata) { enum xgbe_mode mode; unsigned int ad_reg, lp_reg; XGBE_SET_LP_ADV(&pdata->phy, Autoneg); XGBE_SET_LP_ADV(&pdata->phy, Backplane); /* Compare Advertisement and Link Partner register 1 */ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE); lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA); if (lp_reg & 0x400) XGBE_SET_LP_ADV(&pdata->phy, Pause); if (lp_reg & 0x800) XGBE_SET_LP_ADV(&pdata->phy, Asym_Pause); axgbe_printf(1, "%s: pause_autoneg %d ad_reg 0x%x lp_reg 0x%x\n", __func__, pdata->phy.pause_autoneg, ad_reg, lp_reg); if (pdata->phy.pause_autoneg) { /* Set flow control based on auto-negotiation result */ pdata->phy.tx_pause = 0; pdata->phy.rx_pause = 0; if (ad_reg & lp_reg & 0x400) { pdata->phy.tx_pause = 1; pdata->phy.rx_pause = 1; } else if (ad_reg & lp_reg & 0x800) { if (ad_reg & 0x400) pdata->phy.rx_pause = 1; else if (lp_reg & 0x400) pdata->phy.tx_pause = 1; } } axgbe_printf(1, "%s: pause tx/rx %d/%d\n", __func__, pdata->phy.tx_pause, pdata->phy.rx_pause); /* Compare Advertisement and Link Partner register 2 */ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1); lp_reg = 
XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1); if (lp_reg & 0x80) XGBE_SET_LP_ADV(&pdata->phy, 10000baseKR_Full); if (lp_reg & 0x20) XGBE_SET_LP_ADV(&pdata->phy, 1000baseKX_Full); ad_reg &= lp_reg; if (ad_reg & 0x80) mode = XGBE_MODE_KR; else if (ad_reg & 0x20) mode = XGBE_MODE_KX_1000; else mode = XGBE_MODE_UNKNOWN; /* Compare Advertisement and Link Partner register 3 */ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2); if (lp_reg & 0xc000) XGBE_SET_LP_ADV(&pdata->phy, 10000baseR_FEC); return (mode); } static enum xgbe_mode xgbe_phy_an_outcome(struct xgbe_prv_data *pdata) { switch (pdata->an_mode) { case XGBE_AN_MODE_CL73: return (xgbe_phy_an73_outcome(pdata)); case XGBE_AN_MODE_CL73_REDRV: return (xgbe_phy_an73_redrv_outcome(pdata)); case XGBE_AN_MODE_CL37: return (xgbe_phy_an37_outcome(pdata)); case XGBE_AN_MODE_CL37_SGMII: return (xgbe_phy_an37_sgmii_outcome(pdata)); default: return (XGBE_MODE_UNKNOWN); } } static void xgbe_phy_an_advertising(struct xgbe_prv_data *pdata, struct xgbe_phy *dphy) { struct xgbe_phy_data *phy_data = pdata->phy_data; XGBE_LM_COPY(dphy, advertising, &pdata->phy, advertising); /* Without a re-driver, just return current advertising */ if (!phy_data->redrv) return; /* With the KR re-driver we need to advertise a single speed */ XGBE_CLR_ADV(dphy, 1000baseKX_Full); XGBE_CLR_ADV(dphy, 10000baseKR_Full); /* Advertise FEC support is present */ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE) XGBE_SET_ADV(dphy, 10000baseR_FEC); switch (phy_data->port_mode) { case XGBE_PORT_MODE_BACKPLANE: XGBE_SET_ADV(dphy, 10000baseKR_Full); break; case XGBE_PORT_MODE_BACKPLANE_2500: XGBE_SET_ADV(dphy, 1000baseKX_Full); break; case XGBE_PORT_MODE_1000BASE_T: case XGBE_PORT_MODE_1000BASE_X: case XGBE_PORT_MODE_NBASE_T: XGBE_SET_ADV(dphy, 1000baseKX_Full); break; case XGBE_PORT_MODE_10GBASE_T: if ((phy_data->phydev) && (pdata->phy.speed == SPEED_10000)) XGBE_SET_ADV(dphy, 
10000baseKR_Full); else XGBE_SET_ADV(dphy, 1000baseKX_Full); break; case XGBE_PORT_MODE_10GBASE_R: XGBE_SET_ADV(dphy, 10000baseKR_Full); break; case XGBE_PORT_MODE_SFP: switch (phy_data->sfp_base) { case XGBE_SFP_BASE_1000_T: case XGBE_SFP_BASE_1000_SX: case XGBE_SFP_BASE_1000_LX: case XGBE_SFP_BASE_1000_CX: XGBE_SET_ADV(dphy, 1000baseKX_Full); break; default: XGBE_SET_ADV(dphy, 10000baseKR_Full); break; } break; default: XGBE_SET_ADV(dphy, 10000baseKR_Full); break; } } static int xgbe_phy_an_config(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; int ret; ret = xgbe_phy_find_phy_device(pdata); if (ret) return (ret); axgbe_printf(2, "%s: find_phy_device return %s.\n", __func__, ret ? "Failure" : "Success"); if (!phy_data->phydev) return (0); ret = xgbe_phy_start_aneg(pdata); return (ret); } static enum xgbe_an_mode xgbe_phy_an_sfp_mode(struct xgbe_phy_data *phy_data) { switch (phy_data->sfp_base) { case XGBE_SFP_BASE_1000_T: return (XGBE_AN_MODE_CL37_SGMII); case XGBE_SFP_BASE_1000_SX: case XGBE_SFP_BASE_1000_LX: case XGBE_SFP_BASE_1000_CX: return (XGBE_AN_MODE_CL37); default: return (XGBE_AN_MODE_NONE); } } static enum xgbe_an_mode xgbe_phy_an_mode(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; /* A KR re-driver will always require CL73 AN */ if (phy_data->redrv) return (XGBE_AN_MODE_CL73_REDRV); switch (phy_data->port_mode) { case XGBE_PORT_MODE_BACKPLANE: return (XGBE_AN_MODE_CL73); case XGBE_PORT_MODE_BACKPLANE_2500: return (XGBE_AN_MODE_NONE); case XGBE_PORT_MODE_1000BASE_T: return (XGBE_AN_MODE_CL37_SGMII); case XGBE_PORT_MODE_1000BASE_X: return (XGBE_AN_MODE_CL37); case XGBE_PORT_MODE_NBASE_T: return (XGBE_AN_MODE_CL37_SGMII); case XGBE_PORT_MODE_10GBASE_T: return (XGBE_AN_MODE_CL73); case XGBE_PORT_MODE_10GBASE_R: return (XGBE_AN_MODE_NONE); case XGBE_PORT_MODE_SFP: return (xgbe_phy_an_sfp_mode(phy_data)); default: return (XGBE_AN_MODE_NONE); } } static int xgbe_phy_set_redrv_mode_mdio(struct 
xgbe_prv_data *pdata, enum xgbe_phy_redrv_mode mode) { struct xgbe_phy_data *phy_data = pdata->phy_data; uint16_t redrv_reg, redrv_val; redrv_reg = XGBE_PHY_REDRV_MODE_REG + (phy_data->redrv_lane * 0x1000); redrv_val = (uint16_t)mode; return (pdata->hw_if.write_ext_mii_regs(pdata, phy_data->redrv_addr, redrv_reg, redrv_val)); } static int xgbe_phy_set_redrv_mode_i2c(struct xgbe_prv_data *pdata, enum xgbe_phy_redrv_mode mode) { struct xgbe_phy_data *phy_data = pdata->phy_data; unsigned int redrv_reg; int ret; /* Calculate the register to write */ redrv_reg = XGBE_PHY_REDRV_MODE_REG + (phy_data->redrv_lane * 0x1000); ret = xgbe_phy_redrv_write(pdata, redrv_reg, mode); return (ret); } static void xgbe_phy_set_redrv_mode(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; enum xgbe_phy_redrv_mode mode; int ret; if (!phy_data->redrv) return; mode = XGBE_PHY_REDRV_MODE_CX; if ((phy_data->port_mode == XGBE_PORT_MODE_SFP) && (phy_data->sfp_base != XGBE_SFP_BASE_1000_CX) && (phy_data->sfp_base != XGBE_SFP_BASE_10000_CR)) mode = XGBE_PHY_REDRV_MODE_SR; ret = xgbe_phy_get_comm_ownership(pdata); if (ret) return; axgbe_printf(2, "%s: redrv_if set: %d\n", __func__, phy_data->redrv_if); if (phy_data->redrv_if) xgbe_phy_set_redrv_mode_i2c(pdata, mode); else xgbe_phy_set_redrv_mode_mdio(pdata, mode); xgbe_phy_put_comm_ownership(pdata); } static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata, unsigned int cmd, unsigned int sub_cmd) { unsigned int s0 = 0; unsigned int wait; /* Log if a previous command did not complete */ if (XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS)) axgbe_error("firmware mailbox not ready for command\n"); /* Construct the command */ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, cmd); XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, sub_cmd); /* Issue the command */ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0); XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0); XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1); /* Wait for 
command to complete */ wait = XGBE_RATECHANGE_COUNT; while (wait--) { if (!XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS)) { axgbe_printf(3, "%s: Rate change done\n", __func__); return; } DELAY(2000); } axgbe_printf(3, "firmware mailbox command did not complete\n"); } static void xgbe_phy_rrc(struct xgbe_prv_data *pdata) { /* Receiver Reset Cycle */ xgbe_phy_perform_ratechange(pdata, 5, 0); axgbe_printf(3, "receiver reset complete\n"); } static void xgbe_phy_power_off(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; /* Power off */ xgbe_phy_perform_ratechange(pdata, 0, 0); phy_data->cur_mode = XGBE_MODE_UNKNOWN; axgbe_printf(3, "phy powered off\n"); } static void xgbe_phy_sfi_mode(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; xgbe_phy_set_redrv_mode(pdata); /* 10G/SFI */ axgbe_printf(3, "%s: cable %d len %d\n", __func__, phy_data->sfp_cable, phy_data->sfp_cable_len); if (phy_data->sfp_cable != XGBE_SFP_CABLE_PASSIVE) xgbe_phy_perform_ratechange(pdata, 3, 0); else { if (phy_data->sfp_cable_len <= 1) xgbe_phy_perform_ratechange(pdata, 3, 1); else if (phy_data->sfp_cable_len <= 3) xgbe_phy_perform_ratechange(pdata, 3, 2); else xgbe_phy_perform_ratechange(pdata, 3, 3); } phy_data->cur_mode = XGBE_MODE_SFI; axgbe_printf(3, "10GbE SFI mode set\n"); } static void xgbe_phy_x_mode(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; xgbe_phy_set_redrv_mode(pdata); /* 1G/X */ xgbe_phy_perform_ratechange(pdata, 1, 3); phy_data->cur_mode = XGBE_MODE_X; axgbe_printf(3, "1GbE X mode set\n"); } static void xgbe_phy_sgmii_1000_mode(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; xgbe_phy_set_redrv_mode(pdata); /* 1G/SGMII */ xgbe_phy_perform_ratechange(pdata, 1, 2); phy_data->cur_mode = XGBE_MODE_SGMII_1000; axgbe_printf(2, "1GbE SGMII mode set\n"); } static void xgbe_phy_sgmii_100_mode(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = 
pdata->phy_data; xgbe_phy_set_redrv_mode(pdata); /* 100M/SGMII */ xgbe_phy_perform_ratechange(pdata, 1, 1); phy_data->cur_mode = XGBE_MODE_SGMII_100; axgbe_printf(3, "100MbE SGMII mode set\n"); } static void xgbe_phy_kr_mode(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; xgbe_phy_set_redrv_mode(pdata); /* 10G/KR */ xgbe_phy_perform_ratechange(pdata, 4, 0); phy_data->cur_mode = XGBE_MODE_KR; axgbe_printf(3, "10GbE KR mode set\n"); } static void xgbe_phy_kx_2500_mode(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; xgbe_phy_set_redrv_mode(pdata); /* 2.5G/KX */ xgbe_phy_perform_ratechange(pdata, 2, 0); phy_data->cur_mode = XGBE_MODE_KX_2500; axgbe_printf(3, "2.5GbE KX mode set\n"); } static void xgbe_phy_kx_1000_mode(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; xgbe_phy_set_redrv_mode(pdata); /* 1G/KX */ xgbe_phy_perform_ratechange(pdata, 1, 3); phy_data->cur_mode = XGBE_MODE_KX_1000; axgbe_printf(3, "1GbE KX mode set\n"); } static enum xgbe_mode xgbe_phy_cur_mode(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; return (phy_data->cur_mode); } static enum xgbe_mode xgbe_phy_switch_baset_mode(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; /* No switching if not 10GBase-T */ if (phy_data->port_mode != XGBE_PORT_MODE_10GBASE_T) return (xgbe_phy_cur_mode(pdata)); switch (xgbe_phy_cur_mode(pdata)) { case XGBE_MODE_SGMII_100: case XGBE_MODE_SGMII_1000: return (XGBE_MODE_KR); case XGBE_MODE_KR: default: return (XGBE_MODE_SGMII_1000); } } static enum xgbe_mode xgbe_phy_switch_bp_2500_mode(struct xgbe_prv_data *pdata) { return (XGBE_MODE_KX_2500); } static enum xgbe_mode xgbe_phy_switch_bp_mode(struct xgbe_prv_data *pdata) { /* If we are in KR switch to KX, and vice-versa */ switch (xgbe_phy_cur_mode(pdata)) { case XGBE_MODE_KX_1000: return (XGBE_MODE_KR); case XGBE_MODE_KR: default: return (XGBE_MODE_KX_1000); } 
} static enum xgbe_mode xgbe_phy_switch_mode(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; switch (phy_data->port_mode) { case XGBE_PORT_MODE_BACKPLANE: return (xgbe_phy_switch_bp_mode(pdata)); case XGBE_PORT_MODE_BACKPLANE_2500: return (xgbe_phy_switch_bp_2500_mode(pdata)); case XGBE_PORT_MODE_1000BASE_T: case XGBE_PORT_MODE_NBASE_T: case XGBE_PORT_MODE_10GBASE_T: return (xgbe_phy_switch_baset_mode(pdata)); case XGBE_PORT_MODE_1000BASE_X: case XGBE_PORT_MODE_10GBASE_R: case XGBE_PORT_MODE_SFP: /* No switching, so just return current mode */ return (xgbe_phy_cur_mode(pdata)); default: return (XGBE_MODE_UNKNOWN); } } static enum xgbe_mode xgbe_phy_get_basex_mode(struct xgbe_phy_data *phy_data, int speed) { switch (speed) { case SPEED_1000: return (XGBE_MODE_X); case SPEED_10000: return (XGBE_MODE_KR); default: return (XGBE_MODE_UNKNOWN); } } static enum xgbe_mode xgbe_phy_get_baset_mode(struct xgbe_phy_data *phy_data, int speed) { switch (speed) { case SPEED_100: return (XGBE_MODE_SGMII_100); case SPEED_1000: return (XGBE_MODE_SGMII_1000); case SPEED_2500: return (XGBE_MODE_KX_2500); case SPEED_10000: return (XGBE_MODE_KR); default: return (XGBE_MODE_UNKNOWN); } } static enum xgbe_mode xgbe_phy_get_sfp_mode(struct xgbe_phy_data *phy_data, int speed) { switch (speed) { case SPEED_100: return (XGBE_MODE_SGMII_100); case SPEED_1000: if (phy_data->sfp_base == XGBE_SFP_BASE_1000_T) return (XGBE_MODE_SGMII_1000); else return (XGBE_MODE_X); case SPEED_10000: case SPEED_UNKNOWN: return (XGBE_MODE_SFI); default: return (XGBE_MODE_UNKNOWN); } } static enum xgbe_mode xgbe_phy_get_bp_2500_mode(int speed) { switch (speed) { case SPEED_2500: return (XGBE_MODE_KX_2500); default: return (XGBE_MODE_UNKNOWN); } } static enum xgbe_mode xgbe_phy_get_bp_mode(int speed) { switch (speed) { case SPEED_1000: return (XGBE_MODE_KX_1000); case SPEED_10000: return (XGBE_MODE_KR); default: return (XGBE_MODE_UNKNOWN); } } static enum xgbe_mode 
xgbe_phy_get_mode(struct xgbe_prv_data *pdata, int speed) { struct xgbe_phy_data *phy_data = pdata->phy_data; switch (phy_data->port_mode) { case XGBE_PORT_MODE_BACKPLANE: return (xgbe_phy_get_bp_mode(speed)); case XGBE_PORT_MODE_BACKPLANE_2500: return (xgbe_phy_get_bp_2500_mode(speed)); case XGBE_PORT_MODE_1000BASE_T: case XGBE_PORT_MODE_NBASE_T: case XGBE_PORT_MODE_10GBASE_T: return (xgbe_phy_get_baset_mode(phy_data, speed)); case XGBE_PORT_MODE_1000BASE_X: case XGBE_PORT_MODE_10GBASE_R: return (xgbe_phy_get_basex_mode(phy_data, speed)); case XGBE_PORT_MODE_SFP: return (xgbe_phy_get_sfp_mode(phy_data, speed)); default: return (XGBE_MODE_UNKNOWN); } } static void xgbe_phy_set_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { switch (mode) { case XGBE_MODE_KX_1000: xgbe_phy_kx_1000_mode(pdata); break; case XGBE_MODE_KX_2500: xgbe_phy_kx_2500_mode(pdata); break; case XGBE_MODE_KR: xgbe_phy_kr_mode(pdata); break; case XGBE_MODE_SGMII_100: xgbe_phy_sgmii_100_mode(pdata); break; case XGBE_MODE_SGMII_1000: xgbe_phy_sgmii_1000_mode(pdata); break; case XGBE_MODE_X: xgbe_phy_x_mode(pdata); break; case XGBE_MODE_SFI: xgbe_phy_sfi_mode(pdata); break; default: break; } } static void xgbe_phy_get_type(struct xgbe_prv_data *pdata, struct ifmediareq * ifmr) { struct xgbe_phy_data *phy_data = pdata->phy_data; switch (pdata->phy.speed) { case SPEED_10000: if (phy_data->port_mode == XGBE_PORT_MODE_BACKPLANE) ifmr->ifm_active |= IFM_10G_KR; else if(phy_data->port_mode == XGBE_PORT_MODE_10GBASE_T) ifmr->ifm_active |= IFM_10G_T; else if(phy_data->port_mode == XGBE_PORT_MODE_10GBASE_R) ifmr->ifm_active |= IFM_10G_KR; else if(phy_data->port_mode == XGBE_PORT_MODE_SFP) ifmr->ifm_active |= IFM_10G_SFI; else ifmr->ifm_active |= IFM_OTHER; break; case SPEED_2500: if (phy_data->port_mode == XGBE_PORT_MODE_BACKPLANE_2500) ifmr->ifm_active |= IFM_2500_KX; else ifmr->ifm_active |= IFM_OTHER; break; case SPEED_1000: if (phy_data->port_mode == XGBE_PORT_MODE_BACKPLANE) ifmr->ifm_active |= 
IFM_1000_KX; else if(phy_data->port_mode == XGBE_PORT_MODE_1000BASE_T) ifmr->ifm_active |= IFM_1000_T; #if 0 else if(phy_data->port_mode == XGBE_PORT_MODE_1000BASE_X) ifmr->ifm_active |= IFM_1000_SX; ifmr->ifm_active |= IFM_1000_LX; ifmr->ifm_active |= IFM_1000_CX; #endif else if(phy_data->port_mode == XGBE_PORT_MODE_SFP) ifmr->ifm_active |= IFM_1000_SGMII; else ifmr->ifm_active |= IFM_OTHER; break; case SPEED_100: if(phy_data->port_mode == XGBE_PORT_MODE_NBASE_T) ifmr->ifm_active |= IFM_100_T; else if(phy_data->port_mode == XGBE_PORT_MODE_SFP) ifmr->ifm_active |= IFM_1000_SGMII; else ifmr->ifm_active |= IFM_OTHER; break; default: ifmr->ifm_active |= IFM_OTHER; axgbe_printf(1, "Unknown mode detected\n"); break; } } static bool xgbe_phy_check_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode, bool advert) { if (pdata->phy.autoneg == AUTONEG_ENABLE) return (advert); else { enum xgbe_mode cur_mode; cur_mode = xgbe_phy_get_mode(pdata, pdata->phy.speed); if (cur_mode == mode) return (true); } return (false); } static bool xgbe_phy_use_basex_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { switch (mode) { case XGBE_MODE_X: return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy, 1000baseX_Full))); case XGBE_MODE_KR: return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy, 10000baseKR_Full))); default: return (false); } } static bool xgbe_phy_use_baset_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { axgbe_printf(3, "%s: check mode %d\n", __func__, mode); switch (mode) { case XGBE_MODE_SGMII_100: return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy, 100baseT_Full))); case XGBE_MODE_SGMII_1000: return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy, 1000baseT_Full))); case XGBE_MODE_KX_2500: return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy, 2500baseT_Full))); case XGBE_MODE_KR: return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy, 10000baseT_Full))); default: return (false); } } static bool 
xgbe_phy_use_sfp_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { struct xgbe_phy_data *phy_data = pdata->phy_data; switch (mode) { case XGBE_MODE_X: if (phy_data->sfp_base == XGBE_SFP_BASE_1000_T) return (false); return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy, 1000baseX_Full))); case XGBE_MODE_SGMII_100: if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T) return (false); return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy, 100baseT_Full))); case XGBE_MODE_SGMII_1000: if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T) return (false); return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy, 1000baseT_Full))); case XGBE_MODE_SFI: if (phy_data->sfp_mod_absent) return (true); return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy, 10000baseSR_Full) || XGBE_ADV(&pdata->phy, 10000baseLR_Full) || XGBE_ADV(&pdata->phy, 10000baseLRM_Full) || XGBE_ADV(&pdata->phy, 10000baseER_Full) || XGBE_ADV(&pdata->phy, 10000baseCR_Full))); default: return (false); } } static bool xgbe_phy_use_bp_2500_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { switch (mode) { case XGBE_MODE_KX_2500: return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy, 2500baseX_Full))); default: return (false); } } static bool xgbe_phy_use_bp_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { switch (mode) { case XGBE_MODE_KX_1000: return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy, 1000baseKX_Full))); case XGBE_MODE_KR: return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy, 10000baseKR_Full))); default: return (false); } } static bool xgbe_phy_use_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { struct xgbe_phy_data *phy_data = pdata->phy_data; switch (phy_data->port_mode) { case XGBE_PORT_MODE_BACKPLANE: return (xgbe_phy_use_bp_mode(pdata, mode)); case XGBE_PORT_MODE_BACKPLANE_2500: return (xgbe_phy_use_bp_2500_mode(pdata, mode)); case XGBE_PORT_MODE_1000BASE_T: axgbe_printf(3, "use_mode %s\n", xgbe_phy_use_baset_mode(pdata, 
mode) ? "found" : "Not found"); case XGBE_PORT_MODE_NBASE_T: case XGBE_PORT_MODE_10GBASE_T: return (xgbe_phy_use_baset_mode(pdata, mode)); case XGBE_PORT_MODE_1000BASE_X: case XGBE_PORT_MODE_10GBASE_R: return (xgbe_phy_use_basex_mode(pdata, mode)); case XGBE_PORT_MODE_SFP: return (xgbe_phy_use_sfp_mode(pdata, mode)); default: return (false); } } static bool xgbe_phy_valid_speed_basex_mode(struct xgbe_phy_data *phy_data, int speed) { switch (speed) { case SPEED_1000: return (phy_data->port_mode == XGBE_PORT_MODE_1000BASE_X); case SPEED_10000: return (phy_data->port_mode == XGBE_PORT_MODE_10GBASE_R); default: return (false); } } static bool xgbe_phy_valid_speed_baset_mode(struct xgbe_phy_data *phy_data, int speed) { switch (speed) { case SPEED_100: case SPEED_1000: return (true); case SPEED_2500: return (phy_data->port_mode == XGBE_PORT_MODE_NBASE_T); case SPEED_10000: return (phy_data->port_mode == XGBE_PORT_MODE_10GBASE_T); default: return (false); } } static bool xgbe_phy_valid_speed_sfp_mode(struct xgbe_phy_data *phy_data, int speed) { switch (speed) { case SPEED_100: return (phy_data->sfp_speed == XGBE_SFP_SPEED_100_1000); case SPEED_1000: return ((phy_data->sfp_speed == XGBE_SFP_SPEED_100_1000) || (phy_data->sfp_speed == XGBE_SFP_SPEED_1000)); case SPEED_10000: return (phy_data->sfp_speed == XGBE_SFP_SPEED_10000); default: return (false); } } static bool xgbe_phy_valid_speed_bp_2500_mode(int speed) { switch (speed) { case SPEED_2500: return (true); default: return (false); } } static bool xgbe_phy_valid_speed_bp_mode(int speed) { switch (speed) { case SPEED_1000: case SPEED_10000: return (true); default: return (false); } } static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed) { struct xgbe_phy_data *phy_data = pdata->phy_data; switch (phy_data->port_mode) { case XGBE_PORT_MODE_BACKPLANE: return (xgbe_phy_valid_speed_bp_mode(speed)); case XGBE_PORT_MODE_BACKPLANE_2500: return (xgbe_phy_valid_speed_bp_2500_mode(speed)); case 
XGBE_PORT_MODE_1000BASE_T: case XGBE_PORT_MODE_NBASE_T: case XGBE_PORT_MODE_10GBASE_T: return (xgbe_phy_valid_speed_baset_mode(phy_data, speed)); case XGBE_PORT_MODE_1000BASE_X: case XGBE_PORT_MODE_10GBASE_R: return (xgbe_phy_valid_speed_basex_mode(phy_data, speed)); case XGBE_PORT_MODE_SFP: return (xgbe_phy_valid_speed_sfp_mode(phy_data, speed)); default: return (false); } } static int xgbe_upd_link(struct xgbe_prv_data *pdata) { int reg; axgbe_printf(2, "%s: Link %d\n", __func__, pdata->phy.link); reg = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_BMSR); if (reg < 0) return (reg); if ((reg & BMSR_LINK) == 0) pdata->phy.link = 0; else pdata->phy.link = 1; axgbe_printf(2, "Link: %d updated reg %#x\n", pdata->phy.link, reg); return (0); } static int xgbe_phy_read_status(struct xgbe_prv_data *pdata) { - int common_adv_gb; + int common_adv_gb = 0; int common_adv; int lpagb = 0; int adv, lpa; int ret; ret = xgbe_upd_link(pdata); if (ret) { axgbe_printf(2, "Link Update return %d\n", ret); return (ret); } if (AUTONEG_ENABLE == pdata->phy.autoneg) { if (pdata->phy.supported == SUPPORTED_1000baseT_Half || pdata->phy.supported == SUPPORTED_1000baseT_Full) { lpagb = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_100T2SR); if (lpagb < 0) return (lpagb); adv = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_100T2CR); if (adv < 0) return (adv); if (lpagb & GTSR_MAN_MS_FLT) { if (adv & GTCR_MAN_MS) axgbe_printf(2, "Master/Slave Resolution " "failed, maybe conflicting manual settings\n"); else axgbe_printf(2, "Master/Slave Resolution failed\n"); return (-ENOLINK); } if (pdata->phy.supported == SUPPORTED_1000baseT_Half) - XGBE_ADV(&pdata->phy, 1000baseT_Half); + XGBE_SET_ADV(&pdata->phy, 1000baseT_Half); else if (pdata->phy.supported == SUPPORTED_1000baseT_Full) - XGBE_ADV(&pdata->phy, 1000baseT_Full); + XGBE_SET_ADV(&pdata->phy, 1000baseT_Full); common_adv_gb = lpagb & adv << 2; } lpa = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_ANLPAR); if (lpa < 0) return (lpa); if 
(pdata->phy.supported == SUPPORTED_Autoneg) - XGBE_ADV(&pdata->phy, Autoneg); + XGBE_SET_ADV(&pdata->phy, Autoneg); adv = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_ANAR); if (adv < 0) return (adv); common_adv = lpa & adv; pdata->phy.speed = SPEED_10; pdata->phy.duplex = DUPLEX_HALF; pdata->phy.pause = 0; pdata->phy.asym_pause = 0; axgbe_printf(2, "%s: lpa %#x adv %#x common_adv_gb %#x " "common_adv %#x\n", __func__, lpa, adv, common_adv_gb, common_adv); if (common_adv_gb & (GTSR_LP_1000TFDX | GTSR_LP_1000THDX)) { axgbe_printf(2, "%s: SPEED 1000\n", __func__); pdata->phy.speed = SPEED_1000; if (common_adv_gb & GTSR_LP_1000TFDX) pdata->phy.duplex = DUPLEX_FULL; } else if (common_adv & (ANLPAR_TX_FD | ANLPAR_TX)) { axgbe_printf(2, "%s: SPEED 100\n", __func__); pdata->phy.speed = SPEED_100; if (common_adv & ANLPAR_TX_FD) pdata->phy.duplex = DUPLEX_FULL; } else if (common_adv & ANLPAR_10_FD) pdata->phy.duplex = DUPLEX_FULL; if (pdata->phy.duplex == DUPLEX_FULL) { pdata->phy.pause = lpa & ANLPAR_FC ? 1 : 0; pdata->phy.asym_pause = lpa & LPA_PAUSE_ASYM ? 
1 : 0; } } else { int bmcr = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_BMCR); if (bmcr < 0) return (bmcr); if (bmcr & BMCR_FDX) pdata->phy.duplex = DUPLEX_FULL; else pdata->phy.duplex = DUPLEX_HALF; if (bmcr & BMCR_SPEED1) pdata->phy.speed = SPEED_1000; else if (bmcr & BMCR_SPEED100) pdata->phy.speed = SPEED_100; else pdata->phy.speed = SPEED_10; pdata->phy.pause = 0; pdata->phy.asym_pause = 0; axgbe_printf(2, "%s: link speed %#x duplex %#x media %#x " "autoneg %#x\n", __func__, pdata->phy.speed, pdata->phy.duplex, pdata->phy.link, pdata->phy.autoneg); } return (0); } static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart) { struct xgbe_phy_data *phy_data = pdata->phy_data; struct mii_data *mii = NULL; unsigned int reg; int ret; *an_restart = 0; if (phy_data->port_mode == XGBE_PORT_MODE_SFP) { /* Check SFP signals */ axgbe_printf(3, "%s: calling phy detect\n", __func__); xgbe_phy_sfp_detect(pdata); if (phy_data->sfp_changed) { axgbe_printf(1, "%s: SFP changed observed\n", __func__); *an_restart = 1; return (0); } if (phy_data->sfp_mod_absent || phy_data->sfp_rx_los) { axgbe_printf(1, "%s: SFP absent 0x%x & sfp_rx_los 0x%x\n", __func__, phy_data->sfp_mod_absent, phy_data->sfp_rx_los); return (0); } } else { mii = device_get_softc(pdata->axgbe_miibus); mii_tick(mii); ret = xgbe_phy_read_status(pdata); if (ret) { axgbe_printf(2, "Link: Read status returned %d\n", ret); return (ret); } axgbe_printf(2, "%s: link speed %#x duplex %#x media %#x " "autoneg %#x\n", __func__, pdata->phy.speed, pdata->phy.duplex, pdata->phy.link, pdata->phy.autoneg); ret = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_BMSR); ret = (ret < 0) ? 
ret : (ret & BMSR_ACOMP); axgbe_printf(2, "Link: BMCR returned %d\n", ret); if ((pdata->phy.autoneg == AUTONEG_ENABLE) && !ret) return (0); return (pdata->phy.link); } /* Link status is latched low, so read once to clear * and then read again to get current state */ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); axgbe_printf(1, "%s: link_status reg: 0x%x\n", __func__, reg); if (reg & MDIO_STAT1_LSTATUS) return (1); /* No link, attempt a receiver reset cycle */ if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) { axgbe_printf(1, "ENTERED RRC: rrc_count: %d\n", phy_data->rrc_count); phy_data->rrc_count = 0; xgbe_phy_rrc(pdata); } return (0); } static void xgbe_phy_sfp_gpio_setup(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; phy_data->sfp_gpio_address = XGBE_GPIO_ADDRESS_PCA9555 + XP_GET_BITS(pdata->pp3, XP_PROP_3, GPIO_ADDR); phy_data->sfp_gpio_mask = XP_GET_BITS(pdata->pp3, XP_PROP_3, GPIO_MASK); phy_data->sfp_gpio_rx_los = XP_GET_BITS(pdata->pp3, XP_PROP_3, GPIO_RX_LOS); phy_data->sfp_gpio_tx_fault = XP_GET_BITS(pdata->pp3, XP_PROP_3, GPIO_TX_FAULT); phy_data->sfp_gpio_mod_absent = XP_GET_BITS(pdata->pp3, XP_PROP_3, GPIO_MOD_ABS); phy_data->sfp_gpio_rate_select = XP_GET_BITS(pdata->pp3, XP_PROP_3, GPIO_RATE_SELECT); DBGPR("SFP: gpio_address=%#x\n", phy_data->sfp_gpio_address); DBGPR("SFP: gpio_mask=%#x\n", phy_data->sfp_gpio_mask); DBGPR("SFP: gpio_rx_los=%u\n", phy_data->sfp_gpio_rx_los); DBGPR("SFP: gpio_tx_fault=%u\n", phy_data->sfp_gpio_tx_fault); DBGPR("SFP: gpio_mod_absent=%u\n", phy_data->sfp_gpio_mod_absent); DBGPR("SFP: gpio_rate_select=%u\n", phy_data->sfp_gpio_rate_select); } static void xgbe_phy_sfp_comm_setup(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; unsigned int mux_addr_hi, mux_addr_lo; mux_addr_hi = XP_GET_BITS(pdata->pp4, XP_PROP_4, MUX_ADDR_HI); mux_addr_lo = XP_GET_BITS(pdata->pp4, XP_PROP_4, MUX_ADDR_LO); if (mux_addr_lo 
== XGBE_SFP_DIRECT) return; phy_data->sfp_comm = XGBE_SFP_COMM_PCA9545; phy_data->sfp_mux_address = (mux_addr_hi << 2) + mux_addr_lo; phy_data->sfp_mux_channel = XP_GET_BITS(pdata->pp4, XP_PROP_4, MUX_CHAN); DBGPR("SFP: mux_address=%#x\n", phy_data->sfp_mux_address); DBGPR("SFP: mux_channel=%u\n", phy_data->sfp_mux_channel); } static void xgbe_phy_sfp_setup(struct xgbe_prv_data *pdata) { xgbe_phy_sfp_comm_setup(pdata); xgbe_phy_sfp_gpio_setup(pdata); } static int xgbe_phy_int_mdio_reset(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; unsigned int ret; ret = pdata->hw_if.set_gpio(pdata, phy_data->mdio_reset_gpio); if (ret) return (ret); ret = pdata->hw_if.clr_gpio(pdata, phy_data->mdio_reset_gpio); return (ret); } static int xgbe_phy_i2c_mdio_reset(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; uint8_t gpio_reg, gpio_ports[2], gpio_data[3]; int ret; /* Read the output port registers */ gpio_reg = 2; ret = xgbe_phy_i2c_read(pdata, phy_data->mdio_reset_addr, &gpio_reg, sizeof(gpio_reg), gpio_ports, sizeof(gpio_ports)); if (ret) return (ret); /* Prepare to write the GPIO data */ gpio_data[0] = 2; gpio_data[1] = gpio_ports[0]; gpio_data[2] = gpio_ports[1]; /* Set the GPIO pin */ if (phy_data->mdio_reset_gpio < 8) gpio_data[1] |= (1 << (phy_data->mdio_reset_gpio % 8)); else gpio_data[2] |= (1 << (phy_data->mdio_reset_gpio % 8)); /* Write the output port registers */ ret = xgbe_phy_i2c_write(pdata, phy_data->mdio_reset_addr, gpio_data, sizeof(gpio_data)); if (ret) return (ret); /* Clear the GPIO pin */ if (phy_data->mdio_reset_gpio < 8) gpio_data[1] &= ~(1 << (phy_data->mdio_reset_gpio % 8)); else gpio_data[2] &= ~(1 << (phy_data->mdio_reset_gpio % 8)); /* Write the output port registers */ ret = xgbe_phy_i2c_write(pdata, phy_data->mdio_reset_addr, gpio_data, sizeof(gpio_data)); return (ret); } static int xgbe_phy_mdio_reset(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; 
int ret; if (phy_data->conn_type != XGBE_CONN_TYPE_MDIO) return (0); ret = xgbe_phy_get_comm_ownership(pdata); if (ret) return (ret); if (phy_data->mdio_reset == XGBE_MDIO_RESET_I2C_GPIO) ret = xgbe_phy_i2c_mdio_reset(pdata); else if (phy_data->mdio_reset == XGBE_MDIO_RESET_INT_GPIO) ret = xgbe_phy_int_mdio_reset(pdata); xgbe_phy_put_comm_ownership(pdata); return (ret); } static bool xgbe_phy_redrv_error(struct xgbe_phy_data *phy_data) { if (!phy_data->redrv) return (false); if (phy_data->redrv_if >= XGBE_PHY_REDRV_IF_MAX) return (true); switch (phy_data->redrv_model) { case XGBE_PHY_REDRV_MODEL_4223: if (phy_data->redrv_lane > 3) return (true); break; case XGBE_PHY_REDRV_MODEL_4227: if (phy_data->redrv_lane > 1) return (true); break; default: return (true); } return (false); } static int xgbe_phy_mdio_reset_setup(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; if (phy_data->conn_type != XGBE_CONN_TYPE_MDIO) return (0); phy_data->mdio_reset = XP_GET_BITS(pdata->pp3, XP_PROP_3, MDIO_RESET); switch (phy_data->mdio_reset) { case XGBE_MDIO_RESET_NONE: case XGBE_MDIO_RESET_I2C_GPIO: case XGBE_MDIO_RESET_INT_GPIO: break; default: axgbe_error("unsupported MDIO reset (%#x)\n", phy_data->mdio_reset); return (-EINVAL); } if (phy_data->mdio_reset == XGBE_MDIO_RESET_I2C_GPIO) { phy_data->mdio_reset_addr = XGBE_GPIO_ADDRESS_PCA9555 + XP_GET_BITS(pdata->pp3, XP_PROP_3, MDIO_RESET_I2C_ADDR); phy_data->mdio_reset_gpio = XP_GET_BITS(pdata->pp3, XP_PROP_3, MDIO_RESET_I2C_GPIO); } else if (phy_data->mdio_reset == XGBE_MDIO_RESET_INT_GPIO) phy_data->mdio_reset_gpio = XP_GET_BITS(pdata->pp3, XP_PROP_3, MDIO_RESET_INT_GPIO); return (0); } static bool xgbe_phy_port_mode_mismatch(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; switch (phy_data->port_mode) { case XGBE_PORT_MODE_BACKPLANE: if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) || (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000)) return (false); break; case 
XGBE_PORT_MODE_BACKPLANE_2500: if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500) return (false); break; case XGBE_PORT_MODE_1000BASE_T: if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) || (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)) return (false); break; case XGBE_PORT_MODE_1000BASE_X: if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) return (false); break; case XGBE_PORT_MODE_NBASE_T: if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) || (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) || (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500)) return (false); break; case XGBE_PORT_MODE_10GBASE_T: if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) || (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) || (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000)) return (false); break; case XGBE_PORT_MODE_10GBASE_R: if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) return (false); break; case XGBE_PORT_MODE_SFP: if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) || (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) || (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000)) return (false); break; default: break; } return (true); } static bool xgbe_phy_conn_type_mismatch(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; switch (phy_data->port_mode) { case XGBE_PORT_MODE_BACKPLANE: case XGBE_PORT_MODE_BACKPLANE_2500: if (phy_data->conn_type == XGBE_CONN_TYPE_BACKPLANE) return (false); break; case XGBE_PORT_MODE_1000BASE_T: case XGBE_PORT_MODE_1000BASE_X: case XGBE_PORT_MODE_NBASE_T: case XGBE_PORT_MODE_10GBASE_T: case XGBE_PORT_MODE_10GBASE_R: if (phy_data->conn_type == XGBE_CONN_TYPE_MDIO) return (false); break; case XGBE_PORT_MODE_SFP: if (phy_data->conn_type == XGBE_CONN_TYPE_SFP) return (false); break; default: break; } return (true); } static bool xgbe_phy_port_enabled(struct xgbe_prv_data *pdata) { if (!XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_SPEEDS)) return (false); if (!XP_GET_BITS(pdata->pp0, XP_PROP_0, 
CONN_TYPE)) return (false); return (true); } static void xgbe_phy_cdr_track(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; axgbe_printf(2, "%s: an_cdr_workaround %d phy_cdr_notrack %d\n", __func__, pdata->sysctl_an_cdr_workaround, phy_data->phy_cdr_notrack); if (!pdata->sysctl_an_cdr_workaround) return; if (!phy_data->phy_cdr_notrack) return; DELAY(phy_data->phy_cdr_delay + 500); XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL, XGBE_PMA_CDR_TRACK_EN_MASK, XGBE_PMA_CDR_TRACK_EN_ON); phy_data->phy_cdr_notrack = 0; axgbe_printf(2, "CDR TRACK DONE\n"); } static void xgbe_phy_cdr_notrack(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; axgbe_printf(2, "%s: an_cdr_workaround %d phy_cdr_notrack %d\n", __func__, pdata->sysctl_an_cdr_workaround, phy_data->phy_cdr_notrack); if (!pdata->sysctl_an_cdr_workaround) return; if (phy_data->phy_cdr_notrack) return; XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL, XGBE_PMA_CDR_TRACK_EN_MASK, XGBE_PMA_CDR_TRACK_EN_OFF); xgbe_phy_rrc(pdata); phy_data->phy_cdr_notrack = 1; } static void xgbe_phy_kr_training_post(struct xgbe_prv_data *pdata) { if (!pdata->sysctl_an_cdr_track_early) xgbe_phy_cdr_track(pdata); } static void xgbe_phy_kr_training_pre(struct xgbe_prv_data *pdata) { if (pdata->sysctl_an_cdr_track_early) xgbe_phy_cdr_track(pdata); } static void xgbe_phy_an_post(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; switch (pdata->an_mode) { case XGBE_AN_MODE_CL73: case XGBE_AN_MODE_CL73_REDRV: if (phy_data->cur_mode != XGBE_MODE_KR) break; xgbe_phy_cdr_track(pdata); switch (pdata->an_result) { case XGBE_AN_READY: case XGBE_AN_COMPLETE: break; default: if (phy_data->phy_cdr_delay < XGBE_CDR_DELAY_MAX) phy_data->phy_cdr_delay += XGBE_CDR_DELAY_INC; else phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT; break; } break; default: break; } } static void xgbe_phy_an_pre(struct xgbe_prv_data *pdata) { struct 
xgbe_phy_data *phy_data = pdata->phy_data;

	/*
	 * Tail of a function whose opening lines are above this view.
	 * For CL73-based autonegotiation modes, disable CDR tracking
	 * only when the current mode is KR; all other AN modes are
	 * left untouched.
	 */
	switch (pdata->an_mode) {
	case XGBE_AN_MODE_CL73:
	case XGBE_AN_MODE_CL73_REDRV:
		if (phy_data->cur_mode != XGBE_MODE_KR)
			break;

		xgbe_phy_cdr_notrack(pdata);
		break;
	default:
		break;
	}
}

/*
 * Stop the PHY: release the external PHY (if any), clear cached SFP
 * state, restore CDR tracking, power the PHY down and finally stop the
 * I2C controller.  The teardown order mirrors (reverses) the bring-up
 * order in xgbe_phy_start().
 */
static void
xgbe_phy_stop(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;

	/* If we have an external PHY, free it */
	xgbe_phy_free_phy_device(pdata);

	/* Reset SFP data */
	xgbe_phy_sfp_reset(phy_data);
	xgbe_phy_sfp_mod_absent(pdata);

	/* Reset CDR support */
	xgbe_phy_cdr_track(pdata);

	/* Power off the PHY */
	xgbe_phy_power_off(pdata);

	/* Stop the I2C controller */
	pdata->i2c_if.i2c_stop(pdata);
}

/*
 * Start the PHY.  Brings up the I2C controller first (SFP EEPROM and
 * re-driver access depend on it), programs the re-driver's MDIO mode if
 * one is present and not using an alternate interface, enters the
 * highest supported mode, re-enables CDR tracking, probes for an SFP
 * module when in SFP port mode, and finally attaches any external PHY.
 *
 * Returns 0 on success or a negative errno-style value on failure; on
 * a late failure the I2C controller is stopped again via the err_i2c
 * cleanup path.
 */
static int
xgbe_phy_start(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;
	int ret;

	axgbe_printf(2, "%s: redrv %d redrv_if %d start_mode %d\n",
	    __func__, phy_data->redrv, phy_data->redrv_if,
	    phy_data->start_mode);

	/* Start the I2C controller */
	ret = pdata->i2c_if.i2c_start(pdata);
	if (ret) {
		axgbe_error("%s: impl i2c start ret %d\n", __func__, ret);
		return (ret);
	}

	/* Set the proper MDIO mode for the re-driver */
	if (phy_data->redrv && !phy_data->redrv_if) {
		ret = pdata->hw_if.set_ext_mii_mode(pdata,
		    phy_data->redrv_addr, XGBE_MDIO_MODE_CL22);
		if (ret) {
			axgbe_error("redriver mdio port not compatible (%u)\n",
			    phy_data->redrv_addr);
			return (ret);
		}
	}

	/* Start in highest supported mode */
	xgbe_phy_set_mode(pdata, phy_data->start_mode);

	/* Reset CDR support */
	xgbe_phy_cdr_track(pdata);

	/* After starting the I2C controller, we can check for an SFP */
	switch (phy_data->port_mode) {
	case XGBE_PORT_MODE_SFP:
		axgbe_printf(3, "%s: calling phy detect\n", __func__);
		xgbe_phy_sfp_detect(pdata);
		break;
	default:
		break;
	}

	/* If we have an external PHY, start it */
	ret = xgbe_phy_find_phy_device(pdata);
	if (ret) {
		axgbe_error("%s: impl find phy dev ret %d\n", __func__, ret);
		goto err_i2c;
	}

	axgbe_printf(3, "%s: impl return success\n", __func__);
	return (0);

err_i2c:
	/* Undo the I2C start done at the top of this function */
	pdata->i2c_if.i2c_stop(pdata);

	return (ret);
}

/*
 * Reset the PHY by power-cycling it back into its current mode, then
 * reset the external PHY (when one is attached) through the MDIO reset
 * mechanism configured at init time.  Returns 0 on success or the
 * error from the MDIO reset.
 */
static int
xgbe_phy_reset(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data = pdata->phy_data;
	enum xgbe_mode cur_mode;
	int ret;

	/* Reset by power cycling the PHY */
	cur_mode = phy_data->cur_mode;
	xgbe_phy_power_off(pdata);
	xgbe_phy_set_mode(pdata, cur_mode);

	axgbe_printf(3, "%s: mode %d\n", __func__, cur_mode);
	if (!phy_data->phydev) {
		/* No external PHY attached; nothing more to reset */
		axgbe_printf(1, "%s: no phydev\n", __func__);
		return (0);
	}

	/* Reset the external PHY */
	ret = xgbe_phy_mdio_reset(pdata);
	if (ret) {
		axgbe_error("%s: mdio reset %d\n", __func__, ret);
		return (ret);
	}

	axgbe_printf(3, "%s: return success\n", __func__);
	return (0);
}

/*
 * ifmedia status callback: report the current media word and link
 * status from miibus.  Polls the MII state under the MDIO spin mutex
 * before copying it into the caller's ifmediareq.
 *
 * NOTE(review): mii_pollstat() is called while holding a spin mutex;
 * confirm the MII read path here never sleeps.
 */
static void
axgbe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct axgbe_if_softc *sc;
	struct xgbe_prv_data *pdata;
	struct mii_data *mii;

	sc = ifp->if_softc;
	pdata = &sc->pdata;

	axgbe_printf(2, "%s: Invoked\n", __func__);
	mtx_lock_spin(&pdata->mdio_mutex);
	mii = device_get_softc(pdata->axgbe_miibus);
	axgbe_printf(2, "%s: media_active %#x media_status %#x\n", __func__,
	    mii->mii_media_active, mii->mii_media_status);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	mtx_unlock_spin(&pdata->mdio_mutex);
}

/*
 * ifmedia change callback: reset every attached MII PHY and apply the
 * newly selected media via mii_mediachg(), all under the MDIO spin
 * mutex.  Returns the result of mii_mediachg().
 */
static int
axgbe_ifmedia_upd(struct ifnet *ifp)
{
	struct xgbe_prv_data *pdata;
	struct axgbe_if_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int ret;

	sc = ifp->if_softc;
	pdata = &sc->pdata;

	axgbe_printf(2, "%s: Invoked\n", __func__);
	mtx_lock_spin(&pdata->mdio_mutex);
	mii = device_get_softc(pdata->axgbe_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	ret = mii_mediachg(mii);
	mtx_unlock_spin(&pdata->mdio_mutex);

	return (ret);
}

/*
 * Tear down what xgbe_phy_init() created: detach the miibus child (if
 * one was attached) and free the phy_data structure.
 */
static void
xgbe_phy_exit(struct xgbe_prv_data *pdata)
{
	if (pdata->axgbe_miibus != NULL)
		device_delete_child(pdata->dev, pdata->axgbe_miibus);

	/* free phy_data structure */
	free(pdata->phy_data, M_AXGBE);
}

/*
 * One-time PHY initialization.  Reads the port configuration and
 * re-driver properties from the XP property registers, validates the
 * requested connection/mode/re-driver combination, and fills in the
 * supported-feature set, starting mode and MDIO clause for the
 * configured port mode.  For MDIO-connected PHYs the external MII mode
 * is programmed, and for non-SFP ports a miibus instance is attached.
 *
 * Returns 0 on success or a negative errno-style value on failure.
 */
static int
xgbe_phy_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_phy_data *phy_data;
	int ret;

	/* Initialize the global lock */
	if (!mtx_initialized(&xgbe_phy_comm_lock))
		mtx_init(&xgbe_phy_comm_lock, "xgbe phy common lock", NULL,
		    MTX_DEF);

	/* Check if enabled */
	if (!xgbe_phy_port_enabled(pdata)) {
		axgbe_error("device is not enabled\n");
		return (-ENODEV);
	}

	/* Initialize the I2C controller */
	ret = pdata->i2c_if.i2c_init(pdata);
	if (ret)
		return (ret);

	phy_data = malloc(sizeof(*phy_data), M_AXGBE, M_WAITOK | M_ZERO);
	/*
	 * NOTE(review): M_WAITOK allocations do not return NULL per
	 * malloc(9), so this check is dead code (harmless).
	 */
	if (!phy_data)
		return (-ENOMEM);
	pdata->phy_data = phy_data;

	/* Port identity and capabilities from XP property register 0 */
	phy_data->port_mode = XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_MODE);
	phy_data->port_id = XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_ID);
	phy_data->port_speeds = XP_GET_BITS(pdata->pp0, XP_PROP_0,
	    PORT_SPEEDS);
	phy_data->conn_type = XP_GET_BITS(pdata->pp0, XP_PROP_0, CONN_TYPE);
	phy_data->mdio_addr = XP_GET_BITS(pdata->pp0, XP_PROP_0, MDIO_ADDR);
	pdata->mdio_addr = phy_data->mdio_addr;
	DBGPR("port mode=%u\n", phy_data->port_mode);
	DBGPR("port id=%u\n", phy_data->port_id);
	DBGPR("port speeds=%#x\n", phy_data->port_speeds);
	DBGPR("conn type=%u\n", phy_data->conn_type);
	DBGPR("mdio addr=%u\n", phy_data->mdio_addr);

	/* Re-driver configuration from XP property register 4 */
	phy_data->redrv = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_PRESENT);
	phy_data->redrv_if = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_IF);
	phy_data->redrv_addr = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_ADDR);
	phy_data->redrv_lane = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_LANE);
	phy_data->redrv_model = XP_GET_BITS(pdata->pp4, XP_PROP_4,
	    REDRV_MODEL);

	if (phy_data->redrv) {
		DBGPR("redrv present\n");
		DBGPR("redrv i/f=%u\n", phy_data->redrv_if);
		DBGPR("redrv addr=%#x\n", phy_data->redrv_addr);
		DBGPR("redrv lane=%u\n", phy_data->redrv_lane);
		DBGPR("redrv model=%u\n", phy_data->redrv_model);
	}

	DBGPR("%s: redrv addr=%#x redrv i/f=%u\n", __func__,
	    phy_data->redrv_addr, phy_data->redrv_if);
	/* Validate the connection requested */
	if (xgbe_phy_conn_type_mismatch(pdata)) {
		axgbe_error("phy mode/connection mismatch "
		    "(%#x/%#x)\n", phy_data->port_mode, phy_data->conn_type);
		return (-EINVAL);
	}

	/* Validate the mode requested */
	if (xgbe_phy_port_mode_mismatch(pdata)) {
		axgbe_error("phy mode/speed mismatch "
		    "(%#x/%#x)\n", phy_data->port_mode,
		    phy_data->port_speeds);
		return (-EINVAL);
	}

	/* Check for and validate MDIO reset support */
	ret = xgbe_phy_mdio_reset_setup(pdata);
	if (ret) {
		axgbe_error("%s, mdio_reset_setup ret %d\n", __func__, ret);
		return (ret);
	}

	/* Validate the re-driver information */
	if (xgbe_phy_redrv_error(phy_data)) {
		axgbe_error("phy re-driver settings error\n");
		return (-EINVAL);
	}
	pdata->kr_redrv = phy_data->redrv;

	/* Indicate current mode is unknown */
	phy_data->cur_mode = XGBE_MODE_UNKNOWN;

	/* Initialize supported features. Current code does not support ethtool */
	XGBE_ZERO_SUP(&pdata->phy);

	/*
	 * Fill in supported features, the initial operating mode and the
	 * MDIO clause to use, keyed by the configured port mode.  Where
	 * several speeds are enabled, the highest one wins start_mode.
	 */
	DBGPR("%s: port mode %d\n", __func__, phy_data->port_mode);
	switch (phy_data->port_mode) {
	/* Backplane support */
	case XGBE_PORT_MODE_BACKPLANE:
		XGBE_SET_SUP(&pdata->phy, Autoneg);
		XGBE_SET_SUP(&pdata->phy, Pause);
		XGBE_SET_SUP(&pdata->phy, Asym_Pause);
		XGBE_SET_SUP(&pdata->phy, Backplane);
		if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) {
			XGBE_SET_SUP(&pdata->phy, 1000baseKX_Full);
			phy_data->start_mode = XGBE_MODE_KX_1000;
		}
		if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) {
			XGBE_SET_SUP(&pdata->phy, 10000baseKR_Full);
			if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
				XGBE_SET_SUP(&pdata->phy, 10000baseR_FEC);
			phy_data->start_mode = XGBE_MODE_KR;
		}

		phy_data->phydev_mode = XGBE_MDIO_MODE_NONE;
		break;
	case XGBE_PORT_MODE_BACKPLANE_2500:
		XGBE_SET_SUP(&pdata->phy, Pause);
		XGBE_SET_SUP(&pdata->phy, Asym_Pause);
		XGBE_SET_SUP(&pdata->phy, Backplane);
		XGBE_SET_SUP(&pdata->phy, 2500baseX_Full);
		phy_data->start_mode = XGBE_MODE_KX_2500;

		phy_data->phydev_mode = XGBE_MDIO_MODE_NONE;
		break;

	/* MDIO 1GBase-T support */
	case XGBE_PORT_MODE_1000BASE_T:
		XGBE_SET_SUP(&pdata->phy, Autoneg);
		XGBE_SET_SUP(&pdata->phy, Pause);
		XGBE_SET_SUP(&pdata->phy, Asym_Pause);
		XGBE_SET_SUP(&pdata->phy, TP);
		if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) {
			XGBE_SET_SUP(&pdata->phy, 100baseT_Full);
			phy_data->start_mode = XGBE_MODE_SGMII_100;
		}
		if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) {
			XGBE_SET_SUP(&pdata->phy, 1000baseT_Full);
			phy_data->start_mode = XGBE_MODE_SGMII_1000;
		}

		phy_data->phydev_mode = XGBE_MDIO_MODE_CL22;
		break;

	/* MDIO Base-X support */
	case XGBE_PORT_MODE_1000BASE_X:
		XGBE_SET_SUP(&pdata->phy, Autoneg);
		XGBE_SET_SUP(&pdata->phy, Pause);
		XGBE_SET_SUP(&pdata->phy, Asym_Pause);
		XGBE_SET_SUP(&pdata->phy, FIBRE);
		XGBE_SET_SUP(&pdata->phy, 1000baseX_Full);
		phy_data->start_mode = XGBE_MODE_X;

		phy_data->phydev_mode = XGBE_MDIO_MODE_CL22;
		break;

	/* MDIO NBase-T support */
	case XGBE_PORT_MODE_NBASE_T:
		XGBE_SET_SUP(&pdata->phy, Autoneg);
		XGBE_SET_SUP(&pdata->phy, Pause);
		XGBE_SET_SUP(&pdata->phy, Asym_Pause);
		XGBE_SET_SUP(&pdata->phy, TP);
		if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) {
			XGBE_SET_SUP(&pdata->phy, 100baseT_Full);
			phy_data->start_mode = XGBE_MODE_SGMII_100;
		}
		if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) {
			XGBE_SET_SUP(&pdata->phy, 1000baseT_Full);
			phy_data->start_mode = XGBE_MODE_SGMII_1000;
		}
		if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500) {
			XGBE_SET_SUP(&pdata->phy, 2500baseT_Full);
			phy_data->start_mode = XGBE_MODE_KX_2500;
		}

		phy_data->phydev_mode = XGBE_MDIO_MODE_CL45;
		break;

	/* 10GBase-T support */
	case XGBE_PORT_MODE_10GBASE_T:
		XGBE_SET_SUP(&pdata->phy, Autoneg);
		XGBE_SET_SUP(&pdata->phy, Pause);
		XGBE_SET_SUP(&pdata->phy, Asym_Pause);
		XGBE_SET_SUP(&pdata->phy, TP);
		if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) {
			XGBE_SET_SUP(&pdata->phy, 100baseT_Full);
			phy_data->start_mode = XGBE_MODE_SGMII_100;
		}
		if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) {
			XGBE_SET_SUP(&pdata->phy, 1000baseT_Full);
			phy_data->start_mode = XGBE_MODE_SGMII_1000;
		}
		if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) {
			XGBE_SET_SUP(&pdata->phy, 10000baseT_Full);
			phy_data->start_mode = XGBE_MODE_KR;
		}

		phy_data->phydev_mode = XGBE_MDIO_MODE_CL45;
		break;

	/* 10GBase-R support */
	case XGBE_PORT_MODE_10GBASE_R:
		XGBE_SET_SUP(&pdata->phy, Autoneg);
		XGBE_SET_SUP(&pdata->phy, Pause);
		XGBE_SET_SUP(&pdata->phy, Asym_Pause);
		XGBE_SET_SUP(&pdata->phy, FIBRE);
		XGBE_SET_SUP(&pdata->phy, 10000baseSR_Full);
		XGBE_SET_SUP(&pdata->phy, 10000baseLR_Full);
		XGBE_SET_SUP(&pdata->phy, 10000baseLRM_Full);
		XGBE_SET_SUP(&pdata->phy, 10000baseER_Full);
		if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
			XGBE_SET_SUP(&pdata->phy, 10000baseR_FEC);
		phy_data->start_mode = XGBE_MODE_SFI;

		phy_data->phydev_mode = XGBE_MDIO_MODE_NONE;
		break;

	/* SFP support */
	case XGBE_PORT_MODE_SFP:
		XGBE_SET_SUP(&pdata->phy, Autoneg);
		XGBE_SET_SUP(&pdata->phy, Pause);
		XGBE_SET_SUP(&pdata->phy, Asym_Pause);
		XGBE_SET_SUP(&pdata->phy, TP);
		XGBE_SET_SUP(&pdata->phy, FIBRE);
		if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100)
			phy_data->start_mode = XGBE_MODE_SGMII_100;
		if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
			phy_data->start_mode = XGBE_MODE_SGMII_1000;
		if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000)
			phy_data->start_mode = XGBE_MODE_SFI;

		phy_data->phydev_mode = XGBE_MDIO_MODE_CL22;

		xgbe_phy_sfp_setup(pdata);
		DBGPR("%s: start %d mode %d adv 0x%x\n", __func__,
		    phy_data->start_mode, phy_data->phydev_mode,
		    pdata->phy.advertising);
		break;
	default:
		return (-EINVAL);
	}

	axgbe_printf(2, "%s: start %d mode %d adv 0x%x\n", __func__,
	    phy_data->start_mode, phy_data->phydev_mode,
	    pdata->phy.advertising);

	DBGPR("%s: conn type %d mode %d\n", __func__, phy_data->conn_type,
	    phy_data->phydev_mode);
	/* Program the external MII mode for an MDIO-connected PHY */
	if ((phy_data->conn_type & XGBE_CONN_TYPE_MDIO) &&
	    (phy_data->phydev_mode != XGBE_MDIO_MODE_NONE)) {
		ret = pdata->hw_if.set_ext_mii_mode(pdata,
		    phy_data->mdio_addr, phy_data->phydev_mode);
		if (ret) {
			axgbe_error("mdio port/clause not compatible (%d/%u)\n",
			    phy_data->mdio_addr, phy_data->phydev_mode);
			return (-EINVAL);
		}
	}

	/* Re-driver without an alternate interface is driven over CL22 MDIO */
	if (phy_data->redrv && !phy_data->redrv_if) {
		ret = pdata->hw_if.set_ext_mii_mode(pdata,
		    phy_data->redrv_addr, XGBE_MDIO_MODE_CL22);
		if (ret) {
			axgbe_error("redriver mdio port not compatible (%u)\n",
			    phy_data->redrv_addr);
			return (-EINVAL);
		}
	}

	phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT;

	/* SFP ports manage their own PHY; all others attach miibus here */
	if (phy_data->port_mode != XGBE_PORT_MODE_SFP) {
		ret = mii_attach(pdata->dev, &pdata->axgbe_miibus,
		    pdata->netdev, (ifm_change_cb_t)axgbe_ifmedia_upd,
		    (ifm_stat_cb_t)axgbe_ifmedia_sts, BMSR_DEFCAPMASK,
		    pdata->mdio_addr, MII_OFFSET_ANY, MIIF_FORCEANEG);
		if (ret){
			axgbe_printf(2, "mii attach failed with err=(%d)\n",
			    ret);
			return (-EINVAL);
		}
	}

	DBGPR("%s: return success\n", __func__);

	return (0);
}

/*
 * Populate the PHY implementation function-pointer table with the
 * v2 PHY routines defined in this file.
 */
void
xgbe_init_function_ptrs_phy_v2(struct xgbe_phy_if *phy_if)
{
	struct xgbe_phy_impl_if *phy_impl = &phy_if->phy_impl;

	phy_impl->init = xgbe_phy_init;
	phy_impl->exit = xgbe_phy_exit;

	phy_impl->reset = xgbe_phy_reset;
	phy_impl->start = xgbe_phy_start;
	phy_impl->stop = xgbe_phy_stop;

	phy_impl->link_status = xgbe_phy_link_status;

	phy_impl->valid_speed = xgbe_phy_valid_speed;

	phy_impl->use_mode = xgbe_phy_use_mode;
	phy_impl->set_mode = xgbe_phy_set_mode;
	phy_impl->get_mode = xgbe_phy_get_mode;
	phy_impl->switch_mode = xgbe_phy_switch_mode;
	phy_impl->cur_mode = xgbe_phy_cur_mode;
	phy_impl->get_type = xgbe_phy_get_type;

	phy_impl->an_mode = xgbe_phy_an_mode;

	phy_impl->an_config = xgbe_phy_an_config;

	phy_impl->an_advertising = xgbe_phy_an_advertising;

	phy_impl->an_outcome = xgbe_phy_an_outcome;

	phy_impl->an_pre = xgbe_phy_an_pre;
	phy_impl->an_post = xgbe_phy_an_post;

	phy_impl->kr_training_pre = xgbe_phy_kr_training_pre;
	phy_impl->kr_training_post = xgbe_phy_kr_training_post;

	phy_impl->module_info = xgbe_phy_module_info;
	phy_impl->module_eeprom = xgbe_phy_module_eeprom;
}